AlaouiMdaghriAhmed commited on
Commit
775c806
·
1 Parent(s): b3d3bec

file swap

Browse files
app.py → app1.py RENAMED
@@ -1,158 +1,44 @@
1
- from hugchat import hugchat
2
- from hugchat.login import Login
3
- from pyecore.resources import ResourceSet, URI
4
  import gradio as gr
5
- import logging
6
- from pathlib import Path
7
- from typing import List, Optional, Tuple
8
 
9
  from dotenv import load_dotenv
10
 
11
  load_dotenv()
12
 
13
- import verify
14
 
15
- from queue import Empty, Queue
16
- from threading import Thread
17
 
18
- import gradio as gr
19
- from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
20
- from langchain_community.chat_models import ChatOpenAI
21
- from langchain.prompts import HumanMessagePromptTemplate
22
- from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
23
  # adapted from https://github.com/hwchase17/langchain/issues/2428#issuecomment-1512280045
24
- from queue import Queue
25
- from typing import Any
26
 
27
- from langchain.callbacks.base import BaseCallbackHandler
28
  from datasets import load_dataset
29
 
30
- dataset = load_dataset("VeryMadSoul/NLD")
31
-
32
-
33
- class QueueCallback(BaseCallbackHandler):
34
- """Callback handler for streaming LLM responses to a queue."""
35
-
36
- def __init__(self, queue: Queue):
37
- self.queue = queue
38
-
39
- def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
40
- self.queue.put(token)
41
-
42
- def on_llm_end(self, *args, **kwargs: Any) -> None:
43
- return self.queue.empty()
44
-
45
- def apply_hf_settings_button(prompt, model_name) :
46
- verify.chatbot.switch_llm(HF_MODELS_NAMES.index(model_name))
47
- verify.chatbot.new_conversation(switch_to = True)
48
- return "",[]
49
-
50
- HF_MODELS_NAMES = [model.name for model in verify.chatbot.get_available_llm_models()]
51
-
52
-
53
- DEFAULT_TEMPERATURE = 0.2
54
-
55
- ChatHistory = List[str]
56
-
57
- logging.basicConfig(
58
- format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO
59
- )
60
- # load up our system prompt
61
- default_system_prompt = '''You are a systems engineer, expert in model driven engineering and meta-modeling
62
- Your OUTPUT should always be an ecore xmi in this format :
63
-
64
- ```XML
65
-
66
- YOUR CODE HERE
67
-
68
- ```
69
- '''
70
- # for the human, we will just inject the text
71
- human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}")
72
-
73
 
74
- def on_message_button_click(
75
- chat: Optional[ChatOpenAI],
76
- message: str,
77
- chatbot_messages: ChatHistory,
78
- messages: List[BaseMessage],
79
- ) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]:
80
- if chat is None:
81
- # in the queue we will store our streamed tokens
82
- queue = Queue()
83
- # let's create our default chat
84
- chat = ChatOpenAI(
85
- model_name=GPT_MODELS_NAMES[0],
86
- temperature=DEFAULT_TEMPERATURE,
87
- streaming=True,
88
- callbacks=([QueueCallback(queue)]),
89
- )
90
- else:
91
- # hacky way to get the queue back
92
- queue = chat.callbacks[0].queue
93
 
94
- job_done = object()
 
 
 
 
95
 
96
- logging.info(f"Asking question to GPT, messages={messages}")
97
- # let's add the messages to our stuff
98
- messages.append(HumanMessage(content=message))
99
- chatbot_messages.append((message, ""))
100
- # this is a little wrapper we need cuz we have to add the job_done
101
- def task():
102
- chat(messages)
103
- queue.put(job_done)
104
 
105
- # now let's start a thread and run the generation inside it
106
- t = Thread(target=task)
107
- t.start()
108
- # this will hold the content as we generate
109
- content = ""
110
- # now, we read the next_token from queue and do what it has to be done
111
- while True:
112
- try:
113
- next_token = queue.get(True, timeout=1)
114
- if next_token is job_done:
115
- break
116
- content += next_token
117
- chatbot_messages[-1] = (message, content)
118
- yield chat, "", chatbot_messages, messages
119
- except Empty:
120
- continue
121
- # finally we can add our reply to messsages
122
- messages.append(AIMessage(content=content))
123
- logging.debug(f"reply = {content}")
124
- logging.info(f"Done!")
125
- return chat, "", chatbot_messages, messages
126
 
127
-
128
- def system_prompt_handler(value: str) -> str:
129
- return value
130
-
131
-
132
- def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]:
133
- return "", [], [SystemMessage(content=system_prompt)]
134
-
135
-
136
- def on_apply_settings_button_click(
137
- system_prompt: str, model_name: str, temperature: float
138
- ):
139
- logging.info(
140
- f"Applying settings: model_name={model_name}, temperature={temperature}"
141
- )
142
- chat = ChatOpenAI(
143
- model_name=model_name,
144
- temperature=temperature,
145
- streaming=True,
146
- callbacks=[QueueCallback(Queue())],
147
- )
148
- # don't forget to nuke our queue
149
- chat.callbacks[0].queue.empty()
150
- return chat, *on_clear_button_click(system_prompt)
151
-
152
-
153
-
154
-
155
-
156
  def trigger_example(example):
157
  chat, updated_history = generate_response(example)
158
  return chat, updated_history
@@ -160,14 +46,15 @@ def trigger_example(example):
160
  def generate_response(user_message, history):
161
 
162
  #history.append((user_message,str(chatbot.chat(user_message))))
163
- history, errors = verify.iterative_prompting(user_message,verify.description)
164
  return "", history
165
 
 
 
 
 
166
  def clear_chat():
167
  return [], []
168
-
169
- examples = [dataset['train'][i]['NLD'] for i in range(len(dataset['train']))]
170
-
171
  custom_css = """
172
  #logo-img {
173
  border: none !important;
@@ -177,26 +64,26 @@ custom_css = """
177
  min-height: 300px;
178
  }
179
  """
180
- GPT_MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4",'gpt-4o']
181
-
182
  with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
183
 
184
 
185
- with gr.Tab("HF_API"):
186
  with gr.Row():
187
  with gr.Column(scale=1):
188
  gr.Image("images\logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
189
  with gr.Column(scale=3):
190
  gr.Markdown("""This Chatbot has been made to showcase our work on generating meta-model from textual descriptions.
191
- <br/><br/>
192
- The output of this conversation is going to be an ecore file that is validated by PyEcore [Pyecore (https://github.com/pyecore/pyecore)]
193
- <br/>
194
- Available Models : <br>
195
- - Cohere4ai-command-r-plus<br>
196
- - Llama-3-70B<br>
197
-
198
- """
199
- )
 
200
 
201
  with gr.Row():
202
  chatbot1 = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
@@ -218,111 +105,51 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
218
  submit_button.click(fn=generate_response, inputs=[user_message, chatbot1], outputs=[user_message, chatbot1], concurrency_limit=32)
219
 
220
  clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot1, history], concurrency_limit=32)
 
221
 
 
222
  with gr.Accordion("Settings", open=False):
223
  model_name = gr.Dropdown(
224
- choices=HF_MODELS_NAMES, value=HF_MODELS_NAMES[0], label="model"
225
  )
226
  settings_button = gr.Button("Apply")
227
  settings_button.click(
228
- apply_hf_settings_button,
229
  [user_message,model_name],
230
  [user_message, chatbot1],
231
  )
232
 
233
 
234
-
235
  with gr.Row():
236
  gr.Examples(
237
  examples=examples,
238
  inputs=user_message,
239
  cache_examples=False,
240
  fn=trigger_example,
241
- outputs=[chatbot],
242
  examples_per_page=100
243
  )
244
  #user_message.submit(lambda x: gr.update(value=""), None, [user_message], queue=False)
245
  #submit_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
246
  #clear_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
 
247
 
248
-
249
- with gr.Tab("OPENAI"):
250
- system_prompt = gr.State(default_system_prompt)
251
- # here we keep our state so multiple user can use the app at the same time!
252
- messages = gr.State([SystemMessage(content=default_system_prompt)])
253
- # same thing for the chat, we want one chat per use so callbacks are unique I guess
254
- chat = gr.State(None)
255
-
256
- with gr.Column(elem_id="col_container"):
257
- with gr.Row():
258
- with gr.Column(scale=1):
259
- gr.Image("images\logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
260
- with gr.Column(scale=3):
261
- gr.Markdown("""This Chatbot has been made to showcase our work on generating meta-model from textual descriptions.
262
- <br/><br/>
263
- The output of this conversation is going to be an ecore file that is validated by PyEcore [Pyecore (https://github.com/pyecore/pyecore)]
264
- <br/>
265
- Available Models : <br>
266
- - GPT3-Turbo<br>
267
- - GPT4-Turbo<br>
268
- - GPT4-Omni
269
-
270
- """
271
- )
272
-
273
-
274
- chatbot1 = gr.Chatbot()
275
- with gr.Column():
276
- message = gr.Textbox(label="chat input")
277
- message.submit(
278
- on_message_button_click,
279
- [chat, message, chatbot1, messages],
280
- [chat, message, chatbot1, messages],
281
- queue=True,
282
- )
283
- message_button = gr.Button("Submit", variant="primary")
284
- message_button.click(
285
- on_message_button_click,
286
- [chat, message, chatbot1, messages],
287
- [chat, message, chatbot1, messages],
288
- )
289
- with gr.Row():
290
- with gr.Column():
291
- clear_button = gr.Button("Clear")
292
- clear_button.click(
293
- on_clear_button_click,
294
- [system_prompt],
295
- [message, chatbot1, messages],
296
- queue=False,
297
- )
298
- with gr.Accordion("Settings", open=False):
299
- model_name = gr.Dropdown(
300
- choices=GPT_MODELS_NAMES, value=GPT_MODELS_NAMES[0], label="model"
301
- )
302
- temperature = gr.Slider(
303
- minimum=0.0,
304
- maximum=1.0,
305
- value=0.7,
306
- step=0.1,
307
- label="temperature",
308
- interactive=True,
309
- )
310
- apply_settings_button = gr.Button("Apply")
311
- apply_settings_button.click(
312
- on_apply_settings_button_click,
313
- [system_prompt, model_name, temperature],
314
- [chat, message, chatbot1, messages],
315
- )
316
- with gr.Row():
317
- gr.Examples(
318
- examples=examples,
319
- inputs=message,
320
- cache_examples=False,
321
- fn=on_message_button_click,
322
- outputs=[chat, message, chatbot1, messages],
323
- examples_per_page=100
324
- )
325
-
326
 
327
  if __name__ == "__main__":
328
  # demo.launch(debug=True)
 
1
+
 
 
2
  import gradio as gr
3
+ import os
 
 
4
 
5
  from dotenv import load_dotenv
6
 
7
  load_dotenv()
8
 
9
+ import verify2
10
 
 
 
11
 
 
 
 
 
 
12
  # adapted from https://github.com/hwchase17/langchain/issues/2428#issuecomment-1512280045
 
 
13
 
 
14
  from datasets import load_dataset
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
BASE_DIR = 'outs'

def list_files(directory):
    """Return the file names inside BASE_DIR/<directory>.

    Returns an empty list when the path is missing OR is not a
    directory (the original used os.path.exists, so a plain file at
    that path would crash os.listdir).
    """
    dir_path = os.path.join(BASE_DIR, directory)
    if not os.path.isdir(dir_path):
        return []
    return os.listdir(dir_path)
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
+ def file_content(directory, file_name):
26
+ file_path = os.path.join(BASE_DIR, directory, file_name)
27
+ with open(file_path, 'r') as file:
28
+ content = file.read()
29
+ return content
30
 
31
+ def download_file(directory, file_name):
32
+ file_path = os.path.join(BASE_DIR, directory, file_name)
33
+ return file_path
 
 
 
 
 
34
 
35
+ examples = [
36
+ '''SimplePDL is an experimental language for specifying processes. The SPEM standard (Software Process Engineering Metamodel) proposed by the OMG inspired our work, but we also took ideas from the UMA metamodel (Unified Method Architecture) used in the EPF Eclipse plug-in (Eclipse Process Framework), dedicated to process modeling. SimplePDL is simplified to keep the presentation simple.
37
+ Its metamodel is given in the figure 1. It defines the process concept (Process) composed of a set of work definitions (WorkDefinition) representing the activities to be performed during the development. One workdefinition may depend upon another (WorkSequence). In such a case, an ordering constraint (linkType) on the second workdefinition is specified, using the enumeration WorkSequenceType. For example, linking two workdefinitions wd1 and wd2 by a precedence relation of kind finishToStart means that wd2 can be started only if wd1 is finished (and respectively for startToStart, startToFinish and finishToFinish). SimplePDL does also allow to explicitly represent resources (Resource) that are needed in order to perform one workdefinition (designer, computer, server...) and also time constraints (min_time and max_time on WorkDefinition and Process) to specify the minimum (resp. maximum) time allowed to perform the workdefinition or the whole process.''',
38
+ " A FSM is conceived as an abstract machine that can be in one of a finite number of states. The machine is in only one state at a time; the state it is in at any given time is called the current state. It can change from one state to another when initiated by a triggering event or condition; this is called a transition. A particular FSM is defined by a list of its states, and the triggering condition for each transition.",
39
+ "Un Website est l'élément racine. Il est décrit par deux attributs (copyright et isMobileFriendly) et par une composition d'une ou plusieurs pages. Une page est décrite par deux attributs (son nom et son titre), ainsi que par des références à d'autres pages."
40
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
def trigger_example(example):
    """Run an example prompt through the generation pipeline.

    BUG FIX: generate_response takes (user_message, history); calling it
    with a single argument raised TypeError. Pass an empty history, which
    generate_response ignores anyway.
    """
    chat, updated_history = generate_response(example, [])
    return chat, updated_history
 
46
def generate_response(user_message, history):
    """Produce a validated ecore reply via verify2's iterative prompting.

    `history` is required by the Gradio callback signature but is not
    consumed here: the returned transcript comes entirely from verify2.
    Returns ("", transcript) so the input textbox is cleared.
    """
    transcript, _errors = verify2.iterative_prompting(
        user_message, verify2.description, model=verify2.model
    )
    return "", transcript
51
 
52
def apply_gpt_settings_button(prompt, model_name):
    """Select a new OpenAI model and reset the chat widgets.

    verify2.model is read on every generate_response call, so mutating
    it here takes effect on the next message.
    """
    verify2.model = model_name
    return ("", [])
55
+
56
def clear_chat():
    """Reset both the chatbot display and the stored history."""
    return ([], [])
 
 
 
58
  custom_css = """
59
  #logo-img {
60
  border: none !important;
 
64
  min-height: 300px;
65
  }
66
  """
67
+ GPT_MODELS_NAMES = ["gpt-4-turbo", "gpt-4o", "gpt-3.5-turbo"]
 
68
  with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
69
 
70
 
71
+ with gr.Tab("OPENAI API"):
72
  with gr.Row():
73
  with gr.Column(scale=1):
74
  gr.Image("images\logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
75
  with gr.Column(scale=3):
76
  gr.Markdown("""This Chatbot has been made to showcase our work on generating meta-model from textual descriptions.
77
+ <br/><br/>
78
+ The output of this conversation is going to be an ecore file that is validated by PyEcore [Pyecore (https://github.com/pyecore/pyecore)]
79
+ <br/>
80
+ Available Models : <br>
81
+ - GPT3-Turbo<br>
82
+ - GPT4-Turbo<br>
83
+ - GPT4-Omni
84
+
85
+ """
86
+ )
87
 
88
  with gr.Row():
89
  chatbot1 = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
 
105
  submit_button.click(fn=generate_response, inputs=[user_message, chatbot1], outputs=[user_message, chatbot1], concurrency_limit=32)
106
 
107
  clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot1, history], concurrency_limit=32)
108
+
109
 
110
+
111
  with gr.Accordion("Settings", open=False):
112
  model_name = gr.Dropdown(
113
+ choices=GPT_MODELS_NAMES, value=GPT_MODELS_NAMES[0], label="model"
114
  )
115
  settings_button = gr.Button("Apply")
116
  settings_button.click(
117
+ apply_gpt_settings_button,
118
  [user_message,model_name],
119
  [user_message, chatbot1],
120
  )
121
 
122
 
 
123
  with gr.Row():
124
  gr.Examples(
125
  examples=examples,
126
  inputs=user_message,
127
  cache_examples=False,
128
  fn=trigger_example,
129
+ outputs=[chatbot1],
130
  examples_per_page=100
131
  )
132
  #user_message.submit(lambda x: gr.update(value=""), None, [user_message], queue=False)
133
  #submit_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
134
  #clear_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
135
+ with gr.Tab("File Browser"):
136
 
137
+ directory_dropdown = gr.Dropdown(choices=["HF", "OAI"], label="Select Directory")
138
+ file_dropdown = gr.Dropdown(choices=[], label="Files")
139
+ file_content_display = gr.Textbox(label="File Content", lines=10, interactive=False)
140
+ download_button = gr.File(label="Download File")
141
+
142
+ def update_file_list(directory):
143
+ files = list_files(directory)
144
+ return gr.Dropdown(choices=files)
145
+
146
+ def update_file_content_and_path(directory, file_name):
147
+ content = file_content(directory, file_name)
148
+ file_path = download_file(directory, file_name)
149
+ return content, file_path
150
+
151
+ directory_dropdown.change(update_file_list, inputs=directory_dropdown, outputs=file_dropdown)
152
+ file_dropdown.change(update_file_content_and_path, inputs=[directory_dropdown, file_dropdown], outputs=[file_content_display, download_button])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
 
154
  if __name__ == "__main__":
155
  # demo.launch(debug=True)
cookies/mad.mik788@gmail.com.json DELETED
@@ -1 +0,0 @@
1
- {"hf-chat": "c50ecd5a-49c6-4476-bbf6-79889e915ceb", "token": "QRBRiJammSDwTGqibPwKOClSgxOJnjubtRtrwjfAfWJxBGNNIKhitDezddYOGTClZSYoqsKSNbLAaLvsJuePsicMWYDRkFwkoiALdDWyWyPDiYiaeLgJlTkHCYVMpich"}
 
 
verify.py → verify2.py RENAMED
@@ -1,19 +1,15 @@
1
- from hugchat import hugchat
2
- from hugchat.login import Login
3
  from pyecore.resources import ResourceSet, URI
4
  import os
5
-
6
- from pathlib import Path
7
- from datasets import load_dataset
8
- import pandas as pd
9
  from datasets import Dataset,DatasetDict, load_dataset
10
  from huggingface_hub import CommitScheduler
11
  from uuid import uuid4
12
  import json
13
  from datetime import datetime
 
 
14
 
15
-
16
- import os
17
  JSON_DATASET_DIR = Path("json_dataset")
18
  JSON_DATASET_DIR.mkdir(parents=True, exist_ok=True)
19
 
@@ -32,29 +28,34 @@ def save_json(model: str, errors: list) -> None:
32
  json.dump({"model": model, "error": errors, "datetime": datetime.now().isoformat()}, f)
33
  f.write("\n")
34
 
35
- # Log into huggingface and grant authorization to huggingchat
36
- EMAIL = os.environ['HF_EMAIL']
37
- PASSWD = os.environ['HF_PASSWORD']
38
- cookie_path_dir = "./cookies/" # NOTE: trailing slash (/) is required to avoid errors
39
- sign = Login(EMAIL, PASSWD)
40
- cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)
41
 
42
- # Create your ChatBot
43
- chatbot = hugchat.ChatBot(cookies=cookies.get_dict(), system_prompt = '''You are a systems engineer, expert in model driven engineering and meta-modeling
44
- Your OUTPUT should always follow this format :
 
 
 
45
 
46
  ```xml
47
 
48
- < YOUR CODE HERE >
49
 
50
  ```
51
- ''') # or cookie_path="usercookies/<email>.json"
52
- chatbot.switch_llm(1)
 
 
 
 
 
 
 
 
 
53
 
54
- # Create a new conversation
55
- chatbot.new_conversation(switch_to = True) # switch to the new conversation
56
 
57
- #create prompt
58
  NLD= '''SimplePDL is an experimental language for specifying processes. The SPEM standard (Software Process Engineering Metamodel) proposed by the OMG inspired our work, but we also took ideas from the UMA metamodel (Unified Method Architecture) used in the EPF Eclipse plug-in (Eclipse Process Framework), dedicated to process modeling. SimplePDL is simplified to keep the presentation simple.
59
  Its metamodel is given in the figure 1. It defines the process concept (Process) composed of a set of work definitions (WorkDefinition) representing the activities to be performed during the development. One workdefinition may depend upon another (WorkSequence). In such a case, an ordering constraint (linkType) on the second workdefinition is specified, using the enumeration WorkSequenceType. For example, linking two workdefinitions wd1 and wd2 by a precedence relation of kind finishToStart means that wd2 can be started only if wd1 is finished (and respectively for startToStart, startToFinish and finishToFinish). SimplePDL does also allow to explicitly represent resources (Resource) that are needed in order to perform one workdefinition (designer, computer, server...) and also time constraints (min_time and max_time on WorkDefinition and Process) to specify the minimum (resp. maximum) time allowed to perform the workdefinition or the whole process.'''
60
  description='''# Writing Ecore Files
@@ -190,129 +191,159 @@ Ecore also supports the definition of operations and constraints on model elemen
190
 
191
 
192
  ##Reaccuring errors :
193
- Invalid tag name error is linked to the tag <!-- --> don't use it in the syntax
194
 
195
  ## Conclusion
196
 
197
  Ecore files provide a structured way to define models using an XML-based syntax. By understanding the syntax and semantics of Ecore files, developers can create robust and well-defined models that can be used as the foundation for various tools and applications within the Eclipse Modeling Framework.'''
198
- #prompt= "Convert the following description into an ecore xmi representation:\n" + NLD + "\n Here's a technical document of how to write correct ecore file:\n" + description #WHen tryin to add the description
199
-
200
-
201
- # Non stream response
202
- #query_result0 = chatbot.chat(prompt)
203
- #print(query_result0) # or query_result.text or query_result["text"]
204
-
205
- '''
206
- # Stream response
207
- for resp in chatbot.query(
208
- "Hello",
209
- stream=True
210
- ):
211
- print(resp)
212
-
213
- # Web search (new feature)
214
- query_result = chatbot.query("Hi!", web_search=True)
215
- print(query_result)
216
- for source in query_result.web_search_sources:
217
- print(source.link)
218
- print(source.title)
219
- print(source.hostname)
220
- '''
221
-
222
- # Create a new conversation
223
- #chatbot.new_conversation(switch_to = True) # switch to the new conversation
224
-
225
- # Get conversations on the server that are not from the current session (all your conversations in huggingchat)
226
- #conversation_list = chatbot.get_remote_conversations(replace_conversation_list=True)
227
- # Get conversation list(local)
228
- #conversation_list = chatbot.get_conversation_list()
229
-
230
- # Get the available models (not hardcore)
231
- #models = chatbot.get_available_llm_models()
232
-
233
- # Switch model with given index
234
-
235
- #chatbot.switch_llm(2) # Switch to the second model
236
-
237
- # Get information about the current conversation
238
- #info = chatbot.get_conversation_info()
239
- #print(info.id, info.title, info.model, info.system_prompt, info.history)
240
-
241
- ### Assistant
242
- #assistant = chatbot.search_assistant(assistant_name="ChatGpt") # assistant name list in https://huggingface.co/chat/assistants
243
- #assistant_list = chatbot.get_assistant_list_by_page(page=0)
244
- #chatbot.new_conversation(assistant=assistant, switch_to=True) # create a new conversation with assistant
245
- def initial_prompt(NLD, description):
246
- prompt= "Convert the following description into an ecore xmi representation:\n" + NLD + "\n Here's a technical document of how to write correct ecore file:\n" + description #WHen tryin to add the description
247
-
248
- return chatbot.chat(prompt)
249
-
250
- def fix_err(xmi, err):
251
- prompt="Fix the following error: " +str(err)+"\n in the following xmi :\n" + xmi+ "\n Here's a technical document of how to write correct ecore file:\n" + description
252
-
253
- return chatbot.chat(prompt)
254
 
255
  def verify_xmi(output,output_file_name):
256
  #here we're gonna verify our Model's output by using the either a tool or a developped solution XMI parser
257
 
258
  #Return can be either bool or preferably the actual compilation error or xmi line error
259
- output = str(output)
260
  #Returning a bool won't be that helpful ..
261
- with open("outs\output"+output_file_name+".ecore", "w") as file1:
262
  # Writing data to a file
263
- if "```xml" in output:
264
- file1.writelines(output[output.find("```xml")+len("```xml\n"):output.rfind("```")])
265
- else:
266
- file1.writelines(output[output.find("```")+len("```\n"):output.rfind("```")])
267
  try:
268
  rset = ResourceSet()
269
- resource = rset.get_resource(URI("outs\output"+output_file_name+".ecore"))
270
 
271
  except Exception as e:
272
  return e.args[0]
273
  return 'no e'
274
 
275
- def iterative_prompting(NLD, XMI,max_iter=3):
276
-
277
- history= []
278
 
279
- i=0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
280
 
281
-
282
- XMI=""
283
- output = initial_prompt(NLD, description)
284
- history.append((NLD,str(output)))
285
- print(output)
286
 
287
- correct_syntax= verify_xmi(output,str(i))
288
- errors =[]
289
- error = (correct_syntax == 'no e')
290
- errors.append(correct_syntax)
291
 
292
- while (not error) and i<=max_iter:
293
- i+=1
294
-
295
 
296
- #print('****************************************')
297
- #print('Iteration ' + str(i))
298
- #print('****************************************')
299
 
 
300
 
301
- error = "\n This Xmi was incorrect. Please fix the errors." + " "+str(correct_syntax)
302
-
303
- #print("**************************")
304
- #print(correct_syntax)
305
- #print("**************************")
306
 
307
 
308
- output = fix_err(output , correct_syntax)
309
- history.append((error,str(output)))
310
- #print(output)
311
- correct_syntax = verify_xmi(output,str(i))
312
- #print(correct_syntax)
313
- error = (correct_syntax == 'no e')
314
- errors.append(correct_syntax)
315
-
316
- save_json(chatbot.get_conversation_info().model, errors)
 
 
 
317
 
318
- return history, errors
 
 
1
+ from openai import OpenAI
2
+ import pyecore
3
  from pyecore.resources import ResourceSet, URI
4
  import os
 
 
 
 
5
  from datasets import Dataset,DatasetDict, load_dataset
6
  from huggingface_hub import CommitScheduler
7
  from uuid import uuid4
8
  import json
9
  from datetime import datetime
10
+ from pathlib import Path
11
+ import app1
12
 
 
 
13
  JSON_DATASET_DIR = Path("json_dataset")
14
  JSON_DATASET_DIR.mkdir(parents=True, exist_ok=True)
15
 
 
28
  json.dump({"model": model, "error": errors, "datetime": datetime.now().isoformat()}, f)
29
  f.write("\n")
30
 
 
 
 
 
 
 
31
 
32
+ OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
33
+
34
+ model = "gpt-4o"
35
+
36
+ default_system_prompt = '''You are a systems engineer, expert in model driven engineering and meta-modeling
37
+ Your OUTPUT should always be an ecore xmi in this format :
38
 
39
  ```xml
40
 
41
+ YOUR CODE HERE
42
 
43
  ```
44
+ '''
45
+
46
+ fix_err_system_prompt = '''You are a systems engineer, expert in model driven engineering and meta-modeling,
47
+ Fix the xmi provided with the errors
48
+ NB : the error Invalid tag name refers to the <!-- --> tags
49
+
50
+ Your OUTPUT should always be an ecore xmi in this format :
51
+
52
+ ```xml
53
+
54
+ YOUR CODE HERE
55
 
56
+ ```
57
+ '''
58
 
 
59
  NLD= '''SimplePDL is an experimental language for specifying processes. The SPEM standard (Software Process Engineering Metamodel) proposed by the OMG inspired our work, but we also took ideas from the UMA metamodel (Unified Method Architecture) used in the EPF Eclipse plug-in (Eclipse Process Framework), dedicated to process modeling. SimplePDL is simplified to keep the presentation simple.
60
  Its metamodel is given in the figure 1. It defines the process concept (Process) composed of a set of work definitions (WorkDefinition) representing the activities to be performed during the development. One workdefinition may depend upon another (WorkSequence). In such a case, an ordering constraint (linkType) on the second workdefinition is specified, using the enumeration WorkSequenceType. For example, linking two workdefinitions wd1 and wd2 by a precedence relation of kind finishToStart means that wd2 can be started only if wd1 is finished (and respectively for startToStart, startToFinish and finishToFinish). SimplePDL does also allow to explicitly represent resources (Resource) that are needed in order to perform one workdefinition (designer, computer, server...) and also time constraints (min_time and max_time on WorkDefinition and Process) to specify the minimum (resp. maximum) time allowed to perform the workdefinition or the whole process.'''
61
  description='''# Writing Ecore Files
 
191
 
192
 
193
  ##Reaccuring errors :
194
+ Invalid tag name refers to <!-- --> which are not accepted in xmi format
195
 
196
  ## Conclusion
197
 
198
  Ecore files provide a structured way to define models using an XML-based syntax. By understanding the syntax and semantics of Ecore files, developers can create robust and well-defined models that can be used as the foundation for various tools and applications within the Eclipse Modeling Framework.'''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
 
200
def verify_xmi(output, output_file_name):
    """Extract the fenced code block from an LLM reply, persist it to
    outs/OAI/output<output_file_name>.ecore and validate it with PyEcore.

    Returns the string 'no e' on success, otherwise the first argument
    of the exception raised while parsing the resource.

    BUG FIXES vs. the committed version:
    - "outs\\OAI\\output..." only worked on Windows; os.path.join is
      portable (HF Spaces run Linux).
    - A reply fenced with plain ``` (no ```xml) was sliced from index
      -1 + len(...), corrupting the payload; handle both fences and
      fall back to the raw text.
    - Coerce output to str (the old hugchat version did this) and make
      sure the target directory exists before writing.
    """
    output = str(output)
    if "```xml" in output:
        body = output[output.find("```xml") + len("```xml\n"):output.rfind("```")]
    elif "```" in output:
        body = output[output.find("```") + len("```\n"):output.rfind("```")]
    else:
        body = output

    path = os.path.join("outs", "OAI", "output" + output_file_name + ".ecore")
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w+") as file1:
        # Writing data to a file
        file1.writelines(body)
    try:
        rset = ResourceSet()
        resource = rset.get_resource(URI(path))
    except Exception as e:
        return e.args[0]
    return 'no e'
217
 
 
 
 
218
 
219
def fix_err(xmi, err, model, max_tokens=2000):
    """Ask the model to repair an invalid XMI document.

    Parameters
    ----------
    xmi : the faulty XMI text
    err : the validation error reported by PyEcore
    model : OpenAI model identifier
    max_tokens : completion token budget

    Returns the model's reply text (expected to contain only code).
    """
    client = OpenAI()

    user_text = (
        " \n Fix the following error: " + str(err)
        + "\n in the following xmi :\n" + xmi
        + "\n \n Output only the code ."
    )
    conversation = [
        {
            "role": "system",
            "content": [{"type": "text", "text": fix_err_system_prompt}],
        },
        {
            "role": "user",
            "content": [{"type": "text", "text": user_text}],
        },
    ]

    response = client.chat.completions.create(
        model=model,
        messages=conversation,
        temperature=0.2,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return response.choices[0].message.content
251
+
252
+
253
def prompt(NLD, description, model=model, max_tokens=2000):
    """Ask the model to convert a natural-language description to ecore XMI.

    Parameters
    ----------
    NLD :
        Natural Language Description of the metamodel.
    description :
        Technical reference document on writing correct ecore files,
        appended to the user message as guidance.
    model :
        OpenAI model identifier (default bound at import time).
    max_tokens :
        Completion token budget.

    Returns the model's reply text.
    """
    client = OpenAI()

    user_text = (
        "Convert the following description into an ecore xmi representation:\n"
        + NLD
        + "\n Here's a technical document if you need it of how to write correct ecore file:\n"
        + description
    )
    conversation = [
        {
            "role": "system",
            "content": [{"type": "text", "text": default_system_prompt}],
        },
        {
            "role": "user",
            "content": [{"type": "text", "text": user_text}],
        },
    ]

    response = client.chat.completions.create(
        model=model,
        messages=conversation,
        temperature=0.2,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return response.choices[0].message.content
300
+
301
+
302
def iterative_prompting(NLD, XMI, max_iter=3, model=model):
    """Generate an ecore XMI from a natural-language description,
    validating with PyEcore and asking the model to fix errors in a loop.

    Parameters
    ----------
    NLD :
        Natural Language Description of the metamodel.
    XMI :
        Unused legacy parameter, kept so existing callers keep working.
    max_iter :
        Maximum number of repair iterations.
    model :
        OpenAI model identifier (default bound at import time).

    Returns
    -------
    (history, errors) :
        history is the chat transcript as (user, reply) pairs; errors is
        the list of per-iteration validation results ('no e' = success).
    """
    history = []
    errors = []
    i = 0

    output = prompt(NLD, description, model)
    history.append((NLD, str(output)))

    correct_syntax = verify_xmi(output, str(i))
    errors.append(correct_syntax)
    ok = (correct_syntax == 'no e')

    # BUG FIX: the committed loop tested `i <= 3`, silently ignoring the
    # max_iter parameter.
    while not ok and i <= max_iter:
        i += 1

        print('****************************************')
        print('Iteration ' + str(i))
        print('****************************************')

        feedback = "\n This Xmi was incorrect. Please fix the errors : " + " " + str(correct_syntax)
        print("**************************")
        print(correct_syntax)
        print("**************************")

        output = fix_err(output, correct_syntax, model)
        history.append((feedback, str(output)))
        print(output)

        correct_syntax = verify_xmi(output, str(i))
        print(correct_syntax)
        ok = (correct_syntax == 'no e')
        errors.append(correct_syntax)

    # Persist the error trail for the dataset commit scheduler.
    save_json(model, errors)
    return history, errors