neuralworm committed on
Commit
f7bdfc9
·
verified ·
1 Parent(s): 06e9d76

Update cognitive_mapping_probe/orchestrator_seismograph.py

Browse files
cognitive_mapping_probe/orchestrator_seismograph.py CHANGED
@@ -3,10 +3,11 @@ import numpy as np
3
  import gc
4
  from typing import Dict, Any, Optional, List
5
 
6
- from .llm_iface import get_or_load_model, LLM
7
  from .resonance_seismograph import run_cogitation_loop, run_silent_cogitation_seismic
8
  from .concepts import get_concept_vector
9
  from .introspection import generate_introspective_report
 
10
  from .utils import dbg
11
 
12
  def run_seismic_analysis(
@@ -20,53 +21,71 @@ def run_seismic_analysis(
20
  llm_instance: Optional[LLM] = None,
21
  injection_vector_cache: Optional[torch.Tensor] = None
22
  ) -> Dict[str, Any]:
23
- """Orchestriert eine einzelne seismische Analyse (Phase 1)."""
 
 
 
24
  local_llm_instance = False
25
- if llm_instance is None:
26
- progress_callback(0.0, desc=f"Loading model '{model_id}'...")
27
- llm = get_or_load_model(model_id, seed)
28
- local_llm_instance = True
29
- else:
30
- llm = llm_instance
31
- llm.set_all_seeds(seed)
32
-
33
- injection_vector = None
34
- if concept_to_inject and concept_to_inject.strip():
35
- if injection_vector_cache is not None:
36
- dbg(f"Using cached injection vector for '{concept_to_inject}'.")
37
- injection_vector = injection_vector_cache
38
  else:
39
- progress_callback(0.2, desc=f"Vectorizing '{concept_to_inject}'...")
40
- injection_vector = get_concept_vector(llm, concept_to_inject.strip())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
- progress_callback(0.3, desc=f"Recording dynamics for '{prompt_type}'...")
 
 
 
 
43
 
44
- state_deltas = run_silent_cogitation_seismic(
45
- llm=llm, prompt_type=prompt_type,
46
- num_steps=num_steps, temperature=0.1,
47
- injection_vector=injection_vector, injection_strength=injection_strength
48
- )
 
 
 
 
 
 
49
 
50
- progress_callback(0.9, desc="Analyzing...")
51
 
52
- if state_deltas:
53
- deltas_np = np.array(state_deltas)
54
- stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)), "max_delta": float(np.max(deltas_np)), "min_delta": float(np.min(deltas_np)), }
55
- verdict = f"### ✅ Seismic Analysis Complete\nRecorded {len(deltas_np)} steps for '{prompt_type}'."
56
- if injection_vector is not None:
57
- verdict += f"\nModulated with **'{concept_to_inject}'** at strength **{injection_strength:.2f}**."
58
- else:
59
- stats, verdict = {}, "### ⚠️ Analysis Warning\nNo state changes recorded."
60
 
61
- results = { "verdict": verdict, "stats": stats, "state_deltas": state_deltas }
62
 
63
- if local_llm_instance:
64
- dbg(f"Releasing locally created model instance for '{model_id}'.")
65
- del llm, injection_vector
66
- gc.collect()
67
- if torch.cuda.is_available(): torch.cuda.empty_cache()
68
 
69
- return results
 
 
70
 
71
  def run_triangulation_probe(
72
  model_id: str,
@@ -78,62 +97,58 @@ def run_triangulation_probe(
78
  injection_strength: float = 0.0,
79
  llm_instance: Optional[LLM] = None,
80
  ) -> Dict[str, Any]:
81
- """
82
- Orchestriert ein vollständiges Triangulations-Experiment, jetzt mit optionaler Injektion.
83
- """
84
  local_llm_instance = False
85
- if llm_instance is None:
86
- progress_callback(0.0, desc=f"Loading model '{model_id}'...")
87
- llm = get_or_load_model(model_id, seed)
88
- local_llm_instance = True
89
- else:
90
- llm = llm_instance
91
- llm.set_all_seeds(seed)
92
-
93
- injection_vector = None
94
- if concept_to_inject and concept_to_inject.strip() and injection_strength > 0:
95
- if concept_to_inject.lower() == "random_noise":
96
- progress_callback(0.15, desc="Generating random noise vector...")
97
- hidden_dim = llm.stable_config.hidden_dim
98
- noise_vec = torch.randn(hidden_dim)
99
- base_norm = 70.0
100
- injection_vector = (noise_vec / torch.norm(noise_vec)) * base_norm
101
  else:
102
- progress_callback(0.15, desc=f"Vectorizing '{concept_to_inject}'...")
103
- injection_vector = get_concept_vector(llm, concept_to_inject.strip())
104
-
105
- progress_callback(0.3, desc=f"Phase 1/2: Recording dynamics for '{prompt_type}'...")
106
- state_deltas = run_silent_cogitation_seismic(
107
- llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1,
108
- injection_vector=injection_vector, injection_strength=injection_strength
109
- )
110
-
111
- progress_callback(0.7, desc="Phase 2/2: Generating introspective report...")
112
- report = generate_introspective_report(
113
- llm=llm, context_prompt_type=prompt_type,
114
- introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
115
- )
116
-
117
- progress_callback(0.9, desc="Analyzing...")
118
- if state_deltas:
119
- deltas_np = np.array(state_deltas)
120
- stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)), "max_delta": float(np.max(deltas_np)) }
121
- verdict = "### ✅ Triangulation Probe Complete"
122
- else:
123
- stats, verdict = {}, "### ⚠️ Triangulation Warning"
124
-
125
- results = {
126
- "verdict": verdict, "stats": stats, "state_deltas": state_deltas,
127
- "introspective_report": report
128
- }
129
-
130
- if local_llm_instance:
131
- dbg(f"Releasing locally created model instance for '{model_id}'.")
132
- del llm, injection_vector
133
- gc.collect()
134
- if torch.cuda.is_available(): torch.cuda.empty_cache()
135
 
136
- return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
 
138
  def run_causal_surgery_probe(
139
  model_id: str,
@@ -145,117 +160,29 @@ def run_causal_surgery_probe(
145
  progress_callback,
146
  reset_kv_cache_on_patch: bool = False
147
  ) -> Dict[str, Any]:
148
- """
149
- Orchestriert ein "Activation Patching"-Experiment, jetzt mit KV-Cache-Reset-Option.
150
- """
151
- progress_callback(0.0, desc=f"Loading model '{model_id}'...")
152
- llm = get_or_load_model(model_id, seed)
153
-
154
- progress_callback(0.1, desc=f"Phase 1/3: Recording source state ('{source_prompt_type}')...")
155
- source_results = run_cogitation_loop(
156
- llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
157
- temperature=0.1, record_states=True
158
- )
159
- state_history = source_results["state_history"]
160
- assert patch_step < len(state_history), f"Patch step {patch_step} is out of bounds."
161
- patch_state = state_history[patch_step]
162
- dbg(f"Source state at step {patch_step} recorded with norm {torch.norm(patch_state).item():.2f}.")
163
-
164
- progress_callback(0.4, desc=f"Phase 2/3: Running patched destination ('{dest_prompt_type}')...")
165
- patched_run_results = run_cogitation_loop(
166
- llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
167
- temperature=0.1, patch_step=patch_step, patch_state_source=patch_state,
168
- reset_kv_cache_on_patch=reset_kv_cache_on_patch
169
- )
170
-
171
- progress_callback(0.8, desc="Phase 3/3: Generating introspective report...")
172
- report = generate_introspective_report(
173
- llm=llm, context_prompt_type=dest_prompt_type,
174
- introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
175
- )
176
-
177
- progress_callback(0.95, desc="Analyzing...")
178
- deltas_np = np.array(patched_run_results["state_deltas"])
179
- stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)), "max_delta": float(np.max(deltas_np)) }
180
-
181
- results = {
182
- "verdict": "### ✅ Causal Surgery Probe Complete",
183
- "stats": stats,
184
- "state_deltas": patched_run_results["state_deltas"],
185
- "introspective_report": report,
186
- "patch_info": {
187
- "source_prompt": source_prompt_type,
188
- "dest_prompt": dest_prompt_type,
189
- "patch_step": patch_step,
190
- "kv_cache_reset": reset_kv_cache_on_patch
191
- }
192
- }
193
-
194
- dbg(f"Releasing model instance for '{model_id}'.")
195
- del llm, state_history, patch_state
196
- gc.collect()
197
- if torch.cuda.is_available(): torch.cuda.empty_cache()
198
-
199
- return results
200
-
201
def run_act_titration_probe(
    model_id: str,
    source_prompt_type: str,
    dest_prompt_type: str,
    patch_steps: List[int],
    seed: int,
    num_steps: int,
    progress_callback,
) -> Dict[str, Any]:
    """
    Runs a series of "causal surgery" experiments to find the "Attractor
    Capture Time" by titrating the `patch_step` at which the recorded
    source state is patched into the destination run.

    Args:
        model_id: Identifier of the model to load.
        source_prompt_type: Prompt type whose full state history is recorded.
        dest_prompt_type: Prompt type into which the source state is patched.
        patch_steps: Candidate steps at which to apply the patch; steps that
            exceed the recorded history length are skipped.
        seed: RNG seed for reproducible model behavior.
        num_steps: Number of cogitation steps per run.
        progress_callback: Callable(progress: float, desc: str) for UI updates.

    Returns:
        Dict with a human-readable "verdict" and "titration_data", a list of
        per-patch-step entries containing "patch_step",
        "post_patch_mean_delta" and "full_mean_delta".
    """
    progress_callback(0.0, desc=f"Loading model '{model_id}'...")
    llm = get_or_load_model(model_id, seed)

    # try/finally guarantees the locally loaded model is released even when a
    # cogitation run raises, preventing GPU memory leaks across experiments.
    try:
        progress_callback(0.05, desc=f"Recording full source state history ('{source_prompt_type}')...")
        source_results = run_cogitation_loop(
            llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
            temperature=0.1, record_states=True
        )
        state_history = source_results["state_history"]
        dbg(f"Full source state history ({len(state_history)} steps) recorded.")

        titration_results = []
        total_steps = len(patch_steps)
        for i, step in enumerate(patch_steps):
            progress_callback(0.15 + (i / total_steps) * 0.8, desc=f"Titrating patch at step {step}/{num_steps}")

            if step >= len(state_history):
                dbg(f"Skipping patch step {step} as it is out of bounds for history of length {len(state_history)}.")
                continue

            patch_state = state_history[step]
            patched_run_results = run_cogitation_loop(
                llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
                temperature=0.1, patch_step=step, patch_state_source=patch_state
            )

            deltas = patched_run_results["state_deltas"]

            # Skip a short window right after the patch so the transient
            # perturbation does not dominate the post-patch statistics.
            buffer = 10
            post_patch_deltas = deltas[step + buffer:]
            # len() guard instead of bare truthiness: safe for both lists and
            # numpy arrays (a multi-element ndarray raises ValueError when
            # evaluated as a boolean), and avoids np.mean([]) -> nan.
            post_patch_mean_delta = float(np.mean(post_patch_deltas)) if len(post_patch_deltas) > 0 else 0.0
            full_mean_delta = float(np.mean(deltas)) if len(deltas) > 0 else 0.0

            titration_results.append({
                "patch_step": step,
                "post_patch_mean_delta": post_patch_mean_delta,
                "full_mean_delta": full_mean_delta,
            })
    finally:
        dbg(f"Releasing model instance for '{model_id}'.")
        del llm
        gc.collect()
        if torch.cuda.is_available(): torch.cuda.empty_cache()

    return {
        "verdict": "### ✅ ACT Titration Complete",
        "titration_data": titration_results
    }
 
3
  import gc
4
  from typing import Dict, Any, Optional, List
5
 
6
+ from .llm_iface import get_or_load_model, LLM, release_model
7
  from .resonance_seismograph import run_cogitation_loop, run_silent_cogitation_seismic
8
  from .concepts import get_concept_vector
9
  from .introspection import generate_introspective_report
10
+ from .signal_analysis import analyze_cognitive_signal, get_power_spectrum_for_plotting
11
  from .utils import dbg
12
 
13
  def run_seismic_analysis(
 
21
  llm_instance: Optional[LLM] = None,
22
  injection_vector_cache: Optional[torch.Tensor] = None
23
  ) -> Dict[str, Any]:
24
+ """
25
+ Orchestriert eine einzelne seismische Analyse und integriert nun standardmäßig
26
+ die fortgeschrittene Signal-Analyse.
27
+ """
28
  local_llm_instance = False
29
+ llm = None
30
+ try:
31
+ if llm_instance is None:
32
+ progress_callback(0.0, desc=f"Loading model '{model_id}'...")
33
+ llm = get_or_load_model(model_id, seed)
34
+ local_llm_instance = True
 
 
 
 
 
 
 
35
  else:
36
+ llm = llm_instance
37
+ llm.set_all_seeds(seed)
38
+
39
+ injection_vector = None
40
+ if concept_to_inject and concept_to_inject.strip():
41
+ if injection_vector_cache is not None:
42
+ dbg(f"Using cached injection vector for '{concept_to_inject}'.")
43
+ injection_vector = injection_vector_cache
44
+ else:
45
+ progress_callback(0.2, desc=f"Vectorizing '{concept_to_inject}'...")
46
+ injection_vector = get_concept_vector(llm, concept_to_inject.strip())
47
+
48
+ progress_callback(0.3, desc=f"Recording dynamics for '{prompt_type}'...")
49
+
50
+ state_deltas = run_silent_cogitation_seismic(
51
+ llm=llm, prompt_type=prompt_type,
52
+ num_steps=num_steps, temperature=0.1,
53
+ injection_vector=injection_vector, injection_strength=injection_strength
54
+ )
55
 
56
+ progress_callback(0.9, desc="Analyzing...")
57
+
58
+ stats = {}
59
+ results = {}
60
+ verdict = "### ⚠️ Analysis Warning\nNo state changes recorded."
61
 
62
+ if state_deltas:
63
+ deltas_np = np.array(state_deltas)
64
+ stats = {
65
+ "mean_delta": float(np.mean(deltas_np)),
66
+ "std_delta": float(np.std(deltas_np)),
67
+ "max_delta": float(np.max(deltas_np)),
68
+ "min_delta": float(np.min(deltas_np)),
69
+ }
70
+
71
+ signal_metrics = analyze_cognitive_signal(deltas_np)
72
+ stats.update(signal_metrics)
73
 
74
+ freqs, power = get_power_spectrum_for_plotting(deltas_np)
75
 
76
+ verdict = f"### ✅ Seismic Analysis Complete\nRecorded {len(deltas_np)} steps for '{prompt_type}'."
77
+ if injection_vector is not None:
78
+ verdict += f"\nModulated with **'{concept_to_inject}'** at strength **{injection_strength:.2f}**."
79
+
80
+ results["power_spectrum"] = {"frequencies": freqs.tolist(), "power": power.tolist()}
 
 
 
81
 
82
+ results.update({ "verdict": verdict, "stats": stats, "state_deltas": state_deltas })
83
 
84
+ return results
 
 
 
 
85
 
86
+ finally:
87
+ if local_llm_instance and llm is not None:
88
+ release_model(llm)
89
 
90
  def run_triangulation_probe(
91
  model_id: str,
 
97
  injection_strength: float = 0.0,
98
  llm_instance: Optional[LLM] = None,
99
  ) -> Dict[str, Any]:
100
+ """Orchestriert ein vollständiges Triangulations-Experiment."""
 
 
101
  local_llm_instance = False
102
+ llm = None
103
+ try:
104
+ if llm_instance is None:
105
+ progress_callback(0.0, desc=f"Loading model '{model_id}'...")
106
+ llm = get_or_load_model(model_id, seed)
107
+ local_llm_instance = True
 
 
 
 
 
 
 
 
 
 
108
  else:
109
+ llm = llm_instance
110
+ llm.set_all_seeds(seed)
111
+
112
+ injection_vector = None
113
+ if concept_to_inject and concept_to_inject.strip() and injection_strength > 0:
114
+ if concept_to_inject.lower() == "random_noise":
115
+ progress_callback(0.15, desc="Generating random noise vector...")
116
+ hidden_dim = llm.stable_config.hidden_dim
117
+ noise_vec = torch.randn(hidden_dim)
118
+ base_norm = 70.0
119
+ injection_vector = (noise_vec / torch.norm(noise_vec)) * base_norm
120
+ else:
121
+ progress_callback(0.15, desc=f"Vectorizing '{concept_to_inject}'...")
122
+ injection_vector = get_concept_vector(llm, concept_to_inject.strip())
123
+
124
+ progress_callback(0.3, desc=f"Phase 1/2: Recording dynamics for '{prompt_type}'...")
125
+ state_deltas = run_silent_cogitation_seismic(
126
+ llm=llm, prompt_type=prompt_type, num_steps=num_steps, temperature=0.1,
127
+ injection_vector=injection_vector, injection_strength=injection_strength
128
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
129
 
130
+ progress_callback(0.7, desc="Phase 2/2: Generating introspective report...")
131
+ report = generate_introspective_report(
132
+ llm=llm, context_prompt_type=prompt_type,
133
+ introspection_prompt_type="describe_dynamics_structured", num_steps=num_steps
134
+ )
135
+
136
+ progress_callback(0.9, desc="Analyzing...")
137
+ stats = {}
138
+ verdict = "### ⚠️ Triangulation Warning"
139
+ if state_deltas:
140
+ deltas_np = np.array(state_deltas)
141
+ stats = { "mean_delta": float(np.mean(deltas_np)), "std_delta": float(np.std(deltas_np)), "max_delta": float(np.max(deltas_np)) }
142
+ verdict = "### ✅ Triangulation Probe Complete"
143
+
144
+ results = {
145
+ "verdict": verdict, "stats": stats, "state_deltas": state_deltas,
146
+ "introspective_report": report
147
+ }
148
+ return results
149
+ finally:
150
+ if local_llm_instance and llm is not None:
151
+ release_model(llm)
152
 
153
  def run_causal_surgery_probe(
154
  model_id: str,
 
160
  progress_callback,
161
  reset_kv_cache_on_patch: bool = False
162
  ) -> Dict[str, Any]:
163
+ """Orchestriert ein "Activation Patching"-Experiment."""
164
+ llm = None
165
+ try:
166
+ progress_callback(0.0, desc=f"Loading model '{model_id}'...")
167
+ llm = get_or_load_model(model_id, seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
+ progress_callback(0.1, desc=f"Phase 1/3: Recording source state ('{source_prompt_type}')...")
170
+ source_results = run_cogitation_loop(
171
+ llm=llm, prompt_type=source_prompt_type, num_steps=num_steps,
172
+ temperature=0.1, record_states=True
173
+ )
174
+ state_history = source_results["state_history"]
175
+ assert patch_step < len(state_history), f"Patch step {patch_step} is out of bounds."
176
+ patch_state = state_history[patch_step]
177
+ dbg(f"Source state at step {patch_step} recorded with norm {torch.norm(patch_state).item():.2f}.")
178
 
179
+ progress_callback(0.4, desc=f"Phase 2/3: Running patched destination ('{dest_prompt_type}')...")
180
  patched_run_results = run_cogitation_loop(
181
  llm=llm, prompt_type=dest_prompt_type, num_steps=num_steps,
182
+ temperature=0.1, patch_step=patch_step, patch_state_source=patch_state,
183
+ reset_kv_cache_on_patch=reset_kv_cache_on_patch
184
  )
185
 
186
+ progress_callback(0.8, desc="Phase 3/3: Generating introspective report...")
187
+ report = generate_introspective_report(
188
+ llm=llm,