Create examples/app-console.py
examples/app-console.py  ADDED  (+42 -0)
@@ -0,0 +1,42 @@
# app-console.py
# Compare embeddings from the full-precision deepvk/USER-BGE-M3 encoder with
# the quantized encoder provided by the local quantmodel module.

import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel
from quantmodel import QuantizedSentenceEncoder


def mean_pooling(model_output, attention_mask):
    # Average the token embeddings, masking out padding positions.
    token_embeddings = model_output[0]
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


def encode_original(texts, model_name="deepvk/USER-BGE-M3", normalize=True, device="cpu"):
    # Full-precision path: tokenize, run the encoder, mean-pool over tokens,
    # and (optionally) L2-normalize the sentence embeddings.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name).to(device)

    encoded_input = tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to(device)
    with torch.no_grad():
        model_output = model(**encoded_input)

    embeddings = mean_pooling(model_output, encoded_input["attention_mask"])

    if normalize:
        embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)

    return embeddings.cpu().numpy()


if __name__ == "__main__":
    texts = ["Привет мир!", "Hello world!"]

    orig_embeddings = encode_original(texts)
    quant_model = QuantizedSentenceEncoder()
    quant_embeddings = quant_model.encode(texts)

    print("Original shape:", orig_embeddings.shape)
    print("Quantized shape:", quant_embeddings.shape)

    # The dot product equals cosine similarity only for unit-length vectors:
    # the original embeddings are normalized above, and the quantized encoder
    # is assumed to normalize as well.
    cos_sim = np.dot(orig_embeddings[0], quant_embeddings[0])
    print(f"Cosine similarity between original & quantized: {cos_sim:.4f}")
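The script imports QuantizedSentenceEncoder from a local quantmodel module that this commit does not include. For readers trying the example standalone, below is a minimal sketch of the interface the script assumes (a no-argument constructor and an encode(texts) method returning a NumPy array); the int8 dynamic quantization inside is an illustrative choice, not necessarily how the real quantmodel is implemented:

    # quantmodel.py -- hypothetical sketch; the real module is not in this commit.
    import torch
    from transformers import AutoTokenizer, AutoModel


    class QuantizedSentenceEncoder:
        """Mirrors encode_original, but with the backbone's Linear layers
        dynamically quantized to int8 for CPU inference."""

        def __init__(self, model_name="deepvk/USER-BGE-M3"):
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = AutoModel.from_pretrained(model_name).eval()
            # Dynamic post-training quantization: weights stored as int8,
            # activations quantized on the fly at inference time.
            self.model = torch.quantization.quantize_dynamic(
                model, {torch.nn.Linear}, dtype=torch.qint8
            )

        def encode(self, texts):
            enc = self.tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
            with torch.no_grad():
                out = self.model(**enc)
            # Same mean pooling + L2 normalization as the full-precision path.
            tok = out[0]
            mask = enc["attention_mask"].unsqueeze(-1).expand(tok.size()).float()
            emb = torch.sum(tok * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)
            emb = torch.nn.functional.normalize(emb, p=2, dim=1)
            return emb.cpu().numpy()

Keeping the tokenizer and pooling identical to encode_original means any drop in cosine similarity isolates the effect of quantizing the Linear weights.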
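One more note on the final check: np.dot of two vectors equals their cosine similarity only when both are unit-length. encode_original normalizes by default, and the quantized encoder is assumed to do the same; if that assumption may not hold, a variant that normalizes explicitly is safer (the helper name cosine_similarities is ours, not part of the script):

    import numpy as np

    def cosine_similarities(a, b):
        # Row-wise cosine similarity between two (n, d) embedding matrices;
        # does not assume either side is already L2-normalized.
        a = a / np.linalg.norm(a, axis=1, keepdims=True)
        b = b / np.linalg.norm(b, axis=1, keepdims=True)
        return np.sum(a * b, axis=1)

    # Example: cosine_similarities(orig_embeddings, quant_embeddings)
    # yields one similarity score per input text rather than just the first pair.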