AngelBottomless committed
Commit 33071f6 · verified · Parent: 4b4e2c4

Update DB with some utils
Files changed (5):
  1. db.py +6 -2
  2. fix_tags.py +46 -0
  3. gelbooru2024-02.db +1 -1
  4. utils/gelboorutags.py +322 -0
  5. utils/proxyhandler.py +223 -0
db.py CHANGED
@@ -139,7 +139,9 @@ def load_db(db_file: str):
             return [tag for tag in self.tag_list if tag.type == "unknown"]


-    class Tag(BaseModel):
+    class Tag(BaseModel):  # table name is "tags"
+        class Meta:
+            db_table = "tags"
         id = IntegerField(primary_key=True)
         name = CharField(unique=True)
         type = EnumField(["general", "artist", "character", "copyright", "meta", "unknown"])  # unknown is for unclassified gelbooru tags; should be fixed in the future
@@ -190,6 +192,8 @@ def load_db(db_file: str):
     LocalPost._meta.database = db
     db.connect()
     print("Database connected.")
+    # print all tables
+    print(db.get_tables())
     if not file_exists:
         db.create_tables([Post, Tag, PostTagRelation])
         db.create_tables([LocalPost])
@@ -225,4 +229,4 @@ if __name__ == "__main__":
                 break
             break
     else:
-        print('not a file')
+        print('not a file')
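EnumField is not a built-in peewee field, so db.py presumably defines it elsewhere. For reference, a minimal sketch of such a field, assuming it stores each string choice as its integer index (illustrative only, not the definition used in this commit):

    from peewee import IntegerField

    class EnumField(IntegerField):
        """Store one of a fixed set of string choices as its integer index."""
        def __init__(self, choices, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.enum_choices = choices  # e.g. ["general", "artist", ...]

        def db_value(self, value):
            # Python -> DB: map the string to its index
            return self.enum_choices.index(value)

        def python_value(self, value):
            # DB -> Python: map the stored index back to the string
            return self.enum_choices[value]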
fix_tags.py ADDED
@@ -0,0 +1,46 @@
+from utils.gelboorutags import GelbooruTag as TagHandler
+from db import load_db
+from peewee import fn
+from tqdm import tqdm
+handler = TagHandler(exception_handle=0)  # 0 -> "general"
+
+def fix_tags(tags):
+    result = handler.get_types(tags, verbose=True)[0]
+    if result == "deprecated":
+        return "general"
+    return result
+
+# test
+
+db_dict = load_db('gelbooru2024-02.db')
+GelbooruDB, GelbooruPost, GelbooruTag, GelbooruPostTagRelation = db_dict['db'], db_dict['Post'], db_dict['Tag'], db_dict['PostTagRelation']
+
+# count tags whose type is already known
+tags = GelbooruTag.select().where(GelbooruTag.type != "unknown")
+print(tags.count())
+# select the tags that still have unknown type
+tags = GelbooruTag.select().where(GelbooruTag.type == "unknown")
+pbar = tqdm(tags, total=tags.count())  # Tag -> fix_tags(tag.name) -> [Tag.type]
+fixed_count = 0
+# batch update
+batch_size = 1000
+batch = []
+for tag in tags:
+    batch.append((fix_tags(tag.name), tag.id))
+    if len(batch) >= batch_size:
+        with GelbooruDB.atomic():
+            pbar.set_description("Updating")
+            for fixed_tag, tag_id in batch:
+                GelbooruTag.update(type=fixed_tag).where(GelbooruTag.id == tag_id).execute()
+        fixed_count += len(batch)
+        batch = []
+        pbar.update(batch_size)
+        pbar.set_postfix(fixed=fixed_count)
+    else:
+        pbar.set_description("Collecting")
+if batch:
+    with GelbooruDB.atomic():
+        for fixed_tag, tag_id in batch:
+            GelbooruTag.update(type=fixed_tag).where(GelbooruTag.id == tag_id).execute()
+    fixed_count += len(batch)
+    pbar.set_postfix(fixed=fixed_count)
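The batch loop above still issues one UPDATE per tag inside each transaction. A hypothetical alternative is to collapse each batch into a single CASE-based UPDATE using peewee's Case expression; note this bypasses any value conversion a custom EnumField would normally perform, so the type values may need converting first (sketch, not part of the commit):

    from peewee import Case

    def apply_batch(batch):
        # batch is a list of (new_type, tag_id) pairs, as collected above
        ids = [tag_id for _, tag_id in batch]
        type_case = Case(GelbooruTag.id, [(tag_id, new_type) for new_type, tag_id in batch])
        GelbooruTag.update(type=type_case).where(GelbooruTag.id.in_(ids)).execute()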
gelbooru2024-02.db CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f2ae54e29175dfc64d9cfa277498c1a79430311b12403b4d28998573cabdd65
+oid sha256:db650c3f1a6d505054d1fb22d0a8a499e01b265eb3ff905f9b198f7fc5c10971
 size 14257557504
utils/gelboorutags.py ADDED
@@ -0,0 +1,322 @@
+import os
+from .proxyhandler import ProxyHandler
+from typing import List
+import json
+import html
+import logging
+import datetime
+from urllib.parse import quote
+from threading import Lock
+
+class GelbooruTag:
+    """
+    Tag dictionary
+    """
+    TAG_TYPE = {
+        0: "general",
+        1: "artist",
+        3: "copyright",
+        4: "character",
+        5: "meta",
+        6: "deprecated"
+    }
+    def __init__(self, file_name="gelbooru_tags.jsonl", handler: ProxyHandler = None, exception_handle=None):
+        """
+        exception_handle -> tag type that will be used if a tag is not found
+        """
+        self.file_name = file_name
+        self.tags = {}
+        self.type_by_name = {}
+        self.handler = handler
+        self.exception_handle = exception_handle  # if a tag is not found, what to do
+        self.load()
+        self.filewrite_lock = Lock()
+    def load(self):
+        """
+        Loads the tags
+        """
+        if not os.path.exists(self.file_name):
+            return
+        with open(self.file_name, 'r', encoding='utf-8') as f:
+            for line in f:
+                try:
+                    tag = json.loads(line)
+                except Exception as exce:
+                    if isinstance(exce, KeyboardInterrupt):
+                        raise exce
+                    continue
+                self.tags[tag['name']] = tag
+                self.type_by_name[tag['name']] = tag['type']
+                # also index the html-escaped version (gelbooru escapes apostrophes as &#039;)
+                escaped_tag_name = html.escape(tag['name']).replace("&#x27;", "&#039;")
+                self.tags[escaped_tag_name] = tag
+                self.type_by_name[escaped_tag_name] = tag['type']
+    def save(self):
+        """
+        Saves the tags
+        """
+        with open(self.file_name, 'w', encoding='utf-8') as f:
+            for tag in self.tags.values():
+                f.write(json.dumps(tag) + "\n")
+    def save_tag(self, tag):
+        """
+        Saves a single tag
+        """
+        with self.filewrite_lock:
+            with open(self.file_name, 'a', encoding='utf-8') as f:
+                f.write(json.dumps(tag) + "\n")
+    def get_missing_tags(self, tags_string):
+        """
+        Returns the missing tags (not locally stored)
+        """
+        tags_string_list = tags_string.split(" ")
+        tags = []
+        for tag in tags_string_list:
+            if self.get_tag(tag):
+                continue
+            tags.append(tag)
+        return tags
+    def reorganize(self, write_to_new_file=True):
+        # writes the tags down into a new file
+        if not write_to_new_file:
+            with open(self.file_name, 'w', encoding='utf-8') as f:
+                for tag_values in self.tags.values():
+                    f.write(json.dumps(tag_values) + "\n")
+            return
+        with open(self.file_name + "_new", 'w', encoding='utf-8') as f:
+            for tag_values in self.tags.values():
+                f.write(json.dumps(tag_values) + "\n")
+    def reorganize_and_reload(self):
+        """
+        Reorganizes and reloads the tags
+        Useful for broken jsonl files
+        """
+        self.reorganize(write_to_new_file=False)
+        self.load()
+    def get_tag(self, tag_name):
+        """
+        Returns the tag
+        """
+        # if it starts with a backslash, remove it
+        if tag_name.startswith("\\"):
+            tag_name = tag_name[1:]
+        #print(tag_name)  # ninomae_ina'nis -> ninomae_ina&#039;nis
+        basic_escape = tag_name.replace("'", "&#039;")
+        tag_name_urlsafe = html.unescape(tag_name).replace("'", "&#039;")
+        lower_tag_name = tag_name.lower()
+        upper_tag_name = tag_name.upper()
+        #print(tag_name_urlsafe)
+        if tag_name not in self.tags and tag_name_urlsafe not in self.tags and basic_escape not in self.tags and lower_tag_name not in self.tags and upper_tag_name not in self.tags:
+            return None
+        if basic_escape in self.tags:
+            return self.tags[basic_escape]
+        if lower_tag_name in self.tags:
+            return self.tags[lower_tag_name]
+        if upper_tag_name in self.tags:
+            return self.tags[upper_tag_name]
+        return self.tags[tag_name] if tag_name in self.tags else self.tags[tag_name_urlsafe]
+    def _check_handler(self, handler: ProxyHandler):
+        """
+        Checks and resolves the handler
+        """
+        if handler is None:
+            handler = self.handler
+        if handler is None:
+            if self.exception_handle is None:
+                logging.error("Error: tag was not in the dictionary, but cannot fetch it because handler is None")
+                raise RuntimeError("Error: tag was not in the dictionary, but cannot fetch it because handler is None")
+        return handler
+    def get_types(self, tags_string, handler: ProxyHandler = None, max_retry=10, verbose=False):
+        """
+        Returns the types of the given tags
+        This can be used for bulk processing
+        Use a threaded version for faster processing (if you have a proxy handler)
+        """
+        if tags_string.isspace():
+            return []
+        self.parse_tags(tags_string, handler, max_retry=max_retry)
+        types = []
+        for tag in tags_string.split(" "):
+            # search self.type_by_name first
+            if tag in self.type_by_name:
+                types.append(self.type_by_name[tag])
+                continue
+            # then search the dictionary
+            if (tag_result := self.get_tag(tag)) is not None:
+                types.append(tag_result['type'])
+                # cache in self.type_by_name
+                self.type_by_name[tag] = tag_result['type']
+                continue
+            logging.error(f"Error: {tag} not found in the dictionary")
+            if self.exception_handle is not None:
+                self.type_by_name[tag] = self.exception_handle
+                types.append(self.exception_handle)
+            else:
+                raise Exception(f"Error: {tag} not found in type_by_name")
+        if not verbose:
+            return types
+        return [GelbooruTag.TAG_TYPE[t] for t in types]
+    def structured_tags(self, tags_string, handler: ProxyHandler = None, max_retry=10):
+        """
+        Returns the tags and classes as a dictionary
+        This can be used for any string input (maybe merged too) for bulk processing
+        """
+        tags_each = tags_string.split(" ")
+        tag_types = self.get_types(tags_string, handler, max_retry=max_retry, verbose=True)
+        tag_dict = {}
+        for tag, tag_type in zip(tags_each, tag_types):
+            if tag_type not in tag_dict:
+                tag_dict[tag_type] = []
+            tag_dict[tag_type].append(tag)
+        return tag_dict
+    def parse_tags(self, tags_string, handler: ProxyHandler = None, max_retry=10):
+        """
+        Returns the tags and classes
+        """
+        tags_string_list = tags_string.split(" ")
+        tags_string_list = [tag for tag in tags_string_list if tag.strip()]
+        tags = []
+        # prepare _get_tags queries, split into 100 tags per request
+        tag_query_prepared = []
+        for i in range(0, len(tags_string_list), 100):
+            tag_query_prepared.append(tags_string_list[i:i+100])
+        # fetch tags
+        for tag_query in tag_query_prepared:
+            self._get_tags(tag_query, handler, max_retry=max_retry)
+            if handler is not None:
+                avg_response_time = handler.get_average_time()
+                if avg_response_time:
+                    print(f"Average response time: {avg_response_time}")
+        # collect tags
+        for tag in tags_string_list:
+            if (tag_result := self.get_tag(tag)) is None:
+                print(f"Error: {tag} not found")
+                continue
+            tags.append(tag_result)
+        return tags
+    def _get_tags(self, tag_names: List[str], handler: ProxyHandler = None, max_retry=10):
+        """
+        Fetches the given tags. tag_names should not exceed 100 tags.
+        This may require an internet connection.
+        """
+        if not tag_names:
+            return
+        missing_tags = []
+        for tag_name in tag_names:
+            if self.get_tag(tag_name):
+                continue
+            missing_tags.append(tag_name)
+        if not missing_tags:
+            return
+        tag_name = " ".join(missing_tags)
+        # unescape html, then escape apostrophes the way gelbooru does
+        tag_name = html.unescape(tag_name).replace("'", "&#039;")
+        # url encode
+        tag_name = quote(tag_name, safe='')
+        try:
+            handler = self._check_handler(handler)
+        except RuntimeError:
+            if self.exception_handle is not None:
+                for tag in missing_tags:
+                    self.type_by_name[tag] = self.exception_handle
+            return
+        for i in range(max_retry):
+            try:
+                response = handler.get_response(f"https://gelbooru.com/index.php?page=dapi&s=tag&q=index&json=1&names={tag_name}")
+                if response is None:
+                    continue
+                tag = json.loads(response) if isinstance(response, str) else response
+                if not tag:
+                    print(f"Error: {tag_name} not found from response {response}")
+                    continue
+                if "tag" not in tag:
+                    logging.error(f"Error: {tag_name} not found from response {response}")
+                    print(f"Error: {tag_name} not found from response {response}")
+                    continue
+                # {"@attributes":{"limit":100,"offset":0,"count":4},"tag":[{"id":152532,"name":"1girl","count":6177827,"type":0,"ambiguous":0},{"id":138893,"name":"1boy","count":1481404,"type":0,"ambiguous":0},{"id":444,"name":"apron","count":174832,"type":0,"ambiguous":0},{"id":135309,"name":"blunt_bangs","count":233912,"type":0,"ambiguous":0}]}
+                for tag in tag['tag']:
+                    self.tags[tag['name']] = tag
+                    self.type_by_name[tag['name']] = tag['type']
+                    # also index the html-escaped version
+                    escaped_tag_name = html.escape(tag['name']).replace("&#x27;", "&#039;")
+                    self.tags[escaped_tag_name] = tag
+                    self.type_by_name[escaped_tag_name] = tag['type']
+                    # and the lower-case version
+                    lower_tag_name = tag['name'].lower()
+                    self.tags[lower_tag_name] = tag
+                    self.type_by_name[lower_tag_name] = tag['type']
+                    self.save_tag(tag)
+                return
+            except Exception as e:
+                logging.exception(f"Exception: {e} when getting tag {tag_name}, retrying {i}/{max_retry}")
+                print(f"Exception: {e} when getting tag {tag_name}, retrying {i}/{max_retry}")
+        print(f"Error: {tag_name} not found after {max_retry} retries")
+    def tag_exists(self, tag_name):
+        """
+        Returns whether the tag exists
+        """
+        return tag_name in self.tags
+
+
+class GelbooruMetadata:
+    def __init__(self, **kwargs) -> None:
+        self.id = kwargs.get("id")
+        # convert to YYYY-MM-DD HH:MM:SS format
+        self.created_at = datetime.datetime.strptime(kwargs.get("created_at"), "%a %b %d %H:%M:%S %z %Y").strftime("%Y-%m-%d %H:%M:%S")
+        self.score = kwargs.get("score")
+        self.width = kwargs.get("width")
+        self.height = kwargs.get("height")
+        self.md5 = kwargs.get("md5")
+        self.image_ext = kwargs.get("image").split(".")[-1]
+        self.rating = kwargs.get("rating")
+        self.source = kwargs.get("source", "")
+        self.tags = kwargs.get("tags")
+        self.title = kwargs.get("title", "")
+        self.file_url = kwargs.get("file_url")
+        self.has_children = kwargs.get("has_children", False)
+        self.parent_id = kwargs.get("parent_id", 0)
+    def get_dict(self):
+        return dict(
+            id=self.id,
+            created_at=self.created_at,
+            score=self.score,
+            width=self.width,
+            height=self.height,
+            md5=self.md5,
+            image_ext=self.image_ext,
+            rating=self.rating,
+            source=self.source,
+            tags=self.tags,
+            title=self.title,
+            file_url=self.file_url,
+            has_children=self.has_children,
+            parent_id=self.parent_id,
+        )
+    def structured_dict(self, tag_handler: GelbooruTag, handler: ProxyHandler = None, max_retry=10):
+        """
+        Returns the structured dictionary
+        """
+        tags = tag_handler.structured_tags(self.tags, handler, max_retry=max_retry)
+        return dict(
+            id=self.id,
+            created_at=self.created_at,
+            score=self.score,
+            width=self.width,
+            height=self.height,
+            md5=self.md5,
+            image_ext=self.image_ext,
+            rating=self.rating,
+            source=self.source,
+            title=self.title,
+            file_url=self.file_url,
+            has_children=self.has_children,
+            parent_id=self.parent_id,
+            tag_list_general=tags.get("general", []),
+            tag_list_artist=tags.get("artist", []),
+            tag_list_character=tags.get("character", []),
+            tag_list_meta=tags.get("meta", []),
+            tag_list_copyright=tags.get("copyright", []),
+        )
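A quick usage sketch of GelbooruTag, assuming a populated gelbooru_tags.jsonl (or a working ProxyHandler for cache misses); the tag strings here are illustrative:

    from utils.gelboorutags import GelbooruTag

    # With exception_handle=0, unknown tags fall back to type 0 ("general") instead of raising.
    tag_handler = GelbooruTag(exception_handle=0)

    # Numeric type ids, e.g. [0, 4]
    print(tag_handler.get_types("1girl hakurei_reimu"))
    # Human-readable types, e.g. ["general", "character"]
    print(tag_handler.get_types("1girl hakurei_reimu", verbose=True))
    # Tags grouped by type, e.g. {"general": ["1girl"], "character": ["hakurei_reimu"]}
    print(tag_handler.structured_tags("1girl hakurei_reimu"))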
utils/proxyhandler.py ADDED
@@ -0,0 +1,223 @@
+"""
+Proxy Handler Class
+"""
+import json
+from queue import Queue
+import time
+import urllib.parse
+import threading
+import requests
+
+class ThreadSafeDict(dict):
+    """
+    Thread safe dict
+    """
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.lock = threading.Lock()
+    def __getitem__(self, key):
+        with self.lock:
+            return super().__getitem__(key)
+    def __setitem__(self, key, value):
+        with self.lock:
+            return super().__setitem__(key, value)
+    def __delitem__(self, key):
+        with self.lock:
+            return super().__delitem__(key)
+    def __contains__(self, key):
+        with self.lock:
+            return super().__contains__(key)
+    def __len__(self):
+        with self.lock:
+            return super().__len__()
+    def __iter__(self):
+        with self.lock:
+            return super().__iter__()
+    def __repr__(self):
+        with self.lock:
+            return super().__repr__()
+    def __str__(self):
+        with self.lock:
+            return super().__str__()
+
+class ProxyHandler:
+    """
+    Sends requests to http://{ip}:{port}/get_response_raw?url={url} with auth
+    """
+    def __init__(self, proxy_list_file, proxy_auth="user:pass", port=80, wait_time=0.1, timeouts=10):
+        self.proxy_auth = proxy_auth
+        self.port = port
+        self.proxy_list = []
+        self.commit_time = ThreadSafeDict()
+        self.timeouts = timeouts
+        self.wait_time = wait_time
+        self.lock = threading.Lock()
+        self.last_logged_activities = Queue(maxsize=100)
+        with open(proxy_list_file, 'r', encoding='utf-8') as f:
+            for line in f:
+                self.proxy_list.append(line.strip())
+        for i, proxy in enumerate(self.proxy_list):
+            if not proxy.startswith("http"):
+                proxy = "http://" + proxy
+            if proxy.count(":") < 2:  # no explicit port (the scheme itself contributes one colon)
+                proxy += f":{self.port}"
+            if not proxy.endswith("/"):
+                proxy += "/"
+            self.proxy_list[i] = proxy
+        self.proxy_index = -1
+    def log_time(self):
+        """
+        Logs the current request time
+        """
+        if self.last_logged_activities.full():
+            # drop the oldest entry to make room
+            self.last_logged_activities.get()
+        self.last_logged_activities.put(time.time())
+    def get_average_time(self):
+        """
+        Returns the average time between logged requests
+        """
+        # use the oldest and newest entries to get the average interval
+        if len(self.last_logged_activities.queue) > 1:
+            return (self.last_logged_activities.queue[-1] - self.last_logged_activities.queue[0]) / self.last_logged_activities.qsize()
+        return 0
+    def wait_until_commit(self, proxy_index=None):
+        """
+        Waits until the commit time
+        """
+        if proxy_index is None:
+            proxy_index = self.proxy_index
+        if proxy_index not in self.commit_time:
+            self.commit_time[proxy_index] = 0
+        while time.time() < self.commit_time[proxy_index] + self.wait_time:
+            time.sleep(0.01)
+        self.commit_time[proxy_index] = time.time()
+    def _update_proxy_index(self):
+        """
+        Updates the proxy index (round robin)
+        """
+        with self.lock:
+            self.proxy_index = (self.proxy_index + 1) % len(self.proxy_list)
+            return self.proxy_index
+    def get_response(self, url):
+        """
+        Returns the response of the url
+        """
+        url = urllib.parse.quote(url, safe='')
+        try:
+            index = self._update_proxy_index()
+            self.wait_until_commit(index)
+            self.log_time()
+            response = requests.get(self.proxy_list[index] + f"get_response?url={url}", timeout=self.timeouts, auth=tuple(self.proxy_auth.split(":")))
+            if response.status_code == 200:
+                json_response = response.json()
+                if json_response["success"]:
+                    return json.loads(json_response["response"])
+                if "429" in json_response["response"]:
+                    self.commit_time[index] = time.time() + self.timeouts
+                    print(f"Error: {json_response['response']}, waiting {self.timeouts} seconds")
+                print(f"Failed on proxy side: {json_response['response']}")
+                return None
+            elif response.status_code == 429:
+                self.commit_time[index] = time.time() + self.timeouts
+                print(f"Error: {response.status_code}, waiting {self.timeouts} seconds")
+            else:
+                print(f"Failed on proxy side: {response.status_code}")
+            return None
+        except Exception as e:
+            print(f"Error while processing response from proxy: {e}")
+            return None
+    def get(self, url):
+        """
+        Returns the raw response of the url
+        """
+        url = urllib.parse.quote(url, safe='')
+        try:
+            index = self._update_proxy_index()
+            self.wait_until_commit(index)
+            response = requests.get(self.proxy_list[index] + f"get_response_raw?url={url}", timeout=self.timeouts, auth=tuple(self.proxy_auth.split(":")))
+            if response.status_code == 200:
+                return response
+            print(f"Error: {response.status_code}")
+            return None
+        except Exception as e:
+            print(f"Exception: {e}")
+            return None
+    def filesize(self, url):
+        """
+        Returns the filesize of the url
+        """
+        url = urllib.parse.quote(url, safe='')
+        try:
+            index = self._update_proxy_index()
+            self.wait_until_commit(index)
+            response = requests.get(self.proxy_list[index] + f"file_size?url={url}", timeout=self.timeouts, auth=tuple(self.proxy_auth.split(":")))
+            if response.status_code == 200:
+                return int(response.text)
+            print(f"Error: {response.status_code} when getting filesize from {url}")
+            return None
+        except Exception as e:
+            print(f"Exception: {e}")
+            return None
+    def get_filepart(self, url, start, end):
+        """
+        Returns the response of the url with a byte range
+        """
+        url = urllib.parse.quote(url, safe='')
+        try:
+            index = self._update_proxy_index()
+            self.wait_until_commit(index)
+            response = requests.get(self.proxy_list[index] + f"filepart?url={url}&start={start}&end={end}", timeout=self.timeouts, auth=tuple(self.proxy_auth.split(":")))
+            if response.status_code == 200:
+                return response
+            print(f"Error: {response.status_code}")
+            return None
+        except Exception as e:
+            print(f"Exception: {e}")
+            return None
+    def check(self, raise_exception=False):
+        """
+        Checks if the proxies are working
+        """
+        failed_proxies = []
+        for i, proxy in enumerate(self.proxy_list):
+            try:
+                response = requests.get(proxy, auth=tuple(self.proxy_auth.split(":")), timeout=2)
+                if response.status_code == 200:
+                    continue
+                print(f"Proxy {proxy} is not working")
+                failed_proxies.append(i)
+            except Exception as e:
+                print(f"Proxy {proxy} is not working: {e}")
+                failed_proxies.append(i)
+        if len(failed_proxies) > 0:
+            if raise_exception:
+                raise Exception(f"Proxies {failed_proxies} are not working")
+            print(f"Proxies {failed_proxies} are not working: {len(failed_proxies)} of {len(self.proxy_list)} proxies failed")
+            # remove failed proxies
+            for i in failed_proxies[::-1]:
+                del self.proxy_list[i]
+            if len(self.proxy_list) == 0:
+                raise Exception("No proxies available")
+        else:
+            print(f"All {len(self.proxy_list)} proxies are working")
+
+class SingleProxyHandler(ProxyHandler):
+    """
+    Sends requests to http://{ip}:{port}/get_response_raw?url={url} with auth
+    """
+    def __init__(self, proxy_url, proxy_auth="user:pass", port=80, wait_time=0.1, timeouts=10):
+        self.proxy_auth = proxy_auth
+        self.port = port
+        self.proxy_list = [proxy_url]
+        self.proxy_index = -1
+        self.commit_time = ThreadSafeDict()
+        self.timeouts = timeouts
+        self.wait_time = wait_time
+        self.lock = threading.Lock()
+        self.last_logged_activities = Queue(maxsize=100)  # needed by log_time / get_average_time
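A minimal usage sketch of ProxyHandler, assuming a proxy_list.txt with one relay host per line and relays exposing the get_response endpoints described above (the file name and credentials here are illustrative):

    from utils.proxyhandler import ProxyHandler

    handler = ProxyHandler("proxy_list.txt", proxy_auth="user:pass", wait_time=0.1, timeouts=10)
    handler.check()  # drop relays that fail a quick health check

    # Round-robins the request across relays, honoring per-relay wait times.
    data = handler.get_response(
        "https://gelbooru.com/index.php?page=dapi&s=tag&q=index&json=1&names=1girl")
    if data is not None:
        print(data["@attributes"]["count"])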