Shiowo2 committed on
Commit
5219c80
·
verified ·
1 Parent(s): bd2e7b8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +55 -1
README.md CHANGED
@@ -25,28 +25,40 @@ import csv
25
  import time
26
  import requests
27
  from urllib.parse import quote
 
28
  OUT_CSV = "jabodetabek_sports_osm.csv"
 
29
  BBOX = (-6.80, 106.30, -5.90, 107.20)
 
30
  OVERPASS_URL = "https://overpass-api.de/api/interpreter"
31
  WIKIDATA_ENTITY_URL = "https://www.wikidata.org/wiki/Special:EntityData/{qid}.json"
 
32
  FETCH_WIKIDATA_IMAGES = True
 
33
  HEADERS = {"User-Agent": "jabodetabek-sports-scraper/1.0 (contact: yourname@example.com)"}
 
34
def osm_browse_link(osm_type: str, osm_id: int) -> str:
    """Build the openstreetmap.org browse URL for an element.

    ``osm_type`` is one of "node"/"way"/"relation"; ``osm_id`` is the
    numeric element id.
    """
    base = "https://www.openstreetmap.org"
    return "/".join((base, osm_type, str(osm_id)))
 
36
def commons_file_url(filename: str, width: int = 1600) -> str:
    """Turn a Wikimedia Commons file name into a Special:FilePath thumbnail URL.

    Accepts names with or without the leading "File:" namespace prefix
    (matched case-insensitively); ``width`` selects the thumbnail size.
    """
    name = filename.strip()
    # Drop a leading "File:" prefix, keeping everything after the first colon.
    if name.lower().startswith("file:"):
        _, _, name = name.partition(":")
    return f"https://commons.wikimedia.org/wiki/Special:FilePath/{quote(name)}?width={width}"
 
41
  def extract_image_link(tags: dict) -> str:
 
42
  img = tags.get("image")
43
  if img:
44
  if img.startswith("http"):
45
  return img
46
  return commons_file_url(img)
 
47
  wm = tags.get("wikimedia_commons")
48
  if wm:
49
  return commons_file_url(wm)
 
50
  qid = tags.get("wikidata")
51
  if FETCH_WIKIDATA_IMAGES and qid and qid.upper().startswith("Q"):
52
  try:
@@ -61,29 +73,40 @@ def extract_image_link(tags: dict) -> str:
61
  return commons_file_url(filename)
62
  except Exception:
63
  pass
 
64
  return ""
 
65
def compose_address(tags: dict) -> str:
    """Assemble a single address string from OSM ``addr:*`` tags.

    Returns ``tags["addr:full"]`` verbatim when that key exists; otherwise
    joins street+housenumber followed by locality, region and postcode
    components with ", ". Missing pieces are simply omitted.
    """
    # A pre-composed full address wins outright (key presence, not truthiness).
    if "addr:full" in tags:
        return tags["addr:full"]

    street = tags.get("addr:street")
    number = tags.get("addr:housenumber")

    pieces = []
    if street:
        pieces.append(f"{street} {number}" if number else street)

    # Most-specific to least-specific locality/region keys, in fixed order.
    locality_keys = (
        "addr:neighbourhood", "addr:suburb", "addr:village",
        "addr:city", "addr:municipality", "addr:county",
        "addr:province", "addr:state",
    )
    pieces.extend(tags[k] for k in locality_keys if tags.get(k))

    postcode = tags.get("addr:postcode")
    if postcode:
        pieces.append(postcode)

    return ", ".join(pieces)
 
87
  def build_types(tags: dict) -> str:
88
  bits = []
89
  if "leisure" in tags:
@@ -93,44 +116,71 @@ def build_types(tags: dict) -> str:
93
  if "sport" in tags:
94
  bits.append(f"sport:{tags['sport']}")
95
  return ", ".join(bits)
 
96
def fetch_overpass(bbox):
    """Query the Overpass API for sport-related OSM elements inside *bbox*.

    Parameters
    ----------
    bbox : tuple
        (south, west, north, east) bounding-box coordinates.

    Returns
    -------
    list
        Raw Overpass "elements" dicts; empty list when the reply has none.

    Raises
    ------
    requests.HTTPError
        On a non-2xx response from the Overpass server.
    """
    s, w, n, e = bbox

    # leisure=* values that denote sport facilities.
    leisure_regex = "^(sports_centre|fitness_centre|stadium|pitch|swimming_pool|track)$"

    query = f"""
[out:json][timeout:180];
(
node["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
way["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
relation["leisure"~"{leisure_regex}"]({s},{w},{n},{e});

// Any feature explicitly tagged with sport=*, but avoid retail shops
node["sport"]["shop"!~".*"]({s},{w},{n},{e});
way["sport"]["shop"!~".*"]({s},{w},{n},{e});
relation["sport"]["shop"!~".*"]({s},{w},{n},{e});
);
out center tags;
"""
    r = requests.post(OVERPASS_URL, data={"data": query}, headers=HEADERS, timeout=180)
    r.raise_for_status()
    return r.json().get("elements", [])
 
103
def element_coords(el) -> tuple[float, float]:
    """Return (lat, lon) for an Overpass element, or (None, None) if absent.

    Nodes carry coordinates directly; ways/relations expose a centroid
    under the "center" key (from ``out center``).
    """
    source = el if el["type"] == "node" else (el.get("center") or {})
    return source.get("lat"), source.get("lon")
 
108
  def main():
109
  elements = fetch_overpass(BBOX)
110
  seen = set()
111
  rows = []
 
112
  for el in elements:
113
  el_type = el.get("type")
 
114
  el_id = el.get("id")
115
  tags = el.get("tags", {}) or {}
 
116
  key = (el_type, el_id)
117
  if key in seen:
118
  continue
119
  seen.add(key)
 
120
  lat, lon = element_coords(el)
121
  if lat is None or lon is None:
122
  continue
 
123
  name = tags.get("name") or "(Unnamed)"
124
  addr = compose_address(tags)
125
  types = build_types(tags)
126
  osm_link = osm_browse_link(el_type, el_id)
127
  image_link = extract_image_link(tags)
 
128
  likely_sporty = (
129
  "leisure" in tags and tags["leisure"] in
130
  {"sports_centre", "fitness_centre", "stadium", "pitch", "swimming_pool", "track"}
131
  ) or ("sport" in tags)
 
132
  if not likely_sporty:
133
  continue
 
134
  rows.append({
135
  "name": name,
136
  "address": addr,
@@ -142,13 +192,17 @@ def main():
142
  "osm_type": el_type,
143
  "osm_id": el_id,
144
  })
 
145
  fieldnames = ["name", "address", "lat", "lng", "types", "osm_link", "image_link", "osm_type", "osm_id"]
146
  with open(OUT_CSV, "w", newline="", encoding="utf-8") as f:
147
  w = csv.DictWriter(f, fieldnames=fieldnames)
148
  w.writeheader()
149
  for row in rows:
150
  w.writerow(row)
 
151
  print(f"Saved {len(rows)} places to {OUT_CSV}")
 
152
  if __name__ == "__main__":
153
  main()
 
154
  ```
 
25
  import time
26
  import requests
27
  from urllib.parse import quote
28
+
29
  OUT_CSV = "jabodetabek_sports_osm.csv"
30
+
31
  BBOX = (-6.80, 106.30, -5.90, 107.20)
32
+
33
  OVERPASS_URL = "https://overpass-api.de/api/interpreter"
34
  WIKIDATA_ENTITY_URL = "https://www.wikidata.org/wiki/Special:EntityData/{qid}.json"
35
+
36
  FETCH_WIKIDATA_IMAGES = True
37
+
38
  HEADERS = {"User-Agent": "jabodetabek-sports-scraper/1.0 (contact: yourname@example.com)"}
39
+
40
def osm_browse_link(osm_type: str, osm_id: int) -> str:
    """Return the canonical openstreetmap.org page URL for an OSM element."""
    template = "https://www.openstreetmap.org/{kind}/{ident}"
    return template.format(kind=osm_type, ident=osm_id)
42
+
43
def commons_file_url(filename: str, width: int = 1600) -> str:
    """Map a Commons file name to a Special:FilePath thumbnail URL.

    Strips whitespace and an optional case-insensitive "File:" prefix,
    then percent-encodes the remaining name.
    """
    fn = filename.strip()
    if fn.lower().startswith("file:"):
        # "file:" is 5 characters; everything after it is the bare name.
        fn = fn[len("file:"):]
    prefix = "https://commons.wikimedia.org/wiki/Special:FilePath/"
    return f"{prefix}{quote(fn)}?width={width}"
49
+
50
  def extract_image_link(tags: dict) -> str:
51
+
52
  img = tags.get("image")
53
  if img:
54
  if img.startswith("http"):
55
  return img
56
  return commons_file_url(img)
57
+
58
  wm = tags.get("wikimedia_commons")
59
  if wm:
60
  return commons_file_url(wm)
61
+
62
  qid = tags.get("wikidata")
63
  if FETCH_WIKIDATA_IMAGES and qid and qid.upper().startswith("Q"):
64
  try:
 
73
  return commons_file_url(filename)
74
  except Exception:
75
  pass
76
+
77
  return ""
78
+
79
def compose_address(tags: dict) -> str:
    """Build a comma-separated address out of an element's ``addr:*`` tags."""
    # An explicit full address overrides any piecewise composition.
    if "addr:full" in tags:
        return tags["addr:full"]

    out = []

    street = tags.get("addr:street")
    house = tags.get("addr:housenumber")
    if street:
        out.append(street if not house else f"{street} {house}")

    # Fixed specificity order: neighbourhood-level, city-level, province-level.
    for group in (
        ("addr:neighbourhood", "addr:suburb", "addr:village"),
        ("addr:city", "addr:municipality", "addr:county"),
        ("addr:province", "addr:state"),
    ):
        for key in group:
            value = tags.get(key)
            if value:
                out.append(value)

    code = tags.get("addr:postcode")
    if code:
        out.append(code)

    return ", ".join(out)
109
+
110
  def build_types(tags: dict) -> str:
111
  bits = []
112
  if "leisure" in tags:
 
116
  if "sport" in tags:
117
  bits.append(f"sport:{tags['sport']}")
118
  return ", ".join(bits)
119
+
120
def fetch_overpass(bbox):
    """Fetch sport-related OSM elements inside *bbox* from the Overpass API.

    *bbox* unpacks as (south, west, north, east); returns the raw list of
    element dicts from the JSON reply (empty list when none are present).
    """
    s, w, n, e = bbox

    # Restrict leisure=* to sport-facility values only.
    leisure_regex = "^(sports_centre|fitness_centre|stadium|pitch|swimming_pool|track)$"

    query = f"""
[out:json][timeout:180];
(
node["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
way["leisure"~"{leisure_regex}"]({s},{w},{n},{e});
relation["leisure"~"{leisure_regex}"]({s},{w},{n},{e});

// Any feature explicitly tagged with sport=*, but avoid retail shops
node["sport"]["shop"!~".*"]({s},{w},{n},{e});
way["sport"]["shop"!~".*"]({s},{w},{n},{e});
relation["sport"]["shop"!~".*"]({s},{w},{n},{e});
);
out center tags;
"""
    response = requests.post(
        OVERPASS_URL, data={"data": query}, headers=HEADERS, timeout=180
    )
    response.raise_for_status()
    payload = response.json()
    return payload.get("elements", [])
142
+
143
def element_coords(el) -> tuple[float, float]:
    """Extract (lat, lon): taken directly for nodes, from "center" otherwise."""
    if el["type"] != "node":
        center = el.get("center") or {}
        return center.get("lat"), center.get("lon")
    return el.get("lat"), el.get("lon")
149
+
150
  def main():
151
  elements = fetch_overpass(BBOX)
152
  seen = set()
153
  rows = []
154
+
155
  for el in elements:
156
  el_type = el.get("type")
157
+
158
  el_id = el.get("id")
159
  tags = el.get("tags", {}) or {}
160
+
161
  key = (el_type, el_id)
162
  if key in seen:
163
  continue
164
  seen.add(key)
165
+
166
  lat, lon = element_coords(el)
167
  if lat is None or lon is None:
168
  continue
169
+
170
  name = tags.get("name") or "(Unnamed)"
171
  addr = compose_address(tags)
172
  types = build_types(tags)
173
  osm_link = osm_browse_link(el_type, el_id)
174
  image_link = extract_image_link(tags)
175
+
176
  likely_sporty = (
177
  "leisure" in tags and tags["leisure"] in
178
  {"sports_centre", "fitness_centre", "stadium", "pitch", "swimming_pool", "track"}
179
  ) or ("sport" in tags)
180
+
181
  if not likely_sporty:
182
  continue
183
+
184
  rows.append({
185
  "name": name,
186
  "address": addr,
 
192
  "osm_type": el_type,
193
  "osm_id": el_id,
194
  })
195
+
196
  fieldnames = ["name", "address", "lat", "lng", "types", "osm_link", "image_link", "osm_type", "osm_id"]
197
  with open(OUT_CSV, "w", newline="", encoding="utf-8") as f:
198
  w = csv.DictWriter(f, fieldnames=fieldnames)
199
  w.writeheader()
200
  for row in rows:
201
  w.writerow(row)
202
+
203
  print(f"Saved {len(rows)} places to {OUT_CSV}")
204
+
205
  if __name__ == "__main__":
206
  main()
207
+
208
  ```