| """PMC Open Access Subset enriched from XML.""" |

import datetime
import json
import re
from collections import defaultdict
from itertools import compress

import numpy as np
import pandas as pd
from lxml import etree

import datasets


_CITATION = ""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.
Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However, articles
in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally allow more
liberal redistribution and reuse than a traditional copyrighted work.
The PMC Open Access Subset is one part of the PMC Article Datasets.

This version uses the XML files as source, taking advantage of the structured text to split each article into
sections (introduction, methods, results, discussion and conclusion) and to mark, with keywords in the text,
the references to external or internal resources (articles, figures, tables, formulas, boxed text, quotes, code,
footnotes, chemicals, graphics, media).
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

_LICENSE = """
https://www.ncbi.nlm.nih.gov/pmc/about/copyright/

Within the PMC Open Access Subset, there are three groupings:

Commercial Use Allowed - CC0, CC BY, CC BY-SA, CC BY-ND licenses;
Non-Commercial Use Only - CC BY-NC, CC BY-NC-SA, CC BY-NC-ND licenses; and
Other - no machine-readable Creative Commons license, no license, or a custom license.
"""

_URL_ROOT = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/"
_URL = _URL_ROOT + "oa_bulk/{subset}/xml/"

_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2023-12-18"
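# For example, _URL.format(subset=_SUBSETS["commercial"]) resolves to
# "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/xml/", the directory
# holding the commercial-use baseline and incremental packages.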

REFS_KEYS = ["pmid_ref", "unknown_pub_ref", "figure_ref", "table_ref", "formula_ref", "box_ref", "code_ref",
             "quote_ref", "chemical_ref", "supplementary_ref", "footnote_ref", "graphic_ref", "media_ref"]
CONTENT_KEYS = ["introduction", "methods", "results", "discussion", "conclusion",
                "front", "body", "back", "figure", "table", "formula", "box",
                "code", "quote", "chemical", "supplementary", "footnote"]

begin_doc_rgx = re.compile("<!DOCTYPE.*")


def clean_raw(xml_text):
    """
    Fix the formatting of a file's XML and return it.
    Some files are badly formatted but can be fixed/improved.
    """
    # Drop everything before the <!DOCTYPE ...> declaration (XML prolog or
    # stray characters); if no declaration is found, keep the text unchanged.
    begin_doc = begin_doc_rgx.search(xml_text)
    if begin_doc is not None:
        xml_text = xml_text[begin_doc.start():]
    return xml_text
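# A minimal sketch of the behaviour (the input string is illustrative):
#
#     clean_raw('garbage<!DOCTYPE article><article/>')
#     # -> '<!DOCTYPE article><article/>'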


TAG_DIC = {
    "fig": ("FIG", "figure_ref"), "table-wrap": ("TAB", "table_ref"),
    "array": ("TAB", "table_ref"), "boxed-text": ("BOX", "box_ref"),
    "graphic": ("GRAPH", "graphic_ref"), "inline-graphic": ("GRAPH", "graphic_ref"),
    "media": ("MEDIA", "media_ref"), "inline-media": ("MEDIA", "media_ref"),
    "disp-formula": ("FORMU", "formula_ref"), "inline-formula": ("FORMU", "formula_ref"),
    "table-wrap-foot": ("FOOTN", "footnote_ref"), "fn-group": ("FOOTN", "footnote_ref"),
    "code": ("CODE", "code_ref"), "chem-struct-wrap": ("CHEM", "chemical_ref"),
    "disp-quote": ("QUOTE", "quote_ref"), "speech": ("QUOTE", "quote_ref"),
    "supplementary-material": ("SUPPL", "supplementary_ref"),
    "inline-supplementary-material": ("SUPPL", "supplementary_ref"),
}


def get_ref_indexes(ref_el_l, refs_pmid, refs_nonpmid_keys):
    """
    Give every element found as an <xref> target an index, so that it can later
    be located in its corresponding section. Also sort the elements into the
    different reference types (e.g. <array> and <table-wrap> are both labeled
    as table_ref).
    """
    count_ref_d = defaultdict(int)
    reference_d = {}
    for k, v in refs_pmid.items():
        reference_d[k] = (v, "REF", "pmid_ref")
    for i, k in enumerate(refs_nonpmid_keys):
        reference_d[k] = (i, "UREF", "unknown_pub_ref")

    refs_key_l = []
    for el in ref_el_l:
        keyword, ref_name = TAG_DIC[el.tag]
        idx = count_ref_d[ref_name]
        key = el.attrib["id"] if "id" in el.attrib else f"{el.tag}{idx}"
        reference_d[key] = (idx, keyword, ref_name)
        refs_key_l.append(key)
        count_ref_d[ref_name] += 1
    return reference_d, refs_key_l
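# A minimal sketch of the resulting mapping (ids and PMID are illustrative):
#
#     fig = etree.fromstring('<fig id="F1"/>')
#     reference_d, keys = get_ref_indexes([fig], {"B1": "11111111"}, ["B2"])
#     # reference_d == {"B1": ("11111111", "REF", "pmid_ref"),
#     #                 "B2": (0, "UREF", "unknown_pub_ref"),
#     #                 "F1": (0, "FIG", "figure_ref")}
#     # keys == ["F1"]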


def parseout_el_refs(el, rids):
    """
    Extract the text from the tag opening to its closing, discarding the tail's text.
    Remove the xml namespace declarations from the text for storage savings, such as:
    - xmlns:xlink="http://www.w3.org/1999/xlink"
    - xmlns:mml="http://www.w3.org/1998/Math/MathML"

    Then extract from the text all the references found in the rids dictionary
    and replace them with keywords of the corresponding family (e.g.
    "##REF##4##Doe 2022##" for a bibliographic reference, "##TAB##0##Table 1##"
    for a table, or "##FORMU##1##(2)##" for a mathematical formula).

    Reference ranges (e.g. 1-3 or 15-17) are expanded to all their members
    (1,2,3 or 15,16,17).

    Returns the parsed text.
    """
    # Drop <xref> elements with no text, reattaching their tails to the tree.
    for xref in el.xpath(".//xref"):
        inner_text = "".join(xref.itertext())
        if inner_text == "":
            tail = xref.tail if xref.tail else ""
            prev_el = xref.getprevious()
            parent = xref.getparent()
            if prev_el is None:
                parent.text = "".join([(parent.text if parent.text else ""), tail])
            else:
                prev_el.tail = "".join([(prev_el.tail if prev_el.tail else ""), tail])
            parent.remove(xref)

    res_rid = defaultdict(list)
    res_reftext = defaultdict(list)
    ref_rstart, ref_rstop = None, None
    has_ref_range = None
    for xref in el.xpath(".//xref[not(ancestor::xref)]"):
        inner_text = "".join(xref.itertext())
        parent = xref.getparent()
        rid = xref.get("rid")
        if rid in rids:
            ref_idx, ref_kword, ref_class = rids[rid]
            res_rid[ref_class].append(ref_idx)
            res_reftext[ref_class].append(inner_text)

            tail = xref.tail if xref.tail else ""
            repl_text = f"##{ref_kword}##{ref_idx}##{inner_text}##"

            try:
                # A numeric citation below 500 is assumed to possibly open a
                # reference range such as "10-12".
                if has_ref_range is None:
                    if ref_kword in ["UREF", "REF"]:
                        has_ref_range = res_reftext[ref_class][-1].isnumeric() and int(res_reftext[ref_class][-1]) < 500

                if has_ref_range and ref_kword in ["UREF", "REF"]:
                    if tail == "-":
                        # Opening of a range: remember the start and replace the
                        # dash so the expansion reads "10, 11, 12".
                        ref_rstart = int(res_reftext[ref_class][-1])
                        tail = ", "
                    elif ref_rstart is not None:
                        # Closing of a range: insert the intermediate references
                        # before the closing one.
                        ref_rstop = int(res_reftext[ref_class][-1])
                        new_ref_kwords = [repl_text]
                        for i in range(ref_rstart + 1, ref_rstop):
                            new_rid = re.sub(str(ref_rstop), str(i), rid, count=1)
                            ref_idx_, ref_kword_, ref_class_ = rids[new_rid]
                            res_rid[ref_class_].insert(-1, ref_idx_)
                            res_reftext[ref_class_].insert(-1, str(i))
                            new_ref_kwords.insert(-1, f"##{ref_kword_}##{ref_idx_}##{i}##")
                        repl_text = ", ".join(new_ref_kwords)
                        ref_rstart = None
            except (KeyError, ValueError):
                # Malformed range: leave this <xref> untouched in the tree.
                ref_rstart = None
                continue

            prev_el = xref.getprevious()
            if prev_el is None:
                parent.text = "".join([(parent.text if parent.text else ""), repl_text, tail])
            else:
                prev_el.tail = "".join([(prev_el.tail if prev_el.tail else ""), repl_text, tail])
            parent.remove(xref)

    text = etree.tostring(el, with_tail=False, encoding="unicode", method="xml")

    tag_start = text.find(">") + 1
    tag_txt = text[:tag_start]

    # Strip the namespace declarations from the opening tag.
    for k, v in el.nsmap.items():
        tag_txt = tag_txt.replace(f' xmlns:{k}="{v}"', "", 1)

    text = "".join([tag_txt, text[tag_start:]])

    return text
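# A minimal sketch of the keyword substitution (id and PMID are illustrative):
#
#     el = etree.fromstring('<p>See <xref rid="B1">1</xref>.</p>')
#     rids = {"B1": ("11111111", "REF", "pmid_ref")}
#     parseout_el_refs(el, rids)
#     # -> '<p>See ##REF##11111111##1##.</p>'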


def get_references(article_tree):
    """
    Retrieve the bibliographic references of the article.
    The first returned dictionary maps the reference ids to a PMID, for the
    references having one.
    The second list contains the <ref> tag fields of the other references,
    which could be used to identify the referenced documents (for some, a PMID
    could be found from the title and authors of the document).
    The third list holds the keys of those non-PMID references.
    """
    references_pmid = {}
    references_nonpmid = []
    references_nonpmid_keys = []
    refs = article_tree.find(".//ref-list")
    if refs is None:
        return {}, [], []
    refs = refs.findall("ref")
    for i, ref in enumerate(refs):
        pmid = None
        for pubid in ref.findall(".//pub-id"):
            if pubid.get("pub-id-type") == "pmid":
                try:
                    pmid = int(pubid.text)
                except (TypeError, ValueError):
                    pmid = None
                break
        # Sanity check: PMIDs are 1- to 8-digit identifiers; anything larger is
        # treated as malformed and the reference kept as "unknown".
        if pmid is not None and pmid < 100000000:
            references_pmid[ref.attrib["id"]] = str(pmid)
        else:
            ref_key = ref.attrib["id"] if "id" in ref.attrib else f"URef{i+1}"
            citation_d = defaultdict(list)
            for el in ref.iterdescendants():
                if isinstance(el.text, str) and isinstance(el.tag, str):
                    citation_d[el.tag].append(el.text)
            references_nonpmid.append(dict(citation_d))
            references_nonpmid_keys.append(ref_key)
    return references_pmid, references_nonpmid, references_nonpmid_keys
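# A minimal sketch (id and PMID are illustrative):
#
#     xml = ('<article><back><ref-list><ref id="B1">'
#            '<pub-id pub-id-type="pmid">11111111</pub-id>'
#            '</ref></ref-list></back></article>')
#     get_references(etree.ElementTree(etree.fromstring(xml)))
#     # -> ({'B1': '11111111'}, [], [])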


def construct_datadict(article_tree):
    """
    Where the magic happens. A long routine that:
    - Gets the external references (with their PMID when present)
    - Gets the glossary and removes it from the document
    - Finds the internal references (figures, tables, ...) and builds an xref dictionary
    - Extracts the paragraphs and titles with their path in the document
    - Uses the titles to identify the introduction, methods, results, discussion and conclusion sections
    - Uses the paths to group paragraphs and titles into the corresponding content
    - Puts the remaining <p> and <title> elements into three other sections: front, body, back

    Returns:
    - res_content_d: dictionary with the resulting content of each part
    - reference_count: the count of unique external-document references

    Useful information about the tags can be found here: https://jats.nlm.nih.gov/archiving/tag-library/1.3/
    """
    res_content_d = {}

    refs_pmid, refs_nonpmid, refs_nonpmid_keys = get_references(article_tree)
    reference_count = len(refs_pmid) + len(refs_nonpmid)

    res_content_d["unknown_pub"] = json.dumps(refs_nonpmid)
    # The reference list is removed so its entries are not picked up again below.
    refs_el = article_tree.find(".//ref-list")
    if refs_el is not None:
        refs_el.getparent().remove(refs_el)

    # Extract the glossary (<glossary> and <def-list> tags) and remove it from the tree.
    glossary = {}

    def search_def(el):
        for item in el.findall(".//def-item"):
            abbrev = item.find(".//term")
            if abbrev is None:
                continue
            k = "".join(abbrev.itertext())
            definition = item.find(".//def")
            definition = "".join(definition.itertext()) if definition is not None else ""
            glossary[k] = definition

    for el in article_tree.findall(".//glossary"):
        search_def(el)
        el.getparent().remove(el)
    for el in article_tree.findall(".//def-list"):
        search_def(el)
        el.getparent().remove(el)
    res_content_d["glossary"] = glossary

    # Collect every element that can be the target of an internal reference
    # (figures, tables, formulas, ...) and index them.
    ref_el_l = article_tree.xpath(
        ".//fig|.//table-wrap|.//array|.//supplementary-material"
        "|.//inline-supplementary-material|.//disp-formula"
        "|.//inline-formula|.//graphic|.//inline-graphic"
        "|.//media|.//inline-media|.//boxed-text"
        "|.//table-wrap-foot|.//fn-group|.//chem-struct-wrap"
        "|.//code|.//disp-quote|.//speech"
    )
    rids, key_l = get_ref_indexes(ref_el_l, refs_pmid, refs_nonpmid_keys)
    text_l_d = defaultdict(list)
    # Iterate in reverse document order so that nested targets are extracted
    # before their enclosing element is serialized.
    for el, key in zip(ref_el_l[::-1], key_l[::-1]):
        new_text = parseout_el_refs(el, rids)

        ref_class = rids[key][2]
        text_l_d[ref_class].insert(0, new_text)

        # Replace the extracted element by a placeholder <xref> carrying its key.
        repl_xref = etree.Element("xref", attrib={"rid": key})
        repl_xref.tail = el.tail
        el.addprevious(repl_xref)
        el.getparent().remove(el)

    # Store the extracted texts under their content key ("figure_ref" -> "figure", ...).
    for ref_k in REFS_KEYS[2:]:
        res_content_d[ref_k[:-4]] = text_l_d[ref_k]

    path_l, text_l = [], []
    t_paths, t_texts_lowcase = [], []
    for part in ["front", "body", "back"]:
        tmp_path_l, tmp_text_l = [], []
        tmp_t_paths, tmp_t_texts_lowcase = [], []
        part_el = article_tree.find(".//" + part)
        if part_el is None:
            res_content_d[part] = []
            continue

        for el in part_el.xpath(".//p[not(ancestor::p) and not(ancestor::title)]|.//title[not(ancestor::p) and not(ancestor::title)]"):
            new_text = parseout_el_refs(el, rids)
            tmp_path_l.append(article_tree.getelementpath(el))
            tmp_text_l.append(new_text)
            if el.tag == "title":
                tmp_t_paths.append(tmp_path_l[-1])
                tmp_t_texts_lowcase.append(new_text.lower())
        if part == "body":
            path_l, text_l = tmp_path_l, tmp_text_l
            t_paths, t_texts_lowcase = tmp_t_paths, tmp_t_texts_lowcase
        else:
            res_content_d[part] = tmp_text_l

    # Identify the canonical sections from the lower-cased titles of the body.
    mask_intro = np.array(["introduction" in t_text or "background" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_metho = np.array(["method" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_resul = np.array(["result" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_discu = np.array(["discussion" in t_text for t_text in t_texts_lowcase]).astype(bool)
    mask_concl = np.array(["conclusion" in t_text for t_text in t_texts_lowcase]).astype(bool)
    processed_mask = np.zeros(len(text_l), dtype="bool")
    for mask, name_section in zip([mask_intro, mask_metho, mask_resul, mask_discu, mask_concl],
                                  ["introduction", "methods", "results", "discussion", "conclusion"]):
        if not np.any(mask):
            res_content_d[name_section] = []
            continue

        filtered_path_l = list(compress(t_paths, mask))
        levels = np.array([len(path.split("/")) for path in filtered_path_l])
        root_path = filtered_path_l[np.argmin(levels)]
        root_path = root_path[:root_path.rindex("/")]
        mask_contents = np.array([path.startswith(root_path) for path in path_l]).astype(bool)
        processed_mask |= mask_contents
        res_content_d[name_section] = list(compress(text_l, mask_contents))
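        # E.g. a matched title at path "body/sec[2]/title" gives the root path
        # "body/sec[2]", so every paragraph or title whose path starts with
        # "body/sec[2]" is grouped into that section.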

    # Whatever was not assigned to a canonical section stays in "body".
    processed_mask = ~processed_mask
    res_content_d["body"] = list(compress(text_l, processed_mask))

    return (res_content_d, reference_count)
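# A minimal end-to-end sketch of the extraction (the document is illustrative):
#
#     xml = ('<article><body><sec><title>Introduction</title>'
#            '<p>Some text.</p></sec></body></article>')
#     content_d, n_ref = construct_datadict(etree.ElementTree(etree.fromstring(xml)))
#     # content_d["introduction"] == ['<title>Introduction</title>', '<p>Some text.</p>']
#     # n_ref == 0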


class OpenAccessXMLConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.
        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(
            name="+".join(subsets), **kwargs,
        )
        self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())
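# For example, OpenAccessXMLConfig(subsets="commercial") creates the config
# named "commercial", while the default OpenAccessXMLConfig(subsets="all")
# expands to every key of _SUBSETS.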


class OpenAccessXML(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset enriched from XML files."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessXMLConfig
    BUILDER_CONFIGS = [OpenAccessXMLConfig(subsets="all")] + [OpenAccessXMLConfig(subsets=subset) for subset in _SUBSETS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "accession_id": datasets.Value("string"),
                    "pmid": datasets.Value("string"),

                    "introduction": datasets.features.Sequence(datasets.Value("string")),
                    "methods": datasets.features.Sequence(datasets.Value("string")),
                    "results": datasets.features.Sequence(datasets.Value("string")),
                    "discussion": datasets.features.Sequence(datasets.Value("string")),
                    "conclusion": datasets.features.Sequence(datasets.Value("string")),

                    "front": datasets.features.Sequence(datasets.Value("string")),
                    "body": datasets.features.Sequence(datasets.Value("string")),
                    "back": datasets.features.Sequence(datasets.Value("string")),

                    "figure": datasets.features.Sequence(datasets.Value("string")),
                    "table": datasets.features.Sequence(datasets.Value("string")),
                    "formula": datasets.features.Sequence(datasets.Value("string")),
                    "box": datasets.features.Sequence(datasets.Value("string")),
                    "code": datasets.features.Sequence(datasets.Value("string")),
                    "quote": datasets.features.Sequence(datasets.Value("string")),
                    "chemical": datasets.features.Sequence(datasets.Value("string")),
                    "supplementary": datasets.features.Sequence(datasets.Value("string")),
                    "footnote": datasets.features.Sequence(datasets.Value("string")),
                    "graphic": datasets.features.Sequence(datasets.Value("string")),
                    "media": datasets.features.Sequence(datasets.Value("string")),

                    "unknown_pub": datasets.Value("string"),
                    "glossary": datasets.features.Sequence(
                        {"acronym": datasets.Value("string"), "definition": datasets.Value("string")}
                    ),
                    "n_references": datasets.Value("int32"),
                    "license": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                    "package_file": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        incremental_paths = {
            "incremental_file_lists": [],
            "incremental_archives": [],
        }

        baseline_package_list = dl_manager.download(f"{_URL_ROOT}oa_file_list.csv")

        baseline_file_lists = []
        baseline_archives = []
        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_xml."

            # Baseline packages are sharded by PMCID prefix (PMC000xxxxxx ... PMC008xxxxxx).
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(9)]

            for baseline in baselines:
                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
                try:
                    baseline_file_list = dl_manager.download(baseline_file_list_url)
                    baseline_archive = dl_manager.download(baseline_archive_url)
                except FileNotFoundError:
                    # Not every shard exists for every subset.
                    continue

                baseline_file_lists.append(baseline_file_list)
                baseline_archives.append(baseline_archive)

            # Daily incremental packages published since the baseline date.
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            for incremental in incrementals:
                incremental_file_list_url = f"{url}{basename}{incremental}.filelist.csv"
                incremental_archive_url = f"{url}{basename}{incremental}.tar.gz"
                try:
                    incremental_file_list = dl_manager.download(incremental_file_list_url)
                    incremental_archive = dl_manager.download(incremental_archive_url)
                except FileNotFoundError:
                    continue
                incremental_paths["incremental_file_lists"].append(incremental_file_list)
                incremental_paths["incremental_archives"].append(incremental_archive)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "baseline_package_list": baseline_package_list,
                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
                    "incremental_archives": [dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]],
                },
            ),
        ]

    def _generate_examples(self, baseline_file_lists, baseline_archives, baseline_package_list, incremental_file_lists, incremental_archives):
        # The global file list maps each accession id to its package file.
        oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
        oa_package_list = oa_package_list[["File"]]
        oa_package_list.sort_index(inplace=True)
        processed_ids = set()
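        # Below, each per-archive file list is joined with the package list on
        # the accession id and re-indexed by "Article File", so that every
        # member of a tar archive can be looked up by its path.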

        # Incrementals are processed first, from most recent to oldest, so the
        # latest version of an updated article is the one yielded; older
        # occurrences are then skipped through `processed_ids`.
        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists[::-1], incremental_archives[::-1]):
                try:
                    incrementals = pd.read_csv(incremental_file_list, index_col="AccessionID")
                except FileNotFoundError:
                    continue
                incrementals = incrementals.join(oa_package_list).reset_index().set_index("Article File")
                incrementals.File = incrementals.File.fillna("")
                incrementals = incrementals.to_dict(orient="index")

                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    pmcid = data["AccessionID"]
                    if pmcid in processed_ids:
                        continue
                    content = file.read()
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:
                        text = content.decode("latin-1").strip()
                    text = clean_raw(text)
                    try:
                        article_tree = etree.ElementTree(etree.fromstring(text))
                    except etree.XMLSyntaxError:
                        # Skip the rare articles whose XML cannot be parsed.
                        continue

                    content_d, n_ref = construct_datadict(article_tree)
                    glossary = np.array([[k, v] for k, v in content_d["glossary"].items()])
                    data = {
                        "introduction": content_d["introduction"],
                        "methods": content_d["methods"],
                        "results": content_d["results"],
                        "discussion": content_d["discussion"],
                        "conclusion": content_d["conclusion"],
                        "front": content_d["front"],
                        "body": content_d["body"],
                        "back": content_d["back"],
                        "figure": content_d["figure"],
                        "table": content_d["table"],
                        "formula": content_d["formula"],
                        "box": content_d["box"],
                        "code": content_d["code"],
                        "quote": content_d["quote"],
                        "chemical": content_d["chemical"],
                        "supplementary": content_d["supplementary"],
                        "footnote": content_d["footnote"],
                        "graphic": content_d["graphic"],
                        "media": content_d["media"],
                        "unknown_pub": content_d["unknown_pub"],
                        "glossary": {"acronym": glossary[:, 0], "definition": glossary[:, 1]} if len(glossary) > 0 else {"acronym": [], "definition": []},
                        "n_references": n_ref,
                        "pmid": data["PMID"],
                        "accession_id": pmcid,
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                        "package_file": data["File"],
                    }
                    processed_ids.add(pmcid)
                    yield pmcid, data

        # Then the baseline archives; articles already yielded from an
        # incremental are skipped.
        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            baselines = pd.read_csv(baseline_file_list, index_col="AccessionID")
            baselines = baselines.join(oa_package_list).reset_index().set_index("Article File")
            baselines.File = baselines.File.fillna("")
            baselines = baselines.to_dict(orient="index")

            for path, file in baseline_archive:
                data = baselines.pop(path)
                pmcid = data["AccessionID"]
                if pmcid in processed_ids:
                    continue
                content = file.read()
                try:
                    text = content.decode("utf-8").strip()
                except UnicodeDecodeError:
                    text = content.decode("latin-1").strip()
                text = clean_raw(text)
                try:
                    article_tree = etree.ElementTree(etree.fromstring(text))
                except etree.XMLSyntaxError:
                    # Skip the rare articles whose XML cannot be parsed.
                    continue

                content_d, n_ref = construct_datadict(article_tree)
                glossary = np.array([[k, v] for k, v in content_d["glossary"].items()])
                data = {
                    "introduction": content_d["introduction"],
                    "methods": content_d["methods"],
                    "results": content_d["results"],
                    "discussion": content_d["discussion"],
                    "conclusion": content_d["conclusion"],
                    "front": content_d["front"],
                    "body": content_d["body"],
                    "back": content_d["back"],
                    "figure": content_d["figure"],
                    "table": content_d["table"],
                    "formula": content_d["formula"],
                    "box": content_d["box"],
                    "code": content_d["code"],
                    "quote": content_d["quote"],
                    "chemical": content_d["chemical"],
                    "supplementary": content_d["supplementary"],
                    "footnote": content_d["footnote"],
                    "graphic": content_d["graphic"],
                    "media": content_d["media"],
                    "unknown_pub": content_d["unknown_pub"],
                    "glossary": {"acronym": glossary[:, 0], "definition": glossary[:, 1]} if len(glossary) > 0 else {"acronym": [], "definition": []},
                    "n_references": n_ref,
                    "pmid": data["PMID"],
                    "accession_id": pmcid,
                    "license": data["License"],
                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                    "retracted": data["Retracted"],
                    "citation": data["Article Citation"],
                    "package_file": data["File"],
                }
                processed_ids.add(pmcid)
                yield pmcid, data