id,original,modified 20227,"def send_test_notifications(url=None, oid=OID, errors=ERRORS): """""" Send fake notifiations to school endpoints to help troubleshoot. You can send test notifications to school endpoints to confirm reception, or send to httpbin to check the outbound IP (origin), headers, and the data payload. Examples: - print(send_test_notifications()) - print(send_test_notifications(url=BINPOST)) """""" prod_endpoints = { 'edmc': Contact.objects.get(name='EDMC').endpoint, 'bpi': Contact.objects.get(name='Bridgepoint Education').endpoint } if not url: urls = list(prod_endpoints.values()) else: urls = [url] payload = { 'oid': oid, 'time': f""{datetime.datetime.now().isoformat()}+00:00"", # noqa 'errors': errors } msg = """" for _url in urls: try: resp = requests.post(_url, data=payload, timeout=10) except requests.exceptions.ConnectTimeout: msg += f""Post to {_url} timed out.\n"" else: report = REPORT.format( _url, resp.ok, resp.reason, resp.status_code, payload['time'], ) msg += f""{report}\n"" if resp.content: msg += f""Response content: {resp.content}\n"" return msg ","def send_test_notifications(url=None, oid=OID, errors=ERRORS): """""" Send fake notifications to school endpoints to help troubleshoot. You can send test notifications to school endpoints to confirm reception, or send to httpbin to check the outbound IP (origin), headers, and the data payload. Examples: - print(send_test_notifications()) - print(send_test_notifications(url=BINPOST)) """""" prod_endpoints = { 'edmc': Contact.objects.get(name='EDMC').endpoint, 'bpi': Contact.objects.get(name='Bridgepoint Education').endpoint } if not url: urls = list(prod_endpoints.values()) else: urls = [url] payload = { 'oid': oid, 'time': f""{datetime.datetime.now().isoformat()}+00:00"", # noqa 'errors': errors } msg = """" for _url in urls: try: resp = requests.post(_url, data=payload, timeout=10) except requests.exceptions.ConnectTimeout: msg += f""Post to {_url} timed out.\n"" else: report = REPORT.format( _url, resp.ok, resp.reason, resp.status_code, payload['time'], ) msg += f""{report}\n"" if resp.content: msg += f""Response content: {resp.content}\n"" return msg " 32172,"def main(): options = arguments_handler() pr_number = options.pr_number github_token = options.github_token org_name = 'demisto' repo_name = 'content' github_client: Github = Github(github_token, verify=False) content_repo: Repository = github_client.get_repo(f'{org_name}/{repo_name}') pr: PullRequest = content_repo.get_pull(int(pr_number)) t = Terminal() pr_label_names = [label.name for label in pr.labels] is_contribution_form_filled_label_exist = CONTRIBUTION_FORM_FILLED_LABEL in pr_label_names is_community_label_exist = COMMUNITY_LABEL in pr_label_names is_partner_label_exist = PARTNER_LABEL in pr_label_names is_internal_label_exist = INTERNAL_LABEL in pr_label_names print(f'{t.cyan}Check that {CONTRIBUTION_FORM_FILLED_LABEL} label exist in PR {pr_number}') if not is_contribution_form_filled_label_exist: print( f'{t.red}ERROR: Contribution form was not filled for PR: {pr_number}.\nMake sure to register your' f' contribution by filling the contribution registration form in - https://forms.gle/XDfxU4E61ZwEESSMA' ) sys.exit(1) print(f'{t.cyan}Check that one of Community/Partner/Internal labels exist in PR {pr_number}') if not (is_community_label_exist ^ is_partner_label_exist ^ is_internal_label_exist): print( f'{t.red}ERROR: PR labels {pr_label_names} ' f'must contain one of {COMMUNITY_LABEL}/{PARTNER_LABEL}/{INTERNAL_LABEL} labels' ) sys.exit(1) 
print(f'{t.cyan}PR labels {pr_label_names} are valid') print(f'{t.cyan} Contribution form was filled successfully for PR: {pr_number}') sys.exit(0) ","def main(): options = arguments_handler() pr_number = options.pr_number github_token = options.github_token org_name = 'demisto' repo_name = 'content' github_client: Github = Github(github_token, verify=False) content_repo: Repository = github_client.get_repo(f'{org_name}/{repo_name}') pr: PullRequest = content_repo.get_pull(int(pr_number)) t = Terminal() pr_label_names = [label.name for label in pr.labels] is_contribution_form_filled_label_exist = CONTRIBUTION_FORM_FILLED_LABEL in pr_label_names is_community_label_exist = COMMUNITY_LABEL in pr_label_names is_partner_label_exist = PARTNER_LABEL in pr_label_names is_internal_label_exist = INTERNAL_LABEL in pr_label_names print(f'{t.cyan}Checking if {CONTRIBUTION_FORM_FILLED_LABEL} label exist in PR {pr_number}') if not is_contribution_form_filled_label_exist: print( f'{t.red}ERROR: Contribution form was not filled for PR: {pr_number}.\nMake sure to register your' f' contribution by filling the contribution registration form in - https://forms.gle/XDfxU4E61ZwEESSMA' ) sys.exit(1) print(f'{t.cyan}Check that one of Community/Partner/Internal labels exist in PR {pr_number}') if not (is_community_label_exist ^ is_partner_label_exist ^ is_internal_label_exist): print( f'{t.red}ERROR: PR labels {pr_label_names} ' f'must contain one of {COMMUNITY_LABEL}/{PARTNER_LABEL}/{INTERNAL_LABEL} labels' ) sys.exit(1) print(f'{t.cyan}PR labels {pr_label_names} are valid') print(f'{t.cyan} Contribution form was filled successfully for PR: {pr_number}') sys.exit(0) " 24660,"def test_iteration_with_nested_iterator(): hydrogen = IonizationState(""p+"", n_elem=1e20 * u.m**-3, T_e=10 * u.eV) i = sum(1 for _, __ in itertools.product(hydrogen, hydrogen)) assert i == 4 ","def test_iteration_with_nested_iterator(): hydrogen = IonizationState(""p+"", n_elem=1e20 * u.m**-3, T_e=10 * u.eV) i = len(itertools.product(hydrogen, hydrogen)) assert i == 4 " 17725,"def _init_hoomd_14_pairs( structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0 ): """"""Special_pairs to handle 14 scalings See discussion: https://groups.google.com/forum/ #!topic/hoomd-users/iZ9WCpHczg0"""""" # Update neighborlist to exclude 1-4 interactions, # but impose a special_pair force to handle these pairs nl.exclusions.append(""1-4"") if hoomd.context.current.system_definition.getPairData().getN() == 0: print(""No 1,4 pairs found in hoomd snapshot"") return None, None lj_14 = hoomd.md.special_pair.lj() qq_14 = hoomd.md.special_pair.coulomb() params_14 = {} # Identify unique 14 scalings for adjust in structure.adjusts: t1 = adjust.atom1.type t2 = adjust.atom2.type ps = ""-"".join(sorted([t1, t2])) if ps not in params_14: params_14[ps] = adjust.type for name, adjust_type in params_14.items(): lj_14.pair_coeff.set( name, sigma=adjust_type.sigma / ref_distance, # The adjust epsilon alreayd carries the scaling epsilon=adjust_type.epsilon / ref_energy, # Do NOT use hoomd's alpha to modify any LJ terms alpha=1, r_cut=r_cut, ) qq_14.pair_coeff.set(name, alpha=adjust_type.chgscale, r_cut=r_cut) return lj_14, qq_14 ","def _init_hoomd_14_pairs( structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0 ): """"""Special_pairs to handle 14 scalings See discussion: https://groups.google.com/forum/ #!topic/hoomd-users/iZ9WCpHczg0"""""" # Update neighborlist to exclude 1-4 interactions, # but impose a special_pair force to handle these pairs 
nl.exclusions.append(""1-4"") if hoomd.context.current.system_definition.getPairData().getN() == 0: print(""No 1,4 pairs found in hoomd snapshot"") return None, None lj_14 = hoomd.md.special_pair.lj() qq_14 = hoomd.md.special_pair.coulomb() params_14 = {} # Identify unique 14 scalings for adjust in structure.adjusts: t1 = adjust.atom1.type t2 = adjust.atom2.type ps = ""-"".join(sorted([t1, t2])) if ps not in params_14: params_14[ps] = adjust.type for name, adjust_type in params_14.items(): lj_14.pair_coeff.set( name, sigma=adjust_type.sigma / ref_distance, # The adjust epsilon already carries the scaling epsilon=adjust_type.epsilon / ref_energy, # Do NOT use hoomd's alpha to modify any LJ terms alpha=1, r_cut=r_cut, ) qq_14.pair_coeff.set(name, alpha=adjust_type.chgscale, r_cut=r_cut) return lj_14, qq_14 " 39440,"def test_copy_no_copy_wrap_object(datasets): for dataset in datasets: # different dataset tyoes have different copy behavior for points # use point data which is common dataset[""data""] = np.ones(dataset.n_points) new_dataset = type(dataset)(dataset) new_dataset[""data""] += 1 assert np.array_equal(new_dataset[""data""], dataset[""data""]) for dataset in datasets: # different dataset tyoes have different copy behavior for points # use point data which is common dataset[""data""] = np.ones(dataset.n_points) new_dataset = type(dataset)(dataset, deep=True) new_dataset[""data""] += 1 assert not np.any(new_dataset[""data""] == dataset[""data""]) ","def test_copy_no_copy_wrap_object(datasets): for dataset in datasets: # different dataset tyoes have different copy behavior for points # use point data which is common dataset[""data""] = np.ones(dataset.n_points) new_dataset = type(dataset)(dataset) new_dataset[""data""] += 1 assert np.array_equal(new_dataset[""data""], dataset[""data""]) for dataset in datasets: # different dataset types have different copy behavior for points # use point data which is common dataset[""data""] = np.ones(dataset.n_points) new_dataset = type(dataset)(dataset, deep=True) new_dataset[""data""] += 1 assert not np.any(new_dataset[""data""] == dataset[""data""]) " 47896,"def main(): log.basicConfig(format=""[ %(levelname)s ] %(message)s"", level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() # load vocabulary file for model log.info(""Loading vocab file:\t{}"".format(args.vocab)) with open(args.vocab, ""r"", encoding=""utf-8"") as r: vocab = dict((t.rstrip(""\n""), i) for i, t in enumerate(r.readlines())) log.info(""{} tokens loaded"".format(len(vocab))) # get context as a string (as we might need it's length for the sequence reshape) context = get_context(args) # encode context into token ids list c_tokens_id, c_tokens_se = text_to_tokens(context, vocab) log.info(""Initializing Inference Engine"") ie = IECore() log.info(""Device is {}"".format(args.device)) version = ie.get_versions(args.device)[args.device] version_str = ""{}.{}.{}"".format(version.major, version.minor, version.build_number) log.info(""Plugin version is {}"".format(version_str)) # read IR model_xml = args.model model_bin = os.path.splitext(model_xml)[0] + "".bin"" log.info(""Loading network files:\n\t{}\n\t{}"".format(model_xml, model_bin)) ie_encoder = ie.read_network(model=model_xml, weights=model_bin) if args.reshape: # reshape the sequence length to the context + maximum question length (in tokens) first_input_layer = next(iter(ie_encoder.inputs)) c = ie_encoder.inputs[first_input_layer].shape[1] # find the closest multiple of 64 seq = min(c, round((len(c_tokens_id) + 
args.max_question_token_num) / 64) * 64) if seq < c: input_info = list(ie_encoder.inputs) new_shapes = dict([]) for i in input_info: n, c = ie_encoder.inputs[i].shape new_shapes[i] = [n, seq] log.info(""Reshaped input {} from {} to the {}"".format(i, ie_encoder.inputs[i].shape, new_shapes[i])) log.info(""Attempting to reshape the network to the modified inputs..."") try: ie_encoder.reshape(new_shapes) log.info(""Successful!"") except: log.info(""Failed...reloading the network"") ie_encoder = ie.read_network(model=model_xml, weights=model_bin) log.info(""Done"") else: log.info(""Skipping network reshaping,"" "" as (context length + max question length) exceeds the current (input) network sequence length"") # check input and output names input_names_model = list(ie_encoder.inputs.keys()) output_names_model = list(ie_encoder.outputs.keys()) input_names = eval(args.input_names) output_names = eval(args.output_names) if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names): log.error(""Input or Output names do not match"") log.error("" Network input->output names: {}->{}"".format(input_names_model, output_names_model)) log.error("" Expected (from the demo cmd-line) input->output names: {}->{}"".format(input_names, output_names)) raise Exception(""Unexpected network input or output names"") # load model to the device log.info(""Loading model to the {}"".format(args.device)) ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device) # loop on user's questions while True: question = input('Type question (enter to exit):') if not question: break q_tokens_id, _ = text_to_tokens(question, vocab) # maximum number of tokens that can be processed by network at once max_length = ie_encoder.inputs[input_names[0]].shape[1] # calculate number of tokens for context in each inference request. 
# reserve 3 positions for special tokens # [CLS] q_tokens [SEP] c_tokens [SEP] c_wnd_len = max_length - (len(q_tokens_id) + 3) # token num between two neighbour context windows # 1/2 means that context windows are overlapped by half c_stride = c_wnd_len // 2 t0 = time.time() t_count = 0 # array of answers from each window answers = [] # init a window to iterate over context c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id)) # iterate while context window is not empty while c_e > c_s: # form the request tok_cls = vocab['[CLS]'] tok_sep = vocab['[SEP]'] input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep] token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0] attention_mask = [1] * len(input_ids) # pad the rest of the request pad_len = max_length - len(input_ids) input_ids += [0] * pad_len token_type_ids += [0] * pad_len attention_mask += [0] * pad_len # create numpy inputs for IE inputs = { input_names[0]: np.array([input_ids], dtype=np.int32), input_names[1]: np.array([attention_mask], dtype=np.int32), input_names[2]: np.array([token_type_ids], dtype=np.int32), } t_start = time.time() # infer by IE res = ie_encoder_exec.infer(inputs=inputs) t_end = time.time() t_count += 1 log.info(""Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)"".format( max_length, 1 / (t_end - t_start), t_end - t_start )) # get start-end scores for context def get_score(name): out = np.exp(res[name].reshape((max_length,))) return out / out.sum(axis=-1) score_s = get_score(output_names[0]) score_e = get_score(output_names[1]) # get 'no-answer' score (not valid if model has been fine-tuned on squad1.x) if args.model_squad_ver.split('.')[0] == '1': score_na = 0 else: score_na = score_s[0] * score_e[0] # find product of all start-end combinations to find the best one c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor score_mat = np.matmul( score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)), score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s)) ) # reset candidates with end before start score_mat = np.triu(score_mat) # reset long candidates (>max_answer_token_num) score_mat = np.tril(score_mat, args.max_answer_token_num - 1) # find the best start-end pair max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1]) max_score = score_mat[max_s, max_e] * (1 - score_na) # convert to context text start-end index max_s = c_tokens_se[c_s + max_s][0] max_e = c_tokens_se[c_s + max_e][1] # check that answers list does not have duplicates (because of context windows overlapping) same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e] if same: assert len(same) == 1 # update exist answer record a = answers[same[0]] answers[same[0]] = (max(max_score, a[0]), max_s, max_e) else: # add new record answers.append((max_score, max_s, max_e)) # check that context window reach the end if c_e == len(c_tokens_id): break # move to next window position c_s = min(c_s + c_stride, len(c_tokens_id)) c_e = min(c_s + c_wnd_len, len(c_tokens_id)) t1 = time.time() log.info(""{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)"".format( t_count, max_length, t1 - t0, (t1 - t0) / t_count )) # print top 3 results answers = list(sorted(answers, key=lambda x: -x[0])) for score, s, e in answers[:3]: log.info(""---answer: {:0.2f} {}"".format(score, context[s:e])) c_s, c_e = find_sentence_range(context, s, e) log.info("" "" + 
context[c_s:s] + ""\033[91m"" + context[s:e] + '\033[0m' + context[e:c_e]) ","def main(): log.basicConfig(format=""[ %(levelname)s ] %(message)s"", level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() # load vocabulary file for model log.info(""Loading vocab file:\t{}"".format(args.vocab)) with open(args.vocab, ""r"", encoding=""utf-8"") as r: vocab = dict((t.rstrip(""\n""), i) for i, t in enumerate(r.readlines())) log.info(""{} tokens loaded"".format(len(vocab))) # get context as a string (as we might need it's length for the sequence reshape) context = get_context(args) # encode context into token ids list c_tokens_id, c_tokens_se = text_to_tokens(context, vocab) log.info(""Initializing Inference Engine"") ie = IECore() log.info(""Device is {}"".format(args.device)) version = ie.get_versions(args.device)[args.device] version_str = ""{}.{}.{}"".format(version.major, version.minor, version.build_number) log.info(""Plugin version is {}"".format(version_str)) # read IR model_xml = args.model model_bin = os.path.splitext(model_xml)[0] + "".bin"" log.info(""Loading network files:\n\t{}\n\t{}"".format(model_xml, model_bin)) ie_encoder = ie.read_network(model=model_xml, weights=model_bin) if args.reshape: # reshape the sequence length to the context + maximum question length (in tokens) first_input_layer = next(iter(ie_encoder.inputs)) c = ie_encoder.inputs[first_input_layer].shape[1] # find the closest multiple of 64 seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64) if seq < c: input_info = list(ie_encoder.inputs) new_shapes = dict([]) for i in input_info: n, c = ie_encoder.inputs[i].shape new_shapes[i] = [n, seq] log.info(""Reshaped input {} from {} to the {}"".format(i, ie_encoder.inputs[i].shape, new_shapes[i])) log.info(""Attempting to reshape the network to the modified inputs..."") try: ie_encoder.reshape(new_shapes) log.info(""Successful!"") except: log.info(""Failed...reloading the network"") ie_encoder = ie.read_network(model=model_xml, weights=model_bin) log.info(""Done"") else: log.info(""Skipping network reshaping,"" "" as (context length + max question length) exceeds the current (input) network sequence length"") # check input and output names input_names_model = list(ie_encoder.inputs.keys()) output_names_model = list(ie_encoder.outputs.keys()) input_names = eval(args.input_names) output_names = eval(args.output_names) if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names): log.error(""Input or Output names do not match"") log.error("" Network input->output names: {}->{}"".format(input_names_model, output_names_model)) log.error("" Expected (from the demo cmd-line) input->output names: {}->{}"".format(input_names, output_names)) raise Exception(""Unexpected network input or output names"") # load model to the device log.info(""Loading model to the {}"".format(args.device)) ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device) # loop on user's questions while True: question = input('Type question (enter to exit):') if not question: break q_tokens_id, _ = text_to_tokens(question, vocab) # maximum number of tokens that can be processed by network at once max_length = ie_encoder.inputs[input_names[0]].shape[1] # calculate number of tokens for context in each inference request. 
# reserve 3 positions for special tokens # [CLS] q_tokens [SEP] c_tokens [SEP] c_wnd_len = max_length - (len(q_tokens_id) + 3) # token num between two neighbour context windows # 1/2 means that context windows are overlapped by half c_stride = c_wnd_len // 2 t0 = time.time() t_count = 0 # array of answers from each window answers = [] # init a window to iterate over context c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id)) # iterate while context window is not empty while c_e > c_s: # form the request tok_cls = vocab['[CLS]'] tok_sep = vocab['[SEP]'] input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep] token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0] attention_mask = [1] * len(input_ids) # pad the rest of the request pad_len = max_length - len(input_ids) input_ids += [0] * pad_len token_type_ids += [0] * pad_len attention_mask += [0] * pad_len # create numpy inputs for IE inputs = { input_names[0]: np.array([input_ids], dtype=np.int32), input_names[1]: np.array([attention_mask], dtype=np.int32), input_names[2]: np.array([token_type_ids], dtype=np.int32), } t_start = time.time() # infer by IE res = ie_encoder_exec.infer(inputs=inputs) t_end = time.time() t_count += 1 log.info(""Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)"".format( max_length, 1 / (t_end - t_start), t_end - t_start )) # get start-end scores for context def get_score(name): out = np.exp(res[name].reshape((max_length,))) return out / out.sum(axis=-1) score_s = get_score(output_names[0]) score_e = get_score(output_names[1]) # get 'no-answer' score (not valid if model has been fine-tuned on squad1.x) if args.model_squad_ver.split('.')[0] == '1': score_na = 0 else: score_na = score_s[0] * score_e[0] # find product of all start-end combinations to find the best one c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor score_mat = np.matmul( score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)), score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s)) ) # reset candidates with end before start score_mat = np.triu(score_mat) # reset long candidates (>max_answer_token_num) score_mat = np.tril(score_mat, args.max_answer_token_num - 1) # find the best start-end pair max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1]) max_score = score_mat[max_s, max_e] * (1 - score_na) # convert to context text start-end index max_s = c_tokens_se[c_s + max_s][0] max_e = c_tokens_se[c_s + max_e][1] # check that answers list does not have duplicates (because of context windows overlapping) same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e] if same: assert len(same) == 1 # update existing answer record a = answers[same[0]] answers[same[0]] = (max(max_score, a[0]), max_s, max_e) else: # add new record answers.append((max_score, max_s, max_e)) # check that context window reach the end if c_e == len(c_tokens_id): break # move to next window position c_s = min(c_s + c_stride, len(c_tokens_id)) c_e = min(c_s + c_wnd_len, len(c_tokens_id)) t1 = time.time() log.info(""{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)"".format( t_count, max_length, t1 - t0, (t1 - t0) / t_count )) # print top 3 results answers = list(sorted(answers, key=lambda x: -x[0])) for score, s, e in answers[:3]: log.info(""---answer: {:0.2f} {}"".format(score, context[s:e])) c_s, c_e = find_sentence_range(context, s, e) log.info("" "" + 
context[c_s:s] + ""\033[91m"" + context[s:e] + '\033[0m' + context[e:c_e]) " 6448,"def create_sample_collection(lab_test, template, patient, invoice): if(frappe.db.get_value('Healthcare Settings', None, 'create_sample_collection_for_lab_test') == '1'): sample_collection = create_sample_doc(template, patient, invoice, lab_test.company) if(sample_collection): lab_test.sample = sample_collection.name return lab_test ","def create_sample_collection(lab_test, template, patient, invoice): if(frappe.db.get_value('Healthcare Settings', None, 'create_sample_collection_for_lab_test') == '1'): sample_collection = create_sample_doc(template, patient, invoice, lab_test.company) if sample_collection: lab_test.sample = sample_collection.name return lab_test " 28204,"def make_unique(s, existing): """""" Make string `s` unique, able to be added to a sequence `existing` of existing names without duplication, by ``appending _`` to it if needed. """""" n = 1 s_out = s existing = set(existing) while s_out in existing: n += 1 s_out = '{}_{}'.format(s, n) return s_out ","def make_unique(s, existing): """""" Make string ``s`` unique, able to be added to a sequence ``existing`` of existing names without duplication, by ``appending _`` to it if needed. """""" n = 1 s_out = s existing = set(existing) while s_out in existing: n += 1 s_out = '{}_{}'.format(s, n) return s_out " 37326,"def is_parameterized(value: Any) -> bool: """"""Shorthand for a frequently checked predicate. ParameterExpressions cannot be validated until they are numerically assigned. """""" return isinstance(value, ParameterExpression) ","def _is_parameterized(value: Any) -> bool: """"""Shorthand for a frequently checked predicate. ParameterExpressions cannot be validated until they are numerically assigned. """""" return isinstance(value, ParameterExpression) " 4651,"def str_cast_to_int(object, name, value): """""" A function that validates the value is a str and then convert it to an int using its length. """""" if not isinstance(value, str): raise TraitError(""Not an string!"") return len(value) ","def str_cast_to_int(object, name, value): """""" A function that validates the value is a str and then converts it to an int using its length. """""" if not isinstance(value, str): raise TraitError(""Not an string!"") return len(value) " 38302,"def _accumulate_scalar_field(p, field_vals): r"""""" This function integrates a scalar field along a path. It uses a similar method to that in _accumulate_vector_field, but the integral is now: ..math:: I = \int_C \phi(x1,x2,...,xn)d\vec{r} Parameters ---------- p : YTArray The path to be integrated along fieldVals : YTArray An array containing the values of the scalar field to be integrated at the location of the starting point of each path segment as well as the endpoint for the last segment Returns ------- accum : YTArray The cumulative value of the field integral at each path segment """""" # https://en.wikipedia.org/wiki/Line_integral # np.linalg.norm returns a ndarray, so when multiplied by field_vals, which # is a YTArray, this leads to incorrect units. There does not appear to be # a unyt implementation of norm, as far as I'm aware, so units will be # handled manually for the time being accum = np.cumsum(field_vals[:-1].d * np.linalg.norm(p[1:].d - p[:-1].d, axis=1)) accum = YTArray(accum, field_vals.units * p.units) return accum ","def _accumulate_scalar_field(p, field_vals): r"""""" This function integrates a scalar field along a path. 
It uses a similar method to that in _accumulate_vector_field, but the integral is now: ..math:: I = \int_C \phi(x1,x2,...,xn)d\vec{r} Parameters ---------- p : YTArray The path to be integrated along field_vals : YTArray An array containing the values of the scalar field to be integrated at the location of the starting point of each path segment as well as the endpoint for the last segment Returns ------- accum : YTArray The cumulative value of the field integral at each path segment """""" # https://en.wikipedia.org/wiki/Line_integral # np.linalg.norm returns a ndarray, so when multiplied by field_vals, which # is a YTArray, this leads to incorrect units. There does not appear to be # a unyt implementation of norm, as far as I'm aware, so units will be # handled manually for the time being accum = np.cumsum(field_vals[:-1].d * np.linalg.norm(p[1:].d - p[:-1].d, axis=1)) accum = YTArray(accum, field_vals.units * p.units) return accum " 2963,"def test_series_grouper_requires_nonempty(): obj = Series(np.random.randn(10)) dummy = obj[:0] labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) with pytest.raises(ValueError, match=""requires non-empty""): libreduction.SeriesGrouper(dummy, np.mean, labels, 2, dummy) ","def test_series_grouper_requires_nonempty_raises(): obj = Series(np.random.randn(10)) dummy = obj[:0] labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) with pytest.raises(ValueError, match=""requires non-empty""): libreduction.SeriesGrouper(dummy, np.mean, labels, 2, dummy) " 13079,"def test_checkout_add_voucher_code_with_display_gross_prices( api_client, checkout_with_item, voucher, site_settings, monkeypatch ): channel = checkout_with_item.channel tc = channel.tax_configuration tc.display_gross_prices = True tc.save() tc.country_exceptions.all().delete() previous_checkout_last_change = checkout_with_item.last_change voucher = voucher voucher_channel_listing = voucher.channel_listings.first() voucher_channel_listing.min_spent_amount = 100 voucher_channel_listing.save() monkeypatch.setattr( ""saleor.checkout.utils.base_calculations.base_checkout_lines_total"", lambda checkout_lines, channel, currency, discounts: TaxedMoney( Money(95, ""USD""), Money(100, ""USD"") ), ) variables = { ""id"": to_global_id_or_none(checkout_with_item), ""promoCode"": voucher.code, } data = _mutate_checkout_add_promo_code(api_client, variables) assert not data[""errors""] assert data[""checkout""][""token""] == str(checkout_with_item.token) assert data[""checkout""][""voucherCode""] == voucher.code checkout_with_item.refresh_from_db() assert checkout_with_item.last_change != previous_checkout_last_change ","def test_checkout_add_voucher_code_with_display_gross_prices( api_client, checkout_with_item, voucher, site_settings, monkeypatch ): channel = checkout_with_item.channel tc = channel.tax_configuration tc.display_gross_prices = True tc.save(update_fields=[""display_gross_prices""]) tc.country_exceptions.all().delete() previous_checkout_last_change = checkout_with_item.last_change voucher = voucher voucher_channel_listing = voucher.channel_listings.first() voucher_channel_listing.min_spent_amount = 100 voucher_channel_listing.save() monkeypatch.setattr( ""saleor.checkout.utils.base_calculations.base_checkout_lines_total"", lambda checkout_lines, channel, currency, discounts: TaxedMoney( Money(95, ""USD""), Money(100, ""USD"") ), ) variables = { ""id"": to_global_id_or_none(checkout_with_item), ""promoCode"": voucher.code, } data = 
_mutate_checkout_add_promo_code(api_client, variables) assert not data[""errors""] assert data[""checkout""][""token""] == str(checkout_with_item.token) assert data[""checkout""][""voucherCode""] == voucher.code checkout_with_item.refresh_from_db() assert checkout_with_item.last_change != previous_checkout_last_change " 24311,"def get_latest_tag(pattern=None, tag_prefix='v'): """""" Return the highest numbered tag (most recent) Filters on pattern first, otherwise based off all tags Removes prefixed `v` if applicable """""" if not pattern: pattern = rf'^({tag_prefix})?\d+\.\d+\.\d+.*' all_tags = sorted((parse_version_info(t.replace(tag_prefix, '', 1)), t) for t in git_tag_list(pattern)) if not all_tags: return else: # reverse so we have descendant order return list(reversed(all_tags))[0][1] ","def get_latest_tag(pattern=None, tag_prefix='v'): """""" Return the highest numbered tag (most recent) Filters on pattern first, otherwise based off all tags Removes prefixed `v` if applicable """""" if not pattern: pattern = rf'^({tag_prefix})?\d+\.\d+\.\d+.*' all_tags = sorted((parse_version_info(t.replace(tag_prefix, '', 1)), t) for t in git_tag_list(pattern)) if not all_tags: return else: # reverse so we have descending order return list(reversed(all_tags))[0][1] " 38669,"def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--dont-restage', action='store_false', dest='clean_stagedir', help='Reuse the test stage directory', envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', 
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) output_options.add_argument( '--report-file', action='store', metavar='FILE', help=""Store JSON run report in FILE"", envvar='RFM_REPORT_FILE', configvar='general/report_file' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List the selected checks' ) action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List the selected checks providing details for each test' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) # Run options run_options.add_argument( '-J', '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--retry-failed', metavar='NUM', action='store', default=None, help='Retry failed tests in a given runreport' ) run_options.add_argument( 
'--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' ) run_options.add_argument( '--disable-hook', action='append', metavar='NAME', dest='hooks', default=[], help='Disable a pipeline hook for this run' ) env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--upgrade-config-file', action='store', metavar='OLD[:NEW]', help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax' ) misc_options.add_argument( '-V', '--version', action='version', version=os_ext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_ADDRESS', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', configvar='logging/handlers_perflog/syslog_address', help='Syslog server address' ) argparser.add_argument( dest='ignore_reqnodenotavail', envvar='RFM_IGNORE_REQNODENOTAVAIL', configvar='schedulers/ignore_reqnodenotavail', action='store_true', help='Graylog server address' ) argparser.add_argument( dest='use_login_shell', 
envvar='RFM_USE_LOGIN_SHELL', configvar='general/use_login_shell', action='store_true', help='Use a login shell for job scripts' ) if len(sys.argv) == 1: argparser.print_help() sys.exit(1) # Parse command line options = argparser.parse_args() # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' 'please use RFM_GRAYLOG_ADDRESS instead' ) os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER') if options.upgrade_config_file is not None: old_config, *new_config = options.upgrade_config_file.split( ':', maxsplit=1) new_config = new_config[0] if new_config else None try: new_config = config.convert_old_config(old_config, new_config) except Exception as e: printer.error(f'could not convert file: {e}') sys.exit(1) printer.info( f'Conversion successful! ' f'The converted file can be found at {new_config!r}.' ) sys.exit(0) # Now configure ReFrame according to the user configuration file try: try: site_config = config.load_config(options.config_file) except ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() # We ignore errors about unresolved sections or configuration # parameters here, because they might be defined at the individual # partition level and will be caught when we will instantiating # internally the system and partitions later on. 
site_config.select_subconfig(options.system, ignore_resolve_errors=True) for err in options.update_config(site_config): printer.warning(str(err)) # Update options from the selected execution mode if options.mode: mode_args = site_config.get(f'modes/@{options.mode}/options') # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) logging.configure_logging(site_config) except (OSError, ConfigError) as e: printer.error(f'failed to load configuration: {e}') sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: runtime.init_runtime(site_config) except ConfigError as e: printer.error(f'failed to initialize runtime: {e}') sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader if options.retry_failed: with open(options.retry_failed) as f: try: restart_report = json.load(f) except json.JSONDecodeError as e: raise ReframeFatalError( f""invalid runreport: '{restart_report}'"" ) from e schema_filename = os.path.join(reframe.INSTALL_PREFIX, 'reframe', 'schemas', 'runreport.json') with open(schema_filename) as f: try: schema = json.load(f) except json.JSONDecodeError as e: raise ReframeFatalError( f""invalid schema: '{schema_filename}'"" ) from e try: jsonschema.validate(restart_report, schema) except jsonschema.ValidationError as e: raise ValueError(f""could not validate restart runreport: "" f""'{restart_report}'"") from e failed_checks = set() failed_checks_prefixes = set() # for run in restart_report['runs']: for testcase in restart_report['runs'][-1]['testcases']: if testcase['result'] == 'failure': failed_checks.add(hash(testcase['name']) ^ hash(testcase['system']) ^ hash(testcase['environment'])) failed_checks_prefixes.add(testcase['prefix']) loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), #failed_checks_prefixes, ignore_conflicts=site_config.get( 'general/0/ignore_check_conflicts') ) else: loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), recurse=site_config.get('general/0/check_search_recursive'), ignore_conflicts=site_config.get( 'general/0/ignore_check_conflicts') ) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") session_info = { 
'cmdline': ' '.join(sys.argv), 'config_file': rt.site_config.filename, 'data_version': '1.0', 'hostname': socket.gethostname(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': os_ext.osuser(), 'version': os_ext.reframe_version(), 'workdir': os.getcwd(), } # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( f""launched by"", f""{session_info['user'] or ''}@{session_info['hostname']}"" ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline('settings file', f""{session_info['config_file']!r}"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(session_info['prefix_stage'])) print_infoline('output directory', repr(session_info['prefix_output'])) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() except OSError as e: raise ReframeError from e # Filter checks by name checks_matched = checks_found if options.exclude_names: for name in options.exclude_names: checks_matched = filter(filters.have_not_name(name), checks_matched) if options.names: checks_matched = filter(filters.have_name('|'.join(options.names)), checks_matched) # Filter checks by tags for tag in options.tags: checks_matched = filter(filters.have_tag(tag), checks_matched) # Filter checks by prgenv if not options.skip_prgenv_check: for prgenv in options.prgenv: checks_matched = filter(filters.have_prgenv(prgenv), checks_matched) # Filter checks by system if not options.skip_system_check: checks_matched = filter( filters.have_partition(rt.system.partitions), checks_matched) # Filter checks further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: checks_matched = filter(filters.have_gpu_only(), checks_matched) elif options.cpu_only: checks_matched = filter(filters.have_cpu_only(), checks_matched) # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} # Generate the test cases, validate dependencies and sort them checks_matched = list(checks_matched) # Disable hooks for c in checks_matched: for h in options.hooks: type(c).disable_hook(h) testcases_og = generate_testcases(checks_matched, options.skip_system_check, options.skip_prgenv_check, allowed_environs) if options.retry_failed: failed_cases = [tc for tc in testcases_og if tc.__hash__() in failed_checks] cases_graph = dependency.build_deps(failed_cases, testcases_og) testcases = dependency.toposort(cases_graph, is_subgraph=True) restored_tests = set() for c in testcases: for d in c.deps: if d.__hash__() not in failed_checks: restored_tests.add(d) else: testgraph = dependency.build_deps(testcases_og) dependency.validate_deps(testgraph) testcases = dependency.toposort(testgraph) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(m) # Load the environment for the current system try: runtime.loadenv(rt.system.preload_environ) except EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise 
for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(m, force=True) except EnvironError as e: printer.warning(""could not load module '%s' correctly: "" ""Skipping..."" % m) printer.debug(str(e)) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Act on checks success = True if options.list or options.list_detailed: list_checks(list(checks_matched), printer, options.list_detailed) elif options.run: # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise ConfigError(errmsg.format(options.flex_alloc_nodes)) except ValueError: sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes parsed_job_options = [] for opt in options.job_options: if opt.startswith('-') or opt.startswith('#'): parsed_job_options.append(opt) elif len(opt) == 1: parsed_job_options.append(f'-{opt}') else: parsed_job_options.append(f'--{opt}') exec_policy.sched_options = parsed_job_options try: max_retries = int(options.max_retries) except ValueError: raise ConfigError('--max-retries is not a valid integer: %s' % max_retries) from None runner = Runner(exec_policy, printer, max_retries) try: time_start = time.time() session_info['time_start'] = time.strftime( '%FT%T%z', time.localtime(time_start), ) if options.retry_failed: runner.restore(restored_tests, restart_report) runner.runall(testcases, testcases_og) finally: time_end = time.time() session_info['time_end'] = time.strftime( '%FT%T%z', time.localtime(time_end) ) session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries if runner.stats.failures(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run if runner.stats.failures(): printer.info(runner.stats.failure_report()) success = False if options.failure_stats: printer.info(runner.stats.failure_stats()) if options.performance_report: printer.info(runner.stats.performance_report()) # Generate the report for this session report_file = os.path.normpath( os_ext.expandvars(rt.get_option('general/0/report_file')) ) basedir = os.path.dirname(report_file) if basedir: os.makedirs(basedir, exist_ok=True) # Build final JSON report run_stats = runner.stats.json() session_info.update({ 'num_cases': run_stats[0]['num_cases'], 'num_failures': run_stats[-1]['num_failures'] }) json_report = { 'session_info': session_info, 'runs': run_stats } report_file = generate_report_filename(report_file) try: with open(report_file, 'w') as fp: jsonext.dump(json_report, fp, indent=2) except OSError as e: printer.warning( f'failed to generate report in {report_file!r}: {e}' ) else: printer.error(""No action specified. Please specify `-l'/`-L' for "" ""listing or `-r' for running. 
"" ""Try `%s -h' for more options."" % argparser.prog) sys.exit(1) if not success: sys.exit(1) sys.exit(0) except KeyboardInterrupt: sys.exit(1) except ReframeError as e: printer.error(str(e)) sys.exit(1) except (Exception, ReframeFatalError): printer.error(format_exception(*sys.exc_info())) sys.exit(1) finally: try: if site_config.get('general/0/save_log_files'): logging.save_log_files(rt.output_prefix) except OSError as e: printer.error('could not save log file: %s' % e) sys.exit(1) ","def main(): # Setup command line options argparser = argparse.ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) locate_options = argparser.add_argument_group( 'Options for discovering checks' ) select_options = argparser.add_argument_group( 'Options for selecting checks' ) action_options = argparser.add_argument_group( 'Options controlling actions' ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) env_options = argparser.add_argument_group( 'Options controlling the ReFrame environment' ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options output_options.add_argument( '--prefix', action='store', metavar='DIR', help='Set general directory prefix to DIR', envvar='RFM_PREFIX', configvar='systems/prefix' ) output_options.add_argument( '-o', '--output', action='store', metavar='DIR', help='Set output directory prefix to DIR', envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir' ) output_options.add_argument( '-s', '--stage', action='store', metavar='DIR', help='Set stage directory prefix to DIR', envvar='RFM_STAGE_DIR', configvar='systems/stagedir' ) output_options.add_argument( '--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT', help=('Append a timestamp to the output and stage directory prefixes ' '(default: ""%%FT%%T"")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) output_options.add_argument( '--perflogdir', action='store', metavar='DIR', help=('Set performance log data directory prefix ' '(relevant only to the filelog log handler)'), envvar='RFM_PERFLOG_DIR', configvar='logging/handlers_perflog/filelog_basedir' ) output_options.add_argument( '--keep-stage-files', action='store_true', help='Keep stage directories even for successful checks', envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files' ) output_options.add_argument( '--dont-restage', action='store_false', dest='clean_stagedir', help='Reuse the test stage directory', envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir' ) output_options.add_argument( '--save-log-files', action='store_true', default=False, help='Save ReFrame log files to the output directory', envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) output_options.add_argument( '--report-file', action='store', metavar='FILE', help=""Store JSON run report in FILE"", envvar='RFM_REPORT_FILE', configvar='general/report_file' ) # Check discovery options locate_options.add_argument( '-c', '--checkpath', action='append', metavar='PATH', help=""Add PATH to the check search path list"", envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path' ) locate_options.add_argument( '-R', '--recursive', action='store_true', help='Search for checks in the search path recursively', envvar='RFM_CHECK_SEARCH_RECURSIVE', configvar='general/check_search_recursive' ) locate_options.add_argument( '--ignore-check-conflicts', action='store_true', help='Skip checks with conflicting 
names', envvar='RFM_IGNORE_CHECK_CONFLICTS', configvar='general/ignore_check_conflicts' ) # Select options select_options.add_argument( '-t', '--tag', action='append', dest='tags', metavar='PATTERN', default=[], help='Select checks with at least one tag matching PATTERN' ) select_options.add_argument( '-n', '--name', action='append', dest='names', default=[], metavar='PATTERN', help='Select checks whose name matches PATTERN' ) select_options.add_argument( '-x', '--exclude', action='append', dest='exclude_names', metavar='PATTERN', default=[], help='Exclude checks whose name matches PATTERN' ) select_options.add_argument( '-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN', help=('Select checks with at least one ' 'programming environment matching PATTERN') ) select_options.add_argument( '--gpu-only', action='store_true', help='Select only GPU checks' ) select_options.add_argument( '--cpu-only', action='store_true', help='Select only CPU checks' ) # Action options action_options.add_argument( '-l', '--list', action='store_true', help='List the selected checks' ) action_options.add_argument( '-L', '--list-detailed', action='store_true', help='List the selected checks providing details for each test' ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) # Run options run_options.add_argument( '-J', '--job-option', action='append', metavar='OPT', dest='job_options', default=[], help='Pass option OPT to job scheduler' ) run_options.add_argument( '--force-local', action='store_true', help='Force local execution of checks' ) run_options.add_argument( '--skip-sanity-check', action='store_true', help='Skip sanity checking' ) run_options.add_argument( '--skip-performance-check', action='store_true', help='Skip performance checking' ) run_options.add_argument( '--strict', action='store_true', help='Enforce strict performance checking' ) run_options.add_argument( '--skip-system-check', action='store_true', help='Skip system check' ) run_options.add_argument( '--skip-prgenv-check', action='store_true', help='Skip programming environment check' ) run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', choices=['async', 'serial'], default='async', help='Set the execution policy of ReFrame (default: ""async"")' ) run_options.add_argument( '--mode', action='store', help='Execution mode to use' ) run_options.add_argument( '--max-retries', metavar='NUM', action='store', default=0, help='Set the maximum number of times a failed regression test ' 'may be retried (default: 0)' ) run_options.add_argument( '--retry-failed', metavar='NUM', action='store', default=None, help='Retry failed tests in a given runreport' ) run_options.add_argument( '--flex-alloc-nodes', action='store', dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None, help='Set strategy for the flexible node allocation (default: ""idle"").' 
) run_options.add_argument( '--disable-hook', action='append', metavar='NAME', dest='hooks', default=[], help='Disable a pipeline hook for this run' ) env_options.add_argument( '-M', '--map-module', action='append', metavar='MAPPING', dest='module_mappings', default=[], help='Add a module mapping', envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings' ) env_options.add_argument( '-m', '--module', action='append', default=[], metavar='MOD', dest='user_modules', help='Load module MOD before running any regression check', envvar='RFM_USER_MODULES ,', configvar='general/user_modules' ) env_options.add_argument( '--module-mappings', action='store', metavar='FILE', dest='module_map_file', help='Load module mappings from FILE', envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file' ) env_options.add_argument( '-u', '--unload-module', action='append', metavar='MOD', dest='unload_modules', default=[], help='Unload module MOD before running any regression check', envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules' ) env_options.add_argument( '--purge-env', action='store_true', dest='purge_env', default=False, help='Unload all modules before running any regression check', envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment' ) env_options.add_argument( '--non-default-craype', action='store_true', help='Test a non-default Cray Programming Environment', envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype' ) # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='store', dest='config_file', metavar='FILE', help='Set configuration file', envvar='RFM_CONFIG_FILE' ) misc_options.add_argument( '--nocolor', action='store_false', dest='colorize', help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) misc_options.add_argument( '--performance-report', action='store_true', help='Print a report for performance tests' ) misc_options.add_argument( '--show-config', action='store', nargs='?', const='all', metavar='PARAM', help='Print the value of configuration parameter PARAM and exit' ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( '--upgrade-config-file', action='store', metavar='OLD[:NEW]', help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax' ) misc_options.add_argument( '-V', '--version', action='version', version=os_ext.reframe_version() ) misc_options.add_argument( '-v', '--verbose', action='count', help='Increase verbosity level of output', envvar='RFM_VERBOSE', configvar='general/verbose' ) # Options not associated with command-line arguments argparser.add_argument( dest='graylog_server', envvar='RFM_GRAYLOG_ADDRESS', configvar='logging/handlers_perflog/graylog_address', help='Graylog server address' ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', configvar='logging/handlers_perflog/syslog_address', help='Syslog server address' ) argparser.add_argument( dest='ignore_reqnodenotavail', envvar='RFM_IGNORE_REQNODENOTAVAIL', configvar='schedulers/ignore_reqnodenotavail', action='store_true', help='Graylog server address' ) argparser.add_argument( dest='use_login_shell', envvar='RFM_USE_LOGIN_SHELL', configvar='general/use_login_shell', action='store_true', help='Use a login shell for job scripts' ) if len(sys.argv) == 1: argparser.print_help() 
sys.exit(1) # Parse command line options = argparser.parse_args() # First configure logging with our generic configuration so as to be able # to print pretty messages; logging will be reconfigured by user's # configuration later site_config = config.load_config( os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py') ) site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' 'please use RFM_GRAYLOG_ADDRESS instead' ) os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER') if options.upgrade_config_file is not None: old_config, *new_config = options.upgrade_config_file.split( ':', maxsplit=1) new_config = new_config[0] if new_config else None try: new_config = config.convert_old_config(old_config, new_config) except Exception as e: printer.error(f'could not convert file: {e}') sys.exit(1) printer.info( f'Conversion successful! ' f'The converted file can be found at {new_config!r}.' ) sys.exit(0) # Now configure ReFrame according to the user configuration file try: try: site_config = config.load_config(options.config_file) except ReframeDeprecationWarning as e: printer.warning(e) converted = config.convert_old_config(options.config_file) printer.warning( f""configuration file has been converted "" f""to the new syntax here: '{converted}'"" ) site_config = config.load_config(converted) site_config.validate() # We ignore errors about unresolved sections or configuration # parameters here, because they might be defined at the individual # partition level and will be caught when we will instantiating # internally the system and partitions later on. 
site_config.select_subconfig(options.system, ignore_resolve_errors=True) for err in options.update_config(site_config): printer.warning(str(err)) # Update options from the selected execution mode if options.mode: mode_args = site_config.get(f'modes/@{options.mode}/options') # Parse the mode's options and reparse the command-line options = argparser.parse_args(mode_args) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) logging.configure_logging(site_config) except (OSError, ConfigError) as e: printer.error(f'failed to load configuration: {e}') sys.exit(1) logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') printer.inc_verbosity(site_config.get('general/0/verbose')) try: runtime.init_runtime(site_config) except ConfigError as e: printer.error(f'failed to initialize runtime: {e}') sys.exit(1) rt = runtime.runtime() try: if site_config.get('general/0/module_map_file'): rt.modules_system.load_mapping_from_file( site_config.get('general/0/module_map_file') ) if site_config.get('general/0/module_mappings'): for m in site_config.get('general/0/module_mappings'): rt.modules_system.load_mapping(m) except (ConfigError, OSError) as e: printer.error('could not load module mappings: %s' % e) sys.exit(1) if (os_ext.samefile(rt.stage_prefix, rt.output_prefix) and not site_config.get('general/0/keep_stage_files')): printer.error(""stage and output refer to the same directory; "" ""if this is on purpose, please use the "" ""'--keep-stage-files' option."") sys.exit(1) # Show configuration after everything is set up if options.show_config: config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) else: value = rt.get_option(config_param) if value is None: printer.error( f'no such configuration parameter found: {config_param}' ) else: printer.info(json.dumps(value, indent=2)) sys.exit(0) printer.debug(format_env(options.env_vars)) # Setup the check loader if options.retry_failed: with open(options.retry_failed) as f: try: restart_report = json.load(f) except json.JSONDecodeError as e: raise ReframeFatalError( f'could not load report file: {filename!r}' ) from e schema_filename = os.path.join(reframe.INSTALL_PREFIX, 'reframe', 'schemas', 'runreport.json') with open(schema_filename) as f: try: schema = json.load(f) except json.JSONDecodeError as e: raise ReframeFatalError( f""invalid schema: '{schema_filename}'"" ) from e try: jsonschema.validate(restart_report, schema) except jsonschema.ValidationError as e: raise ValueError(f""could not validate restart runreport: "" f""'{restart_report}'"") from e failed_checks = set() failed_checks_prefixes = set() # for run in restart_report['runs']: for testcase in restart_report['runs'][-1]['testcases']: if testcase['result'] == 'failure': failed_checks.add(hash(testcase['name']) ^ hash(testcase['system']) ^ hash(testcase['environment'])) failed_checks_prefixes.add(testcase['prefix']) loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), #failed_checks_prefixes, ignore_conflicts=site_config.get( 'general/0/ignore_check_conflicts') ) else: loader = RegressionCheckLoader( load_path=site_config.get('general/0/check_search_path'), recurse=site_config.get('general/0/check_search_recursive'), ignore_conflicts=site_config.get( 'general/0/ignore_check_conflicts') ) def print_infoline(param, value): param = param + ':' printer.info(f"" {param.ljust(18)} {value}"") session_info = { 
'cmdline': ' '.join(sys.argv), 'config_file': rt.site_config.filename, 'data_version': '1.0', 'hostname': socket.gethostname(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': os_ext.osuser(), 'version': os_ext.reframe_version(), 'workdir': os.getcwd(), } # Print command line printer.info(f""[ReFrame Setup]"") print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( f""launched by"", f""{session_info['user'] or ''}@{session_info['hostname']}"" ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline('settings file', f""{session_info['config_file']!r}"") print_infoline('check search path', f""{'(R) ' if loader.recurse else ''}"" f""{':'.join(loader.load_path)!r}"") print_infoline('stage directory', repr(session_info['prefix_stage'])) print_infoline('output directory', repr(session_info['prefix_output'])) printer.info('') try: # Locate and load checks try: checks_found = loader.load_all() except OSError as e: raise ReframeError from e # Filter checks by name checks_matched = checks_found if options.exclude_names: for name in options.exclude_names: checks_matched = filter(filters.have_not_name(name), checks_matched) if options.names: checks_matched = filter(filters.have_name('|'.join(options.names)), checks_matched) # Filter checks by tags for tag in options.tags: checks_matched = filter(filters.have_tag(tag), checks_matched) # Filter checks by prgenv if not options.skip_prgenv_check: for prgenv in options.prgenv: checks_matched = filter(filters.have_prgenv(prgenv), checks_matched) # Filter checks by system if not options.skip_system_check: checks_matched = filter( filters.have_partition(rt.system.partitions), checks_matched) # Filter checks further if options.gpu_only and options.cpu_only: printer.error(""options `--gpu-only' and `--cpu-only' "" ""are mutually exclusive"") sys.exit(1) if options.gpu_only: checks_matched = filter(filters.have_gpu_only(), checks_matched) elif options.cpu_only: checks_matched = filter(filters.have_cpu_only(), checks_matched) # Determine the allowed programming environments allowed_environs = {e.name for env_patt in options.prgenv for p in rt.system.partitions for e in p.environs if re.match(env_patt, e.name)} # Generate the test cases, validate dependencies and sort them checks_matched = list(checks_matched) # Disable hooks for c in checks_matched: for h in options.hooks: type(c).disable_hook(h) testcases_og = generate_testcases(checks_matched, options.skip_system_check, options.skip_prgenv_check, allowed_environs) if options.retry_failed: failed_cases = [tc for tc in testcases_og if tc.__hash__() in failed_checks] cases_graph = dependency.build_deps(failed_cases, testcases_og) testcases = dependency.toposort(cases_graph, is_subgraph=True) restored_tests = set() for c in testcases: for d in c.deps: if d.__hash__() not in failed_checks: restored_tests.add(d) else: testgraph = dependency.build_deps(testcases_og) dependency.validate_deps(testgraph) testcases = dependency.toposort(testgraph) # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() else: for m in site_config.get('general/0/unload_modules'): rt.modules_system.unload_module(m) # Load the environment for the current system try: runtime.loadenv(rt.system.preload_environ) except EnvironError as e: printer.error(""failed to load current system's environment; "" ""please check your configuration"") printer.debug(str(e)) raise 
for m in site_config.get('general/0/user_modules'): try: rt.modules_system.load_module(m, force=True) except EnvironError as e: printer.warning(""could not load module '%s' correctly: "" ""Skipping..."" % m) printer.debug(str(e)) options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle' # Act on checks success = True if options.list or options.list_detailed: list_checks(list(checks_matched), printer, options.list_detailed) elif options.run: # Setup the execution policy if options.exec_policy == 'serial': exec_policy = SerialExecutionPolicy() elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: # This should not happen, since choices are handled by # argparser printer.error(""unknown execution policy `%s': Exiting..."") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local exec_policy.strict_check = options.strict exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check exec_policy.keep_stage_files = site_config.get( 'general/0/keep_stage_files' ) try: errmsg = ""invalid option for --flex-alloc-nodes: '{0}'"" sched_flex_alloc_nodes = int(options.flex_alloc_nodes) if sched_flex_alloc_nodes <= 0: raise ConfigError(errmsg.format(options.flex_alloc_nodes)) except ValueError: sched_flex_alloc_nodes = options.flex_alloc_nodes exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes parsed_job_options = [] for opt in options.job_options: if opt.startswith('-') or opt.startswith('#'): parsed_job_options.append(opt) elif len(opt) == 1: parsed_job_options.append(f'-{opt}') else: parsed_job_options.append(f'--{opt}') exec_policy.sched_options = parsed_job_options try: max_retries = int(options.max_retries) except ValueError: raise ConfigError('--max-retries is not a valid integer: %s' % max_retries) from None runner = Runner(exec_policy, printer, max_retries) try: time_start = time.time() session_info['time_start'] = time.strftime( '%FT%T%z', time.localtime(time_start), ) if options.retry_failed: runner.restore(restored_tests, restart_report) runner.runall(testcases, testcases_og) finally: time_end = time.time() session_info['time_end'] = time.strftime( '%FT%T%z', time.localtime(time_end) ) session_info['time_elapsed'] = time_end - time_start # Print a retry report if we did any retries if runner.stats.failures(run=0): printer.info(runner.stats.retry_report()) # Print a failure report if we had failures in the last run if runner.stats.failures(): printer.info(runner.stats.failure_report()) success = False if options.failure_stats: printer.info(runner.stats.failure_stats()) if options.performance_report: printer.info(runner.stats.performance_report()) # Generate the report for this session report_file = os.path.normpath( os_ext.expandvars(rt.get_option('general/0/report_file')) ) basedir = os.path.dirname(report_file) if basedir: os.makedirs(basedir, exist_ok=True) # Build final JSON report run_stats = runner.stats.json() session_info.update({ 'num_cases': run_stats[0]['num_cases'], 'num_failures': run_stats[-1]['num_failures'] }) json_report = { 'session_info': session_info, 'runs': run_stats } report_file = generate_report_filename(report_file) try: with open(report_file, 'w') as fp: jsonext.dump(json_report, fp, indent=2) except OSError as e: printer.warning( f'failed to generate report in {report_file!r}: {e}' ) else: printer.error(""No action specified. Please specify `-l'/`-L' for "" ""listing or `-r' for running. 
"" ""Try `%s -h' for more options."" % argparser.prog) sys.exit(1) if not success: sys.exit(1) sys.exit(0) except KeyboardInterrupt: sys.exit(1) except ReframeError as e: printer.error(str(e)) sys.exit(1) except (Exception, ReframeFatalError): printer.error(format_exception(*sys.exc_info())) sys.exit(1) finally: try: if site_config.get('general/0/save_log_files'): logging.save_log_files(rt.output_prefix) except OSError as e: printer.error('could not save log file: %s' % e) sys.exit(1) " 30475,"def get_arguments_for_policy_command(policy_command_type, args): # type: (str, dict) -> (dict, str, Optional[str]) """""" Args: policy_command_type: create / update Returns: Returning a dict with all of the arguments. """""" description = args.get('description', '').encode('utf-8') from_part = args.get('fromPart', '').encode('utf-8') from_type = args.get('fromType', '').encode('utf-8') from_value = args.get('fromValue', '').encode('utf-8') to_type = args.get('toType', '').encode('utf-8') to_value = args.get('toValue', '').encode('utf-8') option = args.get('option', '').encode('utf-8') policy_obj = { 'description': description, 'fromPart': from_part, 'fromType': from_type, 'fromValue': from_value, 'toType': to_type, 'toValue': to_value } if policy_command_type == ""update"": policy_id = args.get('policy_id', '').encode('utf-8') if not policy_id: return_error(""You need to enter policy ID"") return policy_obj, option, policy_id demisto.results(""get arguments"") demisto.results(policy_obj) return policy_obj, option, None ","def get_arguments_for_policy_command(policy_command_type, args): # type: (str, dict) -> (dict, str, Optional[str]) """""" Args: policy_command_type (str): create / update Returns: Returning a dict with all of the arguments. """""" description = args.get('description', '').encode('utf-8') from_part = args.get('fromPart', '').encode('utf-8') from_type = args.get('fromType', '').encode('utf-8') from_value = args.get('fromValue', '').encode('utf-8') to_type = args.get('toType', '').encode('utf-8') to_value = args.get('toValue', '').encode('utf-8') option = args.get('option', '').encode('utf-8') policy_obj = { 'description': description, 'fromPart': from_part, 'fromType': from_type, 'fromValue': from_value, 'toType': to_type, 'toValue': to_value } if policy_command_type == ""update"": policy_id = args.get('policy_id', '').encode('utf-8') if not policy_id: return_error(""You need to enter policy ID"") return policy_obj, option, policy_id demisto.results(""get arguments"") demisto.results(policy_obj) return policy_obj, option, None " 51445,"def _get_handles_hist_legend(primitive, histtype): """""" Returns handles that can be used by legend. Deal with all hist types."""""" # why, matplotlib, why # https://stackoverflow.com/questions/47490586/change-the-legend-format-of-python-histogram import matplotlib as mpl def _get_color(obj): color = obj.get_facecolor() if color[-1] == 0: # no alpha, invisible color = obj.get_edgecolor() return color handles = primitive[-1] if ""step"" in histtype: handles = [mpl.lines.Line2D([], [], c=_get_color(obj[0])) for obj in handles] return handles ","def _get_handles_hist_legend(primitive, histtype): """""" Returns handles that can be used by legend. 
Deal with all hist types."""""" # why, matplotlib, why # https://stackoverflow.com/questions/47490586/change-the-legend-format-of-python-histogram plt = import_matplotlib_pyplot() def _get_color(obj): color = obj.get_facecolor() if color[-1] == 0: # no alpha, invisible color = obj.get_edgecolor() return color handles = primitive[-1] if ""step"" in histtype: handles = [plt.Line2D([], [], c=_get_color(obj[0])) for obj in handles] return handles " 52881,"def _namespace_has_repo_name(namespace, repo_name, resolution_cache): """""" Get all quay containers in the biocontainers repo """""" cache_key = ""galaxy.tool_util.deps.container_resolvers.mulled.util:namespace_repo_names"" if resolution_cache is not None and cache_key in resolution_cache: repo_names = resolution_cache.get(cache_key) else: log.debug(""Resolution cache miss: "" + cache_key) next_page = None repo_names = [] repos_headers = {""Accept-encoding"": ""gzip"", ""Accept"": ""application/json""} while True: repos_parameters = {""public"": ""true"", ""namespace"": namespace, ""next_page"": next_page} repos_response = requests.get( QUAY_REPOSITORY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=MULLED_SOCKET_TIMEOUT, ) repos_response_json = repos_response.json() repos = repos_response_json[""repositories""] repo_names += [r[""name""] for r in repos] next_page = repos_response_json.get(""next_page"") if not next_page: break if resolution_cache is not None: resolution_cache[cache_key] = repo_names return repo_name in repo_names ","def _namespace_has_repo_name(namespace, repo_name, resolution_cache): """""" Get all quay containers in the biocontainers repo """""" cache_key = ""galaxy.tool_util.deps.container_resolvers.mulled.util:namespace_repo_names"" repo_names = [] if resolution_cache is not None: if cache_key in resolution_cache: repo_names = resolution_cache[""cache_key""] else: log.debug(""Resolution cache miss: %s"", cache_key) if not repo_names: next_page = None repos_headers = {""Accept-encoding"": ""gzip"", ""Accept"": ""application/json""} while True: repos_parameters = {""public"": ""true"", ""namespace"": namespace, ""next_page"": next_page} repos_response = requests.get( QUAY_REPOSITORY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=MULLED_SOCKET_TIMEOUT, ) repos_response_json = repos_response.json() repos = repos_response_json[""repositories""] repo_names += [r[""name""] for r in repos] next_page = repos_response_json.get(""next_page"") if not next_page: break if resolution_cache is not None: resolution_cache[cache_key] = repo_names return repo_name in repo_names " 11402,"def test_cloud_custom_dict_ms_precision_is_eq_six_z_not(): cloud_custom_dict_with_extensions = { ""id"":""de0fd76c-4ef4-4dfb-ab3a-8f24a307e033"", ""source"":""https://egtest.dev/cloudcustomevent"", ""data"":{""team"": ""event grid squad""}, ""type"":""Azure.Sdk.Sample"", ""time"":""2021-02-18T20:18:10.123456Z"", ""specversion"":""1.0"", } event = CloudEvent.from_dict(cloud_custom_dict_with_extensions) assert event.data == {""team"": ""event grid squad""} assert event.__class__ == CloudEvent assert event.time.month == 2 assert event.time.day == 18 assert event.time.hour == 20 assert event.time.microsecond == 123456 ","def test_cloud_custom_dict_ms_precision_is_eq_six_z_not(): cloud_custom_dict_with_extensions = { ""id"":""de0fd76c-4ef4-4dfb-ab3a-8f24a307e034"", ""source"":""https://egtest.dev/cloudcustomevent"", ""data"":{""team"": ""event grid squad""}, ""type"":""Azure.Sdk.Sample"", 
""time"":""2021-02-18T20:18:10.123456Z"", ""specversion"":""1.0"", } event = CloudEvent.from_dict(cloud_custom_dict_with_extensions) assert event.data == {""team"": ""event grid squad""} assert event.__class__ == CloudEvent assert event.time.month == 2 assert event.time.day == 18 assert event.time.hour == 20 assert event.time.microsecond == 123456 " 26739,"def init_xframe_protection(app): """""" Add X-Frame-Options header. Use it avoid click-jacking attacks, by ensuring that their content is not embedded into other sites. See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options """""" x_frame_enabled = conf.getboolean('webserver', 'X_FRAME_ENABLED', fallback=True) if not x_frame_enabled: return def apply_caching(response): response.headers[""X-Frame-Options""] = ""DENY"" return response app.after_request(apply_caching) ","def init_xframe_protection(app): """""" Add X-Frame-Options header. Use it to avoid click-jacking attacks, by ensuring that their content is not embedded into other sites. See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options """""" x_frame_enabled = conf.getboolean('webserver', 'X_FRAME_ENABLED', fallback=True) if not x_frame_enabled: return def apply_caching(response): response.headers[""X-Frame-Options""] = ""DENY"" return response app.after_request(apply_caching) " 8400,"def spectrum_from_column_mapping(table, column_mapping, wcs=None): """""" Given a table and a mapping of the table column names to attributes on the Spectrum1D object, parse the information into a Spectrum1D. Parameters ---------- table : :class:`~astropy.table.Table` The table object (e.g. returned from `Table.read('data_file')`). column_mapping : dict A dictionary describing the relation between the table columns and the arguments of the `Spectrum1D` class, along with unit information. The dictionary keys should be the table column names while the values should be a two-tuple where the first element is the associated `Spectrum1D` keyword argument, and the second element is the unit for the file column (or `None` to take unit from the table):: column_mapping = {'FLUX': ('flux', 'Jy'), 'WAVE': ('spectral_axis', 'um')} wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS` WCS object passed to the Spectrum1D initializer. """""" spec_kwargs = {} # Associate columns of the file with the appropriate spectrum1d arguments for col_name, (kwarg_name, cm_unit) in column_mapping.items(): # If the table object couldn't parse any unit information, # fallback to the column mapper defined unit tab_unit = table[col_name].unit if tab_unit and cm_unit is not None: # If the table unit is defined, retrieve the quantity array for # the column kwarg_val = u.Quantity(table[col_name], tab_unit) # Attempt to convert the table unit to the user-defined unit. logging.debug(""Attempting auto-convert of table unit '%s' to "" ""user-provided unit '%s'."", tab_unit, cm_unit) if not isinstance(cm_unit, u.Unit): cm_unit = u.Unit(cm_unit) if cm_unit.physical_type in ('length', 'frequency'): # Spectral axis column information kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral()) elif 'spectral flux' in cm_unit.physical_type: # Flux/error column information kwarg_val = kwarg_val.to( cm_unit, equivalencies=u.spectral_density(1 * u.AA)) elif tab_unit: # The user has provided no unit in the column mapping, so we # use the unit as defined in the table object. 
kwarg_val = u.Quantity(table[col_name], tab_unit) elif cm_unit is not None: # In this case, the user has defined a unit in the column mapping # but no unit has been defined in the table object. kwarg_val = u.Quantity(table[col_name], cm_unit) else: # Neither the column mapping nor the table contain unit information. # This may be desired e.g. for the mask or bit flag arrays. kwarg_val = table[col_name] spec_kwargs.setdefault(kwarg_name, kwarg_val) # Ensure that the uncertainties are a subclass of NDUncertainty if spec_kwargs.get('uncertainty') is not None: spec_kwargs['uncertainty'] = StdDevUncertainty( spec_kwargs.get('uncertainty')) return Spectrum1D(**spec_kwargs, wcs=wcs, meta=table.meta) ","def spectrum_from_column_mapping(table, column_mapping, wcs=None): """""" Given a table and a mapping of the table column names to attributes on the Spectrum1D object, parse the information into a Spectrum1D. Parameters ---------- table : :class:`~astropy.table.Table` The table object (e.g. returned from ``Table.read('data_file')``). column_mapping : dict A dictionary describing the relation between the table columns and the arguments of the `Spectrum1D` class, along with unit information. The dictionary keys should be the table column names while the values should be a two-tuple where the first element is the associated `Spectrum1D` keyword argument, and the second element is the unit for the file column (or `None` to take unit from the table):: column_mapping = {'FLUX': ('flux', 'Jy'), 'WAVE': ('spectral_axis', 'um')} wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS` WCS object passed to the Spectrum1D initializer. """""" spec_kwargs = {} # Associate columns of the file with the appropriate spectrum1d arguments for col_name, (kwarg_name, cm_unit) in column_mapping.items(): # If the table object couldn't parse any unit information, # fallback to the column mapper defined unit tab_unit = table[col_name].unit if tab_unit and cm_unit is not None: # If the table unit is defined, retrieve the quantity array for # the column kwarg_val = u.Quantity(table[col_name], tab_unit) # Attempt to convert the table unit to the user-defined unit. logging.debug(""Attempting auto-convert of table unit '%s' to "" ""user-provided unit '%s'."", tab_unit, cm_unit) if not isinstance(cm_unit, u.Unit): cm_unit = u.Unit(cm_unit) if cm_unit.physical_type in ('length', 'frequency'): # Spectral axis column information kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral()) elif 'spectral flux' in cm_unit.physical_type: # Flux/error column information kwarg_val = kwarg_val.to( cm_unit, equivalencies=u.spectral_density(1 * u.AA)) elif tab_unit: # The user has provided no unit in the column mapping, so we # use the unit as defined in the table object. kwarg_val = u.Quantity(table[col_name], tab_unit) elif cm_unit is not None: # In this case, the user has defined a unit in the column mapping # but no unit has been defined in the table object. kwarg_val = u.Quantity(table[col_name], cm_unit) else: # Neither the column mapping nor the table contain unit information. # This may be desired e.g. for the mask or bit flag arrays. 
kwarg_val = table[col_name] spec_kwargs.setdefault(kwarg_name, kwarg_val) # Ensure that the uncertainties are a subclass of NDUncertainty if spec_kwargs.get('uncertainty') is not None: spec_kwargs['uncertainty'] = StdDevUncertainty( spec_kwargs.get('uncertainty')) return Spectrum1D(**spec_kwargs, wcs=wcs, meta=table.meta) " 28553,"def get_draws(pyjags_samples: tp.Dict[str, np.ndarray], variables: tp.Optional[tp.Union[str, tp.Iterable[str]]] = None, warmup: bool = False, warmup_iterations: int = 0) \ -> tp.Tuple[tp.Dict[str, np.ndarray], tp.Dict[str, np.ndarray]]: """""" Parameters ---------- pyjags_samples: a didctionary mapping variable names to NumPy arrays of MCMC chains of samples with shape (parameter_dimension, chain_length, number_of_chains) variables: the variables to extract from the samples dictionary warmup: whether or not to return warmup draws in data_warmup warmup_iterations: the number of warmup iterations if any Returns ------- """""" data_warmup = OrderedDict() if variables is None: variables = list(pyjags_samples.keys()) elif isinstance(variables, str): variables = [variables] if not isinstance(variables, Sequence): raise TypeError('variables must be of type Sequence or str') variables = tuple(variables) if warmup_iterations > 0: warmup_samples, actual_samples =\ _split_pyjags_samples_in_warmup_and_actual_samples( samples=pyjags_samples, warmup_iterations=0, variable_names=variables) data = \ _convert_pyjags_samples_dictionary_to_arviz_samples_dictionary( samples=actual_samples, variable_names=variables) if warmup: data_warmup = \ _convert_pyjags_samples_dictionary_to_arviz_samples_dictionary( samples=warmup_samples, variable_names=variables) else: data = \ _convert_pyjags_samples_dictionary_to_arviz_samples_dictionary( samples=pyjags_samples, variable_names=variables) return data, data_warmup ","def get_draws(pyjags_samples: tp.Dict[str, np.ndarray], variables: tp.Optional[tp.Union[str, tp.Iterable[str]]] = None, warmup: bool = False, warmup_iterations: int = 0) \ -> tp.Tuple[tp.Dict[str, np.ndarray], tp.Dict[str, np.ndarray]]: """""" Parameters ---------- pyjags_samples: a dictionary mapping variable names to NumPy arrays of MCMC chains of samples with shape (parameter_dimension, chain_length, number_of_chains) variables: the variables to extract from the samples dictionary warmup: whether or not to return warmup draws in data_warmup warmup_iterations: the number of warmup iterations if any Returns ------- """""" data_warmup = OrderedDict() if variables is None: variables = list(pyjags_samples.keys()) elif isinstance(variables, str): variables = [variables] if not isinstance(variables, Sequence): raise TypeError('variables must be of type Sequence or str') variables = tuple(variables) if warmup_iterations > 0: warmup_samples, actual_samples =\ _split_pyjags_samples_in_warmup_and_actual_samples( samples=pyjags_samples, warmup_iterations=0, variable_names=variables) data = \ _convert_pyjags_samples_dictionary_to_arviz_samples_dictionary( samples=actual_samples, variable_names=variables) if warmup: data_warmup = \ _convert_pyjags_samples_dictionary_to_arviz_samples_dictionary( samples=warmup_samples, variable_names=variables) else: data = \ _convert_pyjags_samples_dictionary_to_arviz_samples_dictionary( samples=pyjags_samples, variable_names=variables) return data, data_warmup " 31089,"def copy_notes_to_target_incident(args: Dict[str, Any]) -> CommandResults: target_incident = args.get('target_incident', None) if not target_incident: raise ValueError('Target Incident ID 
not specified') tags = argToList(args.get('tags')) entries = demisto.executeCommand('getEntries', {'filter': {'tags': tags}}) note_entries: List = [] md: str = '' if isinstance(entries, list) and len(entries) > 0: for n in entries: if 'Note' in n and n['Note'] is True: note_entries.append(n) if len(note_entries) > 0: demisto.executeCommand(""addEntries"", {""id"": target_incident, ""entries"": note_entries}) md = f'## {len(note_entries)} notes copied' else: md = '## No notes found' else: md = '## No notes found' return CommandResults(readable_output=md) ","def copy_notes_to_target_incident(args: Dict[str, Any]) -> CommandResults: target_incident = args.get('target_incident', None) if not target_incident: raise ValueError('Target Incident ID not specified') tags = argToList(args.get('tags')) entries = demisto.executeCommand('getEntries', {'filter': {'tags': tags}}) note_entries: List = [] md: str = '' if isinstance(entries, list) and len(entries) > 0: for entry in entries: if 'Note' in n and n['Note'] is True: note_entries.append(n) if len(note_entries) > 0: demisto.executeCommand(""addEntries"", {""id"": target_incident, ""entries"": note_entries}) md = f'## {len(note_entries)} notes copied' else: md = '## No notes found' else: md = '## No notes found' return CommandResults(readable_output=md) " 10106,"def check_if_valuemap_exists(module, zbx, name): """"""Checks if value map exists. Args: module: AnsibleModule object zbx: ZabbixAPI object name: Zabbix valuemap name Returns: tuple: First element is True if valuemap exisits and False otherwise. Second element is a dicionary of valuemap object if it exists. """""" try: valuemap_list = zbx.valuemap.get({ 'output': 'extend', 'selectMappings': 'extend', 'filter': {'name': [name]} }) if len(valuemap_list) < 1: return False, None else: return True, valuemap_list[0] except Exception as e: module.fail_json(msg=""Failed to get ID of the valuemap '{name}': {e}"".format(name=name, e=e)) ","def check_if_valuemap_exists(module, zbx, name): """"""Checks if value map exists. Args: module: AnsibleModule object zbx: ZabbixAPI object name: Zabbix valuemap name Returns: tuple: First element is True if valuemap exisits and False otherwise. Second element is a dictionary of valuemap object if it exists. 
"""""" try: valuemap_list = zbx.valuemap.get({ 'output': 'extend', 'selectMappings': 'extend', 'filter': {'name': [name]} }) if len(valuemap_list) < 1: return False, None else: return True, valuemap_list[0] except Exception as e: module.fail_json(msg=""Failed to get ID of the valuemap '{name}': {e}"".format(name=name, e=e)) " 34963,"def _schedule_dense_small_batch(cfg, s, C): A, weights = C.op.input_tensors _, in_dim = get_const_tuple(weights.shape) cfg.define_split(""tile_k"", in_dim, num_outputs=2) if cfg.is_fallback: cfg[""tile_k""] = SplitEntity([-1, 64] if in_dim > 64 else [1, 64]) _, kf = cfg[""tile_k""].apply(s, C, C.op.reduce_axis[0]) CF = s.rfactor(C, kf) if C.op in s.outputs: Out = C else: Out = s.outputs[0].output(0) s[C].compute_at(s[Out], s[Out].op.axis[1]) s[Out].bind(s[Out].op.axis[0], te.thread_axis(""blockIdx.y"")) s[Out].bind(s[Out].op.axis[1], te.thread_axis(""blockIdx.x"")) tx = s[C].op.reduce_axis[0] thread_x = te.thread_axis(""threadIdx.x"") s[C].bind(tx, thread_x) s[CF].compute_at(s[C], tx) s[C].set_store_predicate(thread_x.var.equal(0)) s[Out].set_store_predicate(thread_x.var.equal(0)) ","def _schedule_dense_small_batch(cfg, s, C): _, weights = C.op.input_tensors _, in_dim = get_const_tuple(weights.shape) cfg.define_split(""tile_k"", in_dim, num_outputs=2) if cfg.is_fallback: cfg[""tile_k""] = SplitEntity([-1, 64] if in_dim > 64 else [1, 64]) _, kf = cfg[""tile_k""].apply(s, C, C.op.reduce_axis[0]) CF = s.rfactor(C, kf) if C.op in s.outputs: Out = C else: Out = s.outputs[0].output(0) s[C].compute_at(s[Out], s[Out].op.axis[1]) s[Out].bind(s[Out].op.axis[0], te.thread_axis(""blockIdx.y"")) s[Out].bind(s[Out].op.axis[1], te.thread_axis(""blockIdx.x"")) tx = s[C].op.reduce_axis[0] thread_x = te.thread_axis(""threadIdx.x"") s[C].bind(tx, thread_x) s[CF].compute_at(s[C], tx) s[C].set_store_predicate(thread_x.var.equal(0)) s[Out].set_store_predicate(thread_x.var.equal(0)) " 31069,"def fetch_incidents(client: Client, fetch_time: str, fetch_limit: int, last_run: dict, saved_report_id: str): """""" Performs the fetch incidents functionality of Demisto, which means that every minute if fetches incidents from Symantec DLP and uploads them to Demisto server. :param client: Demisto Client :param fetch_time: For the first time the integration is enabled with the fetch incidents functionality, the fetch time indicates from what time to start fetching existing incidents in Symantec DLP system. 
:param fetch_limit: Indicates how many incidents to fetch every minute :param last_run: Demisto last run object :param saved_report_id: The report ID to retrieve the incidents from :return: A list of Demisto incidents """""" # We use parse to get out time in datetime format and not iso, that's what Symantec DLP is expecting to get last_id_fetched = last_run.get('last_incident_id') if last_run and last_run.get('last_fetched_event_iso'): last_update_time = parse(last_run['last_fetched_event_iso']) else: last_update_time = parse_date_range(fetch_time)[0] incidents = [] incidents_ids = helpers.serialize_object(client.service.incidentList( savedReportId=saved_report_id, incidentCreationDateLaterThan=last_update_time )).get('incidentId', '') if incidents_ids: last_incident_time: str = '' last_incident_id: str = '' for incident_id in incidents_ids: if last_id_fetched == incident_id: # Skipping last incident from last cycle if fetched again continue if fetch_limit == 0: break fetch_limit -= 1 incident_details = json.dumps(helpers.serialize_object(client.service.incidentDetail( incidentId=incident_id )[0]), default=datetime_to_iso_format) incident_creation_time = json.loads(incident_details).get('incident', {}).get('incidentCreationDate') incident: dict = { 'rawJSON': incident_details, 'name': f'Symantec DLP incident {incident_id}', 'occurred': incident_creation_time } _, _, file_entries, _ = get_incident_binaries(client, incident_id, False, False) if file_entries: attachments: list = [] for file_entry in file_entries: attachments.append({ 'path': file_entry['FileID'], 'name': file_entry['File'] }) incident['attachment'] = attachments incidents.append(incident) if incident_id == incidents_ids[-1]: last_incident_time = incident_creation_time last_incident_id = incident_id demisto.setLastRun( { 'last_fetched_event_iso': last_incident_time, 'last_incident_id': last_incident_id } ) demisto.incidents(incidents) ","def fetch_incidents(client: Client, fetch_time: str, fetch_limit: int, last_run: dict, saved_report_id: str): """""" Performs the fetch incidents functionality of Demisto, which means that every minute if fetches incidents from Symantec DLP and uploads them to Demisto server. :param client: Demisto Client :param fetch_time: For the first time the integration is enabled with the fetch incidents functionality, the fetch time indicates from what time to start fetching existing incidents in Symantec DLP system. 
:param fetch_limit: Indicates how many incidents to fetch every minute :param last_run: Demisto last run object :param saved_report_id: The report ID to retrieve the incidents from :return: A list of Demisto incidents """""" # We use parse to get out time in datetime format and not iso, that's what Symantec DLP is expecting to get last_id_fetched = last_run.get('last_incident_id') if last_run and last_run.get('last_fetched_event_iso'): last_update_time = parse(last_run['last_fetched_event_iso']) else: last_update_time = parse_date_range(fetch_time)[0] incidents = [] incidents_ids = helpers.serialize_object(client.service.incidentList( savedReportId=saved_report_id, incidentCreationDateLaterThan=last_update_time )).get('incidentId', '') if incidents_ids: last_incident_time: str = '' last_incident_id: str = '' for incident_id in incidents_ids: if last_id_fetched and last_id_fetched == incident_id: # Skipping last incident from last cycle if fetched again continue if fetch_limit == 0: break fetch_limit -= 1 incident_details = json.dumps(helpers.serialize_object(client.service.incidentDetail( incidentId=incident_id )[0]), default=datetime_to_iso_format) incident_creation_time = json.loads(incident_details).get('incident', {}).get('incidentCreationDate') incident: dict = { 'rawJSON': incident_details, 'name': f'Symantec DLP incident {incident_id}', 'occurred': incident_creation_time } _, _, file_entries, _ = get_incident_binaries(client, incident_id, False, False) if file_entries: attachments: list = [] for file_entry in file_entries: attachments.append({ 'path': file_entry['FileID'], 'name': file_entry['File'] }) incident['attachment'] = attachments incidents.append(incident) if incident_id == incidents_ids[-1]: last_incident_time = incident_creation_time last_incident_id = incident_id demisto.setLastRun( { 'last_fetched_event_iso': last_incident_time, 'last_incident_id': last_incident_id } ) demisto.incidents(incidents) " 34124,"def get_token(): clitoken = cmdline_args.token envtoken = os.environ[""RASA_NLU_TOKEN""] if conflicting_tokens(clitoken, envtoken): raise Exception( ""RASA_NLU_TOKEN is set both with the -t option,"" "" with value `{}`, and with and environment variable, "" ""with value `{}`. "" ""Please set the token with just one method "" ""to avoid unexpected behaviours."".format( clitoken, envtoken)) token = clitoken or envtoken return token ","def get_token(): clitoken = cmdline_args.token envtoken = os.environ[""RASA_NLU_TOKEN""] if clitoken and envtoken: raise Exception( ""RASA_NLU_TOKEN is set both with the -t option,"" "" with value `{}`, and with and environment variable, "" ""with value `{}`. 
"" ""Please set the token with just one method "" ""to avoid unexpected behaviours."".format( clitoken, envtoken)) token = clitoken or envtoken return token " 31694,"def ioc_from_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]: """""" Returns the results of the Parse IOCs from URL API call Args: client: IOCParser client to use args: All command arguments, ulr, limit and keys (if specified) Returns: CommandResults object containing the results of the parse from url as returned from the API and its readable output """""" url = args.get('url') keys = argToList(args.get('keys')) limit = args.get('limit') if not keys: keys = KEYS keys = list_to_upper_case(keys) if not url: raise ValueError('url not specified') response = client.ioc_from_url(url) response_data = process_response(response, keys, limit) command_results = [] outputs = {'url': url, 'Results': []} for key, values in response_data.items(): for value in values: outputs['Results'].append({'type': key, 'value': value}) for ioc_type, iocs in response_data.items(): command_results.append(CommandResults( readable_output=tableToMarkdown(f'results for {ioc_type} from {url}', iocs, headers=ioc_type), outputs_prefix=f'IOCParser.parseFromUrl', outputs=outputs )) command_results.append(CommandResults( raw_response=response_data )) return command_results ","def ioc_from_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]: """""" Returns the results of the Parse IOCs from URL API call Args: client: IOCParser client to use args: All command arguments, url, limit, and keys (if specified). Returns: CommandResults object containing the results of the parse from url as returned from the API and its readable output """""" url = args.get('url') keys = argToList(args.get('keys')) limit = args.get('limit') if not keys: keys = KEYS keys = list_to_upper_case(keys) if not url: raise ValueError('url not specified') response = client.ioc_from_url(url) response_data = process_response(response, keys, limit) command_results = [] outputs = {'url': url, 'Results': []} for key, values in response_data.items(): for value in values: outputs['Results'].append({'type': key, 'value': value}) for ioc_type, iocs in response_data.items(): command_results.append(CommandResults( readable_output=tableToMarkdown(f'results for {ioc_type} from {url}', iocs, headers=ioc_type), outputs_prefix=f'IOCParser.parseFromUrl', outputs=outputs )) command_results.append(CommandResults( raw_response=response_data )) return command_results " 8435,"def _compute_line_flux(spectrum, regions=None): if regions is not None: calc_spectrum = extract_region(spectrum, regions) else: calc_spectrum = spectrum # Average dispersion in the line region avg_dx = (np.abs(np.diff(calc_spectrum.spectral_axis))).quantity # Account for the existence of a mask. if hasattr(spectrum, 'mask') and spectrum.mask is not None: # Cannot use unmasked values because of average dispersion. # Masked values must enter sum calculation valued as zeros. flux_ = np.where(calc_spectrum.mask, 0, calc_spectrum.flux)[1:] else: flux_ = calc_spectrum.flux[1:] line_flux = np.sum(flux_ * avg_dx) # TODO: we may want to consider converting to erg / cm^2 / sec by default return line_flux ","def _compute_line_flux(spectrum, regions=None): if regions is not None: calc_spectrum = extract_region(spectrum, regions) else: calc_spectrum = spectrum # Average dispersion in the line region avg_dx = (np.abs(np.diff(calc_spectrum.spectral_axis))).quantity # Account for the existence of a mask. 
if hasattr(spectrum, 'mask') and spectrum.mask is not None: # Cannot use unmasked values because of average dispersion. # Masked values must enter sum calculation valued as zeros. flux = np.where(calc_spectrum.mask, 0, calc_spectrum.flux)[1:] else: flux_ = calc_spectrum.flux[1:] line_flux = np.sum(flux_ * avg_dx) # TODO: we may want to consider converting to erg / cm^2 / sec by default return line_flux " 7430,"def stain_color_matrix(colors): """"""Creates a stain color matrix for a combination of stains. This routine knows some common stains, their colors are taken from other tools implementing stain unmixing, but will likely not exactly match the colors of the stains in your image. This is because the color of a stain depends on many factors, including the chemistry, the microscope light source, and the RGB camera capturing the image. It is always best to measure your stain colors. Known stains are: ""Hematoxylin"" ""Eosin"" ""DAB"" ""AEC"" ""Alcian Blue"" ""Aniline Blue"" ""Azocarmine"" ""FastBlue"" ""FastRed"" ""Feulgen"" ""Light Green"" ""Methyl Blue"" ""Methyl Green"" ""Orange-G"" ""PAS"" ""Ponceau Fuchsin"" See separate_stains() and combine_stains(). Parameters ---------- colors : iterable with 1 to 3 elements. Each element must be either a string for a known stain name (see below) or an RGB triplet in the form of an iterable. Returns ------- out : (..., 3) ndarray The stain color matrix, an Nx3 matrix, where N is the length of the input `colors`. Raises ------ ValueError If `colors` contains an unknown stain name or an illegal RGB triplet, or if `colors` is empty or has more than 3 elements. References ---------- .. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html """""" # Following matrices are adapted form the Java code written by G.Landini. 
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html # Similar values can be found in CellProfiler: # https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py stain_colors = { ""Hematoxylin"": (0.650, 0.704, 0.286), ""Eosin"": (0.092789, 0.954111, 0.283111), ""DAB"": (0.268, 0.570, 0.776), ""AEC"": (0.2743, 0.6796, 0.6803), ""Alcian Blue"": (0.552556, 0.7544, 0.353744), ""Aniline Blue"": (0.853033, 0.508733, 0.112656), ""Azocarmine"": (0.09289875, 0.8662008, 0.49098468), ""FastBlue"": (0.74890292, 0.60624161, 0.26731082), ""FastRed"": (0.21393921, 0.85112669, 0.47794022), ""Feulgen"": (0.46420921, 0.83008335, 0.30827187), ""Light Green"": (0.94705542, 0.25373821, 0.19650764), ""Methyl Blue"": (0.7995107, 0.5913521, 0.10528667), ""Methyl Green"": (0.98003, 0.144316, 0.133146), ""Orange-G"": (0.10732849, 0.36765403, 0.9237484), ""PAS"": (0.175411, 0.972178, 0.154589), ""Ponceau Fuchsin"": (0.09997159, 0.73738605, 0.6680326), } N = len(colors) if N < 1 or N > 3: msg = (f'the input `colors` must have between 1 and 3 elements, got {N}') raise ValueError(msg) out = np.zeros((N, 3)) for ii, val in enumerate(colors): if isinstance(val, str): if not val in stain_colors: msg = (f'the input `colors` contains {val}, which I do not recognize as a stain') raise ValueError(msg) val = stain_colors[val] else: if len(val) != 3 or not all(isinstance(v, float) for v in val): msg = (f'the input `colors` contains {val}, which is not an RGB triplet') raise ValueError(msg) norm = np.linalg.norm(val) val = [v / norm for v in val] out[ii, :] = val return out ","def stain_color_matrix(colors): """"""Creates a stain color matrix for a combination of stains. This routine knows some common stains, their colors are taken from other tools implementing stain unmixing, but will likely not exactly match the colors of the stains in your image. This is because the color of a stain depends on many factors, including the chemistry, the microscope light source, and the RGB camera capturing the image. It is always best to measure your stain colors. Known stains are: ""Hematoxylin"" ""Eosin"" ""DAB"" ""AEC"" ""Alcian Blue"" ""Aniline Blue"" ""Azocarmine"" ""FastBlue"" ""FastRed"" ""Feulgen"" ""Light Green"" ""Methyl Blue"" ""Methyl Green"" ""Orange-G"" ""PAS"" ""Ponceau Fuchsin"" See separate_stains() and combine_stains(). Parameters ---------- colors : iterable with 1 to 3 elements. Each element must be either a string for a known stain name (see below) or an RGB triplet in the form of an iterable. Returns ------- out : (..., 3) ndarray The stain color matrix, of shape (N, 3), where N is the length of the input `colors`. Raises ------ ValueError If `colors` contains an unknown stain name or an illegal RGB triplet, or if `colors` is empty or has more than 3 elements. References ---------- .. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html """""" # Following matrices are adapted form the Java code written by G.Landini. 
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html # Similar values can be found in CellProfiler: # https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py stain_colors = { ""Hematoxylin"": (0.650, 0.704, 0.286), ""Eosin"": (0.092789, 0.954111, 0.283111), ""DAB"": (0.268, 0.570, 0.776), ""AEC"": (0.2743, 0.6796, 0.6803), ""Alcian Blue"": (0.552556, 0.7544, 0.353744), ""Aniline Blue"": (0.853033, 0.508733, 0.112656), ""Azocarmine"": (0.09289875, 0.8662008, 0.49098468), ""FastBlue"": (0.74890292, 0.60624161, 0.26731082), ""FastRed"": (0.21393921, 0.85112669, 0.47794022), ""Feulgen"": (0.46420921, 0.83008335, 0.30827187), ""Light Green"": (0.94705542, 0.25373821, 0.19650764), ""Methyl Blue"": (0.7995107, 0.5913521, 0.10528667), ""Methyl Green"": (0.98003, 0.144316, 0.133146), ""Orange-G"": (0.10732849, 0.36765403, 0.9237484), ""PAS"": (0.175411, 0.972178, 0.154589), ""Ponceau Fuchsin"": (0.09997159, 0.73738605, 0.6680326), } N = len(colors) if N < 1 or N > 3: msg = (f'the input `colors` must have between 1 and 3 elements, got {N}') raise ValueError(msg) out = np.zeros((N, 3)) for ii, val in enumerate(colors): if isinstance(val, str): if not val in stain_colors: msg = (f'the input `colors` contains {val}, which I do not recognize as a stain') raise ValueError(msg) val = stain_colors[val] else: if len(val) != 3 or not all(isinstance(v, float) for v in val): msg = (f'the input `colors` contains {val}, which is not an RGB triplet') raise ValueError(msg) norm = np.linalg.norm(val) val = [v / norm for v in val] out[ii, :] = val return out " 41075,"def get_parser(): """"""Get parser of training arguments."""""" parser = configargparse.ArgumentParser( description='Train a new text-to-speech (TTS) model on one CPU, one or multiple GPUs', config_file_parser_class=configargparse.YAMLConfigFileParser, formatter_class=configargparse.ArgumentDefaultsHelpFormatter) # general configuration parser.add('--config', is_config_file=True, help='config file path') parser.add('--config2', is_config_file=True, help='second config file path that overwrites the settings in `--config`.') parser.add('--config3', is_config_file=True, help='third config file path that overwrites the settings in `--config` and `--config2`.') parser.add_argument('--ngpu', default=None, type=int, help='Number of GPUs. 
If not given, use all visible devices') parser.add_argument('--backend', default='pytorch', type=str, choices=['chainer', 'pytorch'], help='Backend library') parser.add_argument('--outdir', type=str, required=True, help='Output directory') parser.add_argument('--debugmode', default=1, type=int, help='Debugmode') parser.add_argument('--seed', default=1, type=int, help='Random seed') parser.add_argument('--resume', '-r', default='', type=str, nargs='?', help='Resume the training from snapshot') parser.add_argument('--minibatches', '-N', type=int, default='-1', help='Process only N minibatches (for debug)') parser.add_argument('--verbose', '-V', default=0, type=int, help='Verbose option') parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?', help=""Tensorboard log directory path"") parser.add_argument('--eval-interval-epochs', default=1, type=int, help=""Evaluation interval epochs"") parser.add_argument('--save-interval-epochs', default=1, type=int, help=""Save interval epochs"") parser.add_argument('--report-interval-iters', default=100, type=int, help=""Report interval iterations"") # task related parser.add_argument('--train-json', type=str, required=True, help='Filename of training json') parser.add_argument('--valid-json', type=str, required=True, help='Filename of validation json') # network architecture parser.add_argument('--model-module', type=str, default=""espnet.nets.pytorch_backend.e2e_tts_tacotron2:Tacotron2"", help='model defined module') # minibatch related parser.add_argument('--sortagrad', default=0, type=int, nargs='?', help=""How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs"") parser.add_argument('--batch-sort-key', default='shuffle', type=str, choices=['shuffle', 'output', 'input'], nargs='?', help='Batch sorting key. ""shuffle"" only work with --batch-count ""seq"".') parser.add_argument('--batch-count', default='auto', choices=BATCH_COUNT_CHOICES, help='How to count batch_size. 
The default (auto) will find how to count by args.') parser.add_argument('--batch-size', '--batch-seqs', '-b', default=0, type=int, help='Maximum seqs in a minibatch (0 to disable)') parser.add_argument('--batch-bins', default=0, type=int, help='Maximum bins in a minibatch (0 to disable)') parser.add_argument('--batch-frames-in', default=0, type=int, help='Maximum input frames in a minibatch (0 to disable)') parser.add_argument('--batch-frames-out', default=0, type=int, help='Maximum output frames in a minibatch (0 to disable)') parser.add_argument('--batch-frames-inout', default=0, type=int, help='Maximum input+output frames in a minibatch (0 to disable)') parser.add_argument('--maxlen-in', '--batch-seq-maxlen-in', default=100, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the input sequence length > ML.') parser.add_argument('--maxlen-out', '--batch-seq-maxlen-out', default=200, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the output sequence length > ML') parser.add_argument('--num-iter-processes', default=0, type=int, help='Number of processes of iterator') parser.add_argument('--preprocess-conf', type=str, default=None, help='The configuration file for the pre-processing') parser.add_argument('--use-speaker-embedding', default=False, type=strtobool, help='Whether to use speaker embedding') parser.add_argument('--use-second-target', default=False, type=strtobool, help='Whether to use second target') # optimization related parser.add_argument('--opt', default='adam', type=str, choices=['adam', 'noam'], help='Optimizer') parser.add_argument('--accum-grad', default=1, type=int, help='Number of gradient accumuration') parser.add_argument('--lr', default=1e-3, type=float, help='Learning rate for optimizer') parser.add_argument('--eps', default=1e-6, type=float, help='Epsilon for optimizer') parser.add_argument('--weight-decay', default=1e-6, type=float, help='Weight decay coefficient for optimizer') parser.add_argument('--epochs', '-e', default=30, type=int, help='Number of maximum epochs') parser.add_argument('--early-stop-criterion', default='validation/main/loss', type=str, nargs='?', help=""Value to monitor to trigger an early stopping of the training"") parser.add_argument('--patience', default=3, type=int, nargs='?', help=""Number of epochs to wait without improvement before stopping the training"") parser.add_argument('--grad-clip', default=1, type=float, help='Gradient norm threshold to clip') parser.add_argument('--num-save-attention', default=5, type=int, help='Number of samples of attention to be saved') parser.add_argument('--keep-all-data-on-mem', default=False, type=strtobool, help='Whether to keep all data on memory') # finetuning related parser.add_argument('--enc-init', default=None, type=str, help='Pre-trained TTS model path to initialize encoder.') parser.add_argument('--enc-init-mods', default='enc.', type=lambda s: [str(mod) for mod in s.split(',') if s != ''], help='List of encoder modules to initialize, separated by a comma.') parser.add_argument('--dec-init', default=None, type=str, help='Pre-trained TTS model path to initialize decoder.') parser.add_argument('--dec-init-mods', default='dec.', type=lambda s: [str(mod) for mod in s.split(',') if s != ''], help='List of decoder modules to initialize, separated by a comma.') return parser ","def get_parser(): """"""Get parser of training arguments."""""" parser = configargparse.ArgumentParser( description='Train a new text-to-speech (TTS) model on one 
CPU, one or multiple GPUs', config_file_parser_class=configargparse.YAMLConfigFileParser, formatter_class=configargparse.ArgumentDefaultsHelpFormatter) # general configuration parser.add('--config', is_config_file=True, help='config file path') parser.add('--config2', is_config_file=True, help='second config file path that overwrites the settings in `--config`.') parser.add('--config3', is_config_file=True, help='third config file path that overwrites the settings in `--config` and `--config2`.') parser.add_argument('--ngpu', default=None, type=int, help='Number of GPUs. If not given, use all visible devices') parser.add_argument('--backend', default='pytorch', type=str, choices=['chainer', 'pytorch'], help='Backend library') parser.add_argument('--outdir', type=str, required=True, help='Output directory') parser.add_argument('--debugmode', default=1, type=int, help='Debugmode') parser.add_argument('--seed', default=1, type=int, help='Random seed') parser.add_argument('--resume', '-r', default='', type=str, nargs='?', help='Resume the training from snapshot') parser.add_argument('--minibatches', '-N', type=int, default='-1', help='Process only N minibatches (for debug)') parser.add_argument('--verbose', '-V', default=0, type=int, help='Verbose option') parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?', help=""Tensorboard log directory path"") parser.add_argument('--eval-interval-epochs', default=1, type=int, help=""Evaluation interval epochs"") parser.add_argument('--save-interval-epochs', default=1, type=int, help=""Save interval epochs"") parser.add_argument('--report-interval-iters', default=100, type=int, help=""Report interval iterations"") # task related parser.add_argument('--train-json', type=str, required=True, help='Filename of training json') parser.add_argument('--valid-json', type=str, required=True, help='Filename of validation json') # network architecture parser.add_argument('--model-module', type=str, default=""espnet.nets.pytorch_backend.e2e_tts_tacotron2:Tacotron2"", help='model defined module') # minibatch related parser.add_argument('--sortagrad', default=0, type=int, nargs='?', help=""How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs"") parser.add_argument('--batch-sort-key', default='shuffle', type=str, choices=['shuffle', 'output', 'input'], nargs='?', help='Batch sorting key. ""shuffle"" only work with --batch-count ""seq"".') parser.add_argument('--batch-count', default='auto', choices=BATCH_COUNT_CHOICES, help='How to count batch_size. 
The default (auto) will find how to count by args.') parser.add_argument('--batch-size', '--batch-seqs', '-b', default=0, type=int, help='Maximum seqs in a minibatch (0 to disable)') parser.add_argument('--batch-bins', default=0, type=int, help='Maximum bins in a minibatch (0 to disable)') parser.add_argument('--batch-frames-in', default=0, type=int, help='Maximum input frames in a minibatch (0 to disable)') parser.add_argument('--batch-frames-out', default=0, type=int, help='Maximum output frames in a minibatch (0 to disable)') parser.add_argument('--batch-frames-inout', default=0, type=int, help='Maximum input+output frames in a minibatch (0 to disable)') parser.add_argument('--maxlen-in', '--batch-seq-maxlen-in', default=100, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the input sequence length > ML.') parser.add_argument('--maxlen-out', '--batch-seq-maxlen-out', default=200, type=int, metavar='ML', help='When --batch-count=seq, batch size is reduced if the output sequence length > ML') parser.add_argument('--num-iter-processes', default=0, type=int, help='Number of processes of iterator') parser.add_argument('--preprocess-conf', type=str, default=None, help='The configuration file for the pre-processing') parser.add_argument('--use-speaker-embedding', default=False, type=strtobool, help='Whether to use speaker embedding') parser.add_argument('--use-second-target', default=False, type=strtobool, help='Whether to use second target') # optimization related parser.add_argument('--opt', default='adam', type=str, choices=['adam', 'noam'], help='Optimizer') parser.add_argument('--accum-grad', default=1, type=int, help='Number of gradient accumuration') parser.add_argument('--lr', default=1e-3, type=float, help='Learning rate for optimizer') parser.add_argument('--eps', default=1e-6, type=float, help='Epsilon for optimizer') parser.add_argument('--weight-decay', default=1e-6, type=float, help='Weight decay coefficient for optimizer') parser.add_argument('--epochs', '-e', default=30, type=int, help='Number of maximum epochs') parser.add_argument('--early-stop-criterion', default='validation/main/loss', type=str, nargs='?', help=""Value to monitor to trigger an early stopping of the training"") parser.add_argument('--patience', default=3, type=int, nargs='?', help=""Number of epochs to wait without improvement before stopping the training"") parser.add_argument('--grad-clip', default=1, type=float, help='Gradient norm threshold to clip') parser.add_argument('--num-save-attention', default=5, type=int, help='Number of samples of attention to be saved') parser.add_argument('--keep-all-data-on-mem', default=False, type=strtobool, help='Whether to keep all data on memory') # finetuning related parser.add_argument('--enc-init', default=None, type=str, help='Pre-trained TTS model path to initialize encoder.') parser.add_argument('--enc-init-mods', default='enc.', type=lambda s: [str(mod) for mod in s.split(',') if s != ''], help='List of encoder modules to initialize, separated by a comma.') parser.add_argument('--dec-init', default=None, type=str, help='Pre-trained TTS model path to initialize decoder.') parser.add_argument('--dec-init-mods', default='dec.,att.', type=lambda s: [str(mod) for mod in s.split(',') if s != ''], help='List of decoder modules to initialize, separated by a comma.') return parser " 33918,"def recsim_gym_wrapper( recsim_gym_env: gym.Env, convert_to_discrete_action_space: bool = False, wrap_for_bandits=False, ) -> gym.Env: """"""Makes sure a 
RecSim gym.Env can ba handled by RLlib. In RecSim's observation spaces, the ""doc"" field is a dictionary keyed by document IDs. Those IDs are changing every step, thus generating a different observation space in each time. This causes issues for RLlib because it expects the observation space to remain the same across steps. Also, RecSim's reset() function returns an observation without the ""response"" field, breaking RLlib's check. This wrapper fixes that by assigning a random ""response"". Args: recsim_gym_env: The RecSim gym.Env instance. Usually resulting from a raw RecSim env having been passed through RecSim's utility function: `recsim.simulator.recsim_gym.RecSimGymEnv()`. convert_to_discrete_action_space: Optional bool indicating, whether the action space of the created env class should be Discrete (rather than MultiDiscrete, even if slate size > 1). This is useful for algorithms that don't support MultiDiscrete action spaces, such as RLlib's DQN. If None, `convert_to_discrete_action_space` may also be provided via the EnvContext (config) when creating an actual env instance. wrap_for_bandits: Optional bool indicating, whether this RecSim env should be wrapped for use with our Bandits agent. Returns: An RLlib-ready gym.Env instance. """""" env = RecSimResetWrapper(recsim_gym_env) env = RecSimObservationSpaceWrapper(env) if convert_to_discrete_action_space: env = MultiDiscreteToDiscreteActionWrapper(env) if wrap_for_bandits: env = RecSimObservationBanditWrapper(env) return env ","def recsim_gym_wrapper( recsim_gym_env: gym.Env, convert_to_discrete_action_space: bool = False, wrap_for_bandits: bool = False, ) -> gym.Env: """"""Makes sure a RecSim gym.Env can ba handled by RLlib. In RecSim's observation spaces, the ""doc"" field is a dictionary keyed by document IDs. Those IDs are changing every step, thus generating a different observation space in each time. This causes issues for RLlib because it expects the observation space to remain the same across steps. Also, RecSim's reset() function returns an observation without the ""response"" field, breaking RLlib's check. This wrapper fixes that by assigning a random ""response"". Args: recsim_gym_env: The RecSim gym.Env instance. Usually resulting from a raw RecSim env having been passed through RecSim's utility function: `recsim.simulator.recsim_gym.RecSimGymEnv()`. convert_to_discrete_action_space: Optional bool indicating, whether the action space of the created env class should be Discrete (rather than MultiDiscrete, even if slate size > 1). This is useful for algorithms that don't support MultiDiscrete action spaces, such as RLlib's DQN. If None, `convert_to_discrete_action_space` may also be provided via the EnvContext (config) when creating an actual env instance. wrap_for_bandits: Optional bool indicating, whether this RecSim env should be wrapped for use with our Bandits agent. Returns: An RLlib-ready gym.Env instance. """""" env = RecSimResetWrapper(recsim_gym_env) env = RecSimObservationSpaceWrapper(env) if convert_to_discrete_action_space: env = MultiDiscreteToDiscreteActionWrapper(env) if wrap_for_bandits: env = RecSimObservationBanditWrapper(env) return env " 53473,"def iterator_suffix(iterator, stop: int): for i, item in enumerate(iterator): if i < stop: continue yield item ","def even_number_under(n: int): for i in range(n): if i%2 == 1: # [no-else-continue] continue yield i " 31400,"def get_markdown(object_type: str, objects=None): """""" Getting markdown for object type to display the results in human readable format. 
:type object_type: ``str`` :param object_type: Type of IdentityNow object. :type objects: ``dict`` or ``list`` :param objects: Single or list of Identity resources object/s. :return: Markdown for each object type. """""" markdown = '' if object_type == 'IdentityNow.Identity': headers = ['id', 'name', 'displayName', 'firstName', 'lastName', 'email', 'created', 'modified', 'inactive', 'protected', 'status', 'isManager', 'identityProfile', 'source', 'attributes', 'accounts', 'accountCount', 'appCount', 'accessCount', 'entitlementCount', 'roleCount', 'accessProfileCount', 'pod', 'org', 'type'] markdown = tableToMarkdown('Identity(Identities)', objects, headers=headers) elif object_type == 'IdentityNow.Account': headers = ['id', 'name', 'identityId', 'nativeIdentity', 'sourceId', 'created', 'modified', 'attributes', 'authoritative', 'disabled', 'locked', 'systemAccount', 'uncorrelated', 'manuallyCorrelated', 'hasEntitlements'] markdown = tableToMarkdown('Account(s)', objects, headers=headers) elif object_type == 'IdentityNow.AccountActivity': headers = ['id', 'name', 'created', 'modified', 'completed', 'completionStatus', 'type', 'requesterIdentitySummary', 'targetIdentitySummary', 'items', 'executionStatus', 'cancelable', 'cancelComment'] markdown = tableToMarkdown('Account Activity(Account Activities)', objects, headers=headers) elif object_type == 'IdentityNow.AccessProfile': headers = ['id', 'name', 'description', 'source', 'entitlements', 'entitlementCount', 'created', 'modified', 'synced', 'enabled', 'requestable', 'requestCommentsRequired', 'owner', 'pod', 'org', 'type'] markdown = tableToMarkdown('Access Profile(s)', objects, headers=headers) elif object_type == 'IdentityNow.Role': headers = ['id', 'name', 'description', 'accessProfiles', 'accessProfileCount', 'created', 'modified', 'synced', 'enabled', 'requestable', 'requestCommentsRequired', 'owner', 'pod', 'org', 'type'] markdown = tableToMarkdown('Role(s)', objects, headers=headers) elif object_type == 'IdentityNow.Entitlement': headers = ['id', 'name', 'displayName', 'description', 'modified', 'synced', 'source', 'privileged', 'identityCount', 'attribute', 'value', 'schema', 'pod', 'org', 'type'] markdown = tableToMarkdown('Entitlement(s)', objects, headers=headers) elif object_type == 'IdentityNow.Event': headers = ['id', 'name', 'stack', 'created', 'synced', 'objects', 'ipAddress', 'technicalName', 'target', 'actor', 'action', 'attributes', 'operation', 'status', 'pod', 'org', 'type'] markdown = tableToMarkdown('Event(s)', objects, headers=headers) return markdown ","def get_markdown(object_type: str, objects=None): """""" Getting markdown for object type to display the results in human readable format. :type object_type: ``str`` :param object_type: Type of IdentityNow object. :type objects: ``dict`` or ``list`` :param objects: Single or list of Identity resources object/s. :return: Markdown for each object type. 
"""""" markdown = '' if object_type == 'IdentityNow.Identity': headers = ['id', 'name', 'displayName', 'firstName', 'lastName', 'email', 'created', 'modified', 'inactive', 'protected', 'status', 'isManager', 'identityProfile', 'source', 'attributes', 'accounts', 'accountCount', 'appCount', 'accessCount', 'entitlementCount', 'roleCount', 'accessProfileCount', 'pod', 'org', 'type'] markdown = tableToMarkdown('Identity(Identities)', objects, headers=headers, removeNull=True) elif object_type == 'IdentityNow.Account': headers = ['id', 'name', 'identityId', 'nativeIdentity', 'sourceId', 'created', 'modified', 'attributes', 'authoritative', 'disabled', 'locked', 'systemAccount', 'uncorrelated', 'manuallyCorrelated', 'hasEntitlements'] markdown = tableToMarkdown('Account(s)', objects, headers=headers) elif object_type == 'IdentityNow.AccountActivity': headers = ['id', 'name', 'created', 'modified', 'completed', 'completionStatus', 'type', 'requesterIdentitySummary', 'targetIdentitySummary', 'items', 'executionStatus', 'cancelable', 'cancelComment'] markdown = tableToMarkdown('Account Activity(Account Activities)', objects, headers=headers) elif object_type == 'IdentityNow.AccessProfile': headers = ['id', 'name', 'description', 'source', 'entitlements', 'entitlementCount', 'created', 'modified', 'synced', 'enabled', 'requestable', 'requestCommentsRequired', 'owner', 'pod', 'org', 'type'] markdown = tableToMarkdown('Access Profile(s)', objects, headers=headers) elif object_type == 'IdentityNow.Role': headers = ['id', 'name', 'description', 'accessProfiles', 'accessProfileCount', 'created', 'modified', 'synced', 'enabled', 'requestable', 'requestCommentsRequired', 'owner', 'pod', 'org', 'type'] markdown = tableToMarkdown('Role(s)', objects, headers=headers) elif object_type == 'IdentityNow.Entitlement': headers = ['id', 'name', 'displayName', 'description', 'modified', 'synced', 'source', 'privileged', 'identityCount', 'attribute', 'value', 'schema', 'pod', 'org', 'type'] markdown = tableToMarkdown('Entitlement(s)', objects, headers=headers) elif object_type == 'IdentityNow.Event': headers = ['id', 'name', 'stack', 'created', 'synced', 'objects', 'ipAddress', 'technicalName', 'target', 'actor', 'action', 'attributes', 'operation', 'status', 'pod', 'org', 'type'] markdown = tableToMarkdown('Event(s)', objects, headers=headers) return markdown " 23016,"def eye(N, chunks='auto', M=None, k=0, dtype=float): """""" Return a 2-D Array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. chunks: int chunk size of resulting blocks M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : Array of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. 
"""""" eye = {} if M is None: M = N if not isinstance(chunks, (int, str)): raise ValueError('chunks must be an int or string') elif isinstance(chunks, str): chunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = chunks[0][0] token = tokenize(N, chunk, M, k, dtype) name_eye = 'eye-' + token vchunks = [chunks] * (N // chunks) if N % chunks != 0: vchunks.append(N % chunks) hchunks = [chunks] * (M // chunks) if M % chunks != 0: hchunks.append(M % chunks) for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = (np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) ","def eye(N, chunks='auto', M=None, k=0, dtype=float): """""" Return a 2-D Array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. chunks: int chunk size of resulting blocks M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : Array of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. """""" eye = {} if M is None: M = N if not isinstance(chunks, (int, str)): raise ValueError('chunks must be an int or string') elif isinstance(chunks, str): chunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = chunks[0][0] token = tokenize(N, chunks, M, k, dtype) name_eye = 'eye-' + token vchunks = [chunks] * (N // chunks) if N % chunks != 0: vchunks.append(N % chunks) hchunks = [chunks] * (M // chunks) if M % chunks != 0: hchunks.append(M % chunks) for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = (np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) " 31485,"def command_test_module(credentials: Dict) -> str: message: str = '' try: api = CBCloudAPI(**credentials) api.api_json_request(method='GET', uri='/integrationServices/v3/cblr/session/') message = 'ok' except errors.UnauthorizedError: message = 'Authorization Error: Check your API Credentials' except Exception as e: exception_str = str(e) if 'connection error' in exception_str: message = 'Connection Error: Check your Server Url' else: raise e return message ","def command_test_module(credentials: Dict) -> str: message: str = '' try: api = CBCloudAPI(**credentials) api.api_json_request(method='GET', uri='/integrationServices/v3/cblr/session/') message = 'ok' except errors.UnauthorizedError: return_error('Authorization Error: Check your API Credentials') except Exception as e: exception_str = str(e) if 'connection error' in exception_str: message = 'Connection Error: Check your Server Url' else: raise e return message " 5448,"def replace( path, pattern, repl, count=0, flags=8, bufsize=1, append_if_not_found=False, prepend_if_not_found=False, not_found_content=None, backup="".bak"", dry_run=False, search_only=False, show_changes=True, ignore_if_missing=False, preserve_inode=True, 
backslash_literal=False, ): """""" .. versionadded:: 0.17.0 Replace occurrences of a pattern in a file. If ``show_changes`` is ``True``, then a diff of what changed will be returned, otherwise a ``True`` will be returned when changes are made, and ``False`` when no changes are made. This is a pure Python implementation that wraps Python's :py:func:`~re.sub`. path Filesystem path to the file to be edited. If a symlink is specified, it will be resolved to its target. pattern A regular expression, to be matched using Python's :py:func:`~re.search`. repl The replacement text count: 0 Maximum number of pattern occurrences to be replaced. If count is a positive integer ``n``, only ``n`` occurrences will be replaced, otherwise all occurrences will be replaced. flags (list or int) A list of flags defined in the ``re`` module documentation from the Python standard library. Each list item should be a string that will correlate to the human-friendly flag name. E.g., ``['IGNORECASE', 'MULTILINE']``. Optionally, ``flags`` may be an int, with a value corresponding to the XOR (``|``) of all the desired flags. Defaults to 8 (which supports 'MULTILINE'). bufsize (int or str) How much of the file to buffer into memory at once. The default value ``1`` processes one line at a time. The special value ``file`` may be specified which will read the entire file into memory before processing. append_if_not_found: False .. versionadded:: 2014.7.0 If set to ``True``, and pattern is not found, then the content will be appended to the file. prepend_if_not_found: False .. versionadded:: 2014.7.0 If set to ``True`` and pattern is not found, then the content will be prepended to the file. not_found_content .. versionadded:: 2014.7.0 Content to use for append/prepend if not found. If None (default), uses ``repl``. Useful when ``repl`` uses references to group in pattern. backup: .bak The file extension to use for a backup of the file before editing. Set to ``False`` to skip making a backup. dry_run: False If set to ``True``, no changes will be made to the file, the function will just return the changes that would have been made (or a ``True``/``False`` value if ``show_changes`` is set to ``False``). search_only: False If set to true, this no changes will be performed on the file, and this function will simply return ``True`` if the pattern was matched, and ``False`` if not. show_changes: True If ``True``, return a diff of changes made. Otherwise, return ``True`` if changes were made, and ``False`` if not. .. note:: Using this option will store two copies of the file in memory (the original version and the edited version) in order to generate the diff. This may not normally be a concern, but could impact performance if used with large files. ignore_if_missing: False .. versionadded:: 2015.8.0 If set to ``True``, this function will simply return ``False`` if the file doesn't exist. Otherwise, an error will be thrown. preserve_inode: True .. versionadded:: 2015.8.0 Preserve the inode of the file, so that any hard links continue to share the inode with the original filename. This works by *copying* the file, reading from the copy, and writing to the file at the original inode. If ``False``, the file will be *moved* rather than copied, and a new file will be written to a new inode, but using the original filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). backslash_literal: False .. 
versionadded:: 2016.11.7 Interpret backslashes as literal backslashes for the repl and not escape characters. This will help when using append/prepend so that the backslashes are not interpreted for the repl on the second run of the state. If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the remote shell command by manually specifying the kwarg: .. code-block:: bash salt '*' file.replace /path/to/file pattern='=' repl=':' salt '*' file.replace /path/to/file pattern=""bind-address\\s*="" repl='bind-address:' CLI Examples: .. code-block:: bash salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info' salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]' """""" symlink = False if is_link(path): symlink = True target_path = salt.utils.path.readlink(path) given_path = os.path.expanduser(path) path = os.path.realpath(os.path.expanduser(path)) if not os.path.exists(path): if ignore_if_missing: return False else: raise SaltInvocationError(""File not found: {}"".format(path)) if not __utils__[""files.is_text""](path): raise SaltInvocationError( ""Cannot perform string replacements on a binary file: {}"".format(path) ) if search_only and (append_if_not_found or prepend_if_not_found): raise SaltInvocationError( ""search_only cannot be used with append/prepend_if_not_found"" ) if append_if_not_found and prepend_if_not_found: raise SaltInvocationError( ""Only one of append and prepend_if_not_found is permitted"" ) flags_num = _get_flags(flags) cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num) filesize = os.path.getsize(path) if bufsize == ""file"": bufsize = filesize # Search the file; track if any changes have been made for the return val has_changes = False orig_file = [] # used for show_changes and change detection new_file = [] # used for show_changes and change detection if not salt.utils.platform.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.files.normalize_mode(get_mode(path)) # Avoid TypeErrors by forcing repl to be bytearray related to mmap # Replacement text may contains integer: 123 for example repl = salt.utils.stringutils.to_bytes(str(repl)) if not_found_content: not_found_content = salt.utils.stringutils.to_bytes(not_found_content) found = False temp_file = None content = ( salt.utils.stringutils.to_unicode(not_found_content) if not_found_content and (prepend_if_not_found or append_if_not_found) else salt.utils.stringutils.to_unicode(repl) ) try: # First check the whole file, determine whether to make the replacement # Searching first avoids modifying the time stamp if there are no changes r_data = None # Use a read-only handle to open the file with salt.utils.files.fopen(path, mode=""rb"", buffering=bufsize) as r_file: try: # mmap throws a ValueError if the file is empty. r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) except (ValueError, OSError): # size of file in /proc is 0, but contains data r_data = salt.utils.stringutils.to_bytes("""".join(r_file)) if search_only: # Just search; bail as early as a match is found if re.search(cpattern, r_data): return True # `with` block handles file closure else: return False else: result, nrepl = re.subn( cpattern, repl.replace(""\\"", ""\\\\"") if backslash_literal else repl, r_data, count, ) # found anything? 
(even if no change) if nrepl > 0: found = True # Identity check the potential change has_changes = True if pattern != repl else has_changes if prepend_if_not_found or append_if_not_found: # Search for content, to avoid pre/appending the # content if it was pre/appended in a previous run. if re.search( salt.utils.stringutils.to_bytes( ""^{}($|(?=\r\n))"".format(re.escape(content)) ), r_data, flags=flags_num, ): # Content was found, so set found. found = True orig_file = ( r_data.read(filesize).splitlines(True) if isinstance(r_data, mmap.mmap) else r_data.splitlines(True) ) new_file = result.splitlines(True) if orig_file == new_file: has_changes = False except OSError as exc: raise CommandExecutionError( ""Unable to open file '{}'. Exception: {}"".format(path, exc) ) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() if has_changes and not dry_run: # Write the replacement text in this block. try: # Create a copy to read from and to use as a backup later temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) r_data = None try: # Open the file in write mode with salt.utils.files.fopen(path, mode=""w"", buffering=bufsize) as w_file: try: # Open the temp file in read mode with salt.utils.files.fopen( temp_file, mode=""r"", buffering=bufsize ) as r_file: r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) result, nrepl = re.subn( cpattern, repl.replace(""\\"", ""\\\\"") if backslash_literal else repl, r_data, count, ) try: w_file.write(salt.utils.stringutils.to_str(result)) except OSError as exc: raise CommandExecutionError( ""Unable to write file '{}'. Contents may "" ""be truncated. Temporary file contains copy "" ""at '{}'. "" ""Exception: {}"".format(path, temp_file, exc) ) except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) if not found and (append_if_not_found or prepend_if_not_found): if not_found_content is None: not_found_content = repl if prepend_if_not_found: new_file.insert( 0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep) ) else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): if not new_file[-1].endswith( salt.utils.stringutils.to_bytes(os.linesep) ): new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep) new_file.append( not_found_content + salt.utils.stringutils.to_bytes(os.linesep) ) has_changes = True if not dry_run: try: # Create a copy to read from and for later use as a backup temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) # write new content in the file while avoiding partial reads try: fh_ = salt.utils.atomicfile.atomic_open(path, ""wb"") for line in new_file: fh_.write(salt.utils.stringutils.to_bytes(line)) finally: fh_.close() if backup and has_changes and not dry_run: # keep the backup only if it was requested # and only if there were any changes backup_name = ""{}{}"".format(path, backup) try: shutil.move(temp_file, backup_name) except OSError as exc: raise CommandExecutionError( ""Unable to move the temp file '{}' to the "" ""backup file '{}'. 
"" ""Exception: {}"".format(path, temp_file, exc) ) if symlink: symlink_backup = ""{}{}"".format(given_path, backup) target_backup = ""{}{}"".format(target_path, backup) # Always clobber any existing symlink backup # to match the behaviour of the 'backup' option try: os.symlink(target_backup, symlink_backup) except OSError: os.remove(symlink_backup) os.symlink(target_backup, symlink_backup) except Exception: # pylint: disable=broad-except raise CommandExecutionError( ""Unable create backup symlink '{}'. "" ""Target was '{}'. "" ""Exception: {}"".format(symlink_backup, target_backup, exc) ) elif temp_file: try: os.remove(temp_file) except OSError as exc: raise CommandExecutionError( ""Unable to delete temp file '{}'. Exception: {}"".format(temp_file, exc) ) if not dry_run and not salt.utils.platform.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) differences = __utils__[""stringutils.get_diff""](orig_file, new_file) if show_changes: return differences # We may have found a regex line match but don't need to change the line # (for situations where the pattern also matches the repl). Revert the # has_changes flag to False if the final result is unchanged. if not differences: has_changes = False return has_changes ","def replace( path, pattern, repl, count=0, flags=8, bufsize=1, append_if_not_found=False, prepend_if_not_found=False, not_found_content=None, backup="".bak"", dry_run=False, search_only=False, show_changes=True, ignore_if_missing=False, preserve_inode=True, backslash_literal=False, ): """""" .. versionadded:: 0.17.0 Replace occurrences of a pattern in a file. If ``show_changes`` is ``True``, then a diff of what changed will be returned, otherwise a ``True`` will be returned when changes are made, and ``False`` when no changes are made. This is a pure Python implementation that wraps Python's :py:func:`~re.sub`. path Filesystem path to the file to be edited. If a symlink is specified, it will be resolved to its target. pattern A regular expression, to be matched using Python's :py:func:`~re.search`. repl The replacement text count: 0 Maximum number of pattern occurrences to be replaced. If count is a positive integer ``n``, only ``n`` occurrences will be replaced, otherwise all occurrences will be replaced. flags (list or int) A list of flags defined in the ``re`` module documentation from the Python standard library. Each list item should be a string that will correlate to the human-friendly flag name. E.g., ``['IGNORECASE', 'MULTILINE']``. Optionally, ``flags`` may be an int, with a value corresponding to the XOR (``|``) of all the desired flags. Defaults to 8 (which supports 'MULTILINE'). bufsize (int or str) How much of the file to buffer into memory at once. The default value ``1`` processes one line at a time. The special value ``file`` may be specified which will read the entire file into memory before processing. append_if_not_found: False .. versionadded:: 2014.7.0 If set to ``True``, and pattern is not found, then the content will be appended to the file. prepend_if_not_found: False .. versionadded:: 2014.7.0 If set to ``True`` and pattern is not found, then the content will be prepended to the file. not_found_content .. versionadded:: 2014.7.0 Content to use for append/prepend if not found. If None (default), uses ``repl``. Useful when ``repl`` uses references to group in pattern. backup: .bak The file extension to use for a backup of the file before editing. Set to ``False`` to skip making a backup. 
dry_run: False If set to ``True``, no changes will be made to the file, the function will just return the changes that would have been made (or a ``True``/``False`` value if ``show_changes`` is set to ``False``). search_only: False If set to true, this no changes will be performed on the file, and this function will simply return ``True`` if the pattern was matched, and ``False`` if not. show_changes: True If ``True``, return a diff of changes made. Otherwise, return ``True`` if changes were made, and ``False`` if not. .. note:: Using this option will store two copies of the file in memory (the original version and the edited version) in order to generate the diff. This may not normally be a concern, but could impact performance if used with large files. ignore_if_missing: False .. versionadded:: 2015.8.0 If set to ``True``, this function will simply return ``False`` if the file doesn't exist. Otherwise, an error will be thrown. preserve_inode: True .. versionadded:: 2015.8.0 Preserve the inode of the file, so that any hard links continue to share the inode with the original filename. This works by *copying* the file, reading from the copy, and writing to the file at the original inode. If ``False``, the file will be *moved* rather than copied, and a new file will be written to a new inode, but using the original filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). backslash_literal: False .. versionadded:: 2016.11.7 Interpret backslashes as literal backslashes for the repl and not escape characters. This will help when using append/prepend so that the backslashes are not interpreted for the repl on the second run of the state. If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the remote shell command by manually specifying the kwarg: .. code-block:: bash salt '*' file.replace /path/to/file pattern='=' repl=':' salt '*' file.replace /path/to/file pattern=""bind-address\\s*="" repl='bind-address:' CLI Examples: .. 
code-block:: bash salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info' salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]' """""" symlink = False if is_link(path): symlink = True target_path = salt.utils.path.readlink(path) given_path = os.path.expanduser(path) path = os.path.realpath(os.path.expanduser(path)) if not os.path.exists(path): if ignore_if_missing: return False else: raise SaltInvocationError(""File not found: {}"".format(path)) if not __utils__[""files.is_text""](path): raise SaltInvocationError( ""Cannot perform string replacements on a binary file: {}"".format(path) ) if search_only and (append_if_not_found or prepend_if_not_found): raise SaltInvocationError( ""search_only cannot be used with append/prepend_if_not_found"" ) if append_if_not_found and prepend_if_not_found: raise SaltInvocationError( ""Only one of append and prepend_if_not_found is permitted"" ) flags_num = _get_flags(flags) cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num) filesize = os.path.getsize(path) if bufsize == ""file"": bufsize = filesize # Search the file; track if any changes have been made for the return val has_changes = False orig_file = [] # used for show_changes and change detection new_file = [] # used for show_changes and change detection if not salt.utils.platform.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.files.normalize_mode(get_mode(path)) # Avoid TypeErrors by forcing repl to be bytearray related to mmap # Replacement text may contains integer: 123 for example repl = salt.utils.stringutils.to_bytes(str(repl)) if not_found_content: not_found_content = salt.utils.stringutils.to_bytes(not_found_content) found = False temp_file = None content = ( salt.utils.stringutils.to_unicode(not_found_content) if not_found_content and (prepend_if_not_found or append_if_not_found) else salt.utils.stringutils.to_unicode(repl) ) try: # First check the whole file, determine whether to make the replacement # Searching first avoids modifying the time stamp if there are no changes r_data = None # Use a read-only handle to open the file with salt.utils.files.fopen(path, mode=""rb"", buffering=bufsize) as r_file: try: # mmap throws a ValueError if the file is empty. r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) except (ValueError, OSError): # size of file in /proc is 0, but contains data r_data = salt.utils.stringutils.to_bytes("""".join(r_file)) if search_only: # Just search; bail as early as a match is found if re.search(cpattern, r_data): return True # `with` block handles file closure else: return False else: result, nrepl = re.subn( cpattern, repl.replace(""\\"", ""\\\\"") if backslash_literal else repl, r_data, count, ) # found anything? (even if no change) if nrepl > 0: found = True # Identity check the potential change has_changes = True if pattern != repl else has_changes if prepend_if_not_found or append_if_not_found: # Search for content, to avoid pre/appending the # content if it was pre/appended in a previous run. if re.search( salt.utils.stringutils.to_bytes( ""^{}($|(?=\r\n))"".format(re.escape(content)) ), r_data, flags=flags_num, ): # Content was found, so set found. 
found = True orig_file = ( r_data.read(filesize).splitlines(True) if isinstance(r_data, mmap.mmap) else r_data.splitlines(True) ) new_file = result.splitlines(True) has_changes = orig_file != new_file except OSError as exc: raise CommandExecutionError( ""Unable to open file '{}'. Exception: {}"".format(path, exc) ) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() if has_changes and not dry_run: # Write the replacement text in this block. try: # Create a copy to read from and to use as a backup later temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) r_data = None try: # Open the file in write mode with salt.utils.files.fopen(path, mode=""w"", buffering=bufsize) as w_file: try: # Open the temp file in read mode with salt.utils.files.fopen( temp_file, mode=""r"", buffering=bufsize ) as r_file: r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) result, nrepl = re.subn( cpattern, repl.replace(""\\"", ""\\\\"") if backslash_literal else repl, r_data, count, ) try: w_file.write(salt.utils.stringutils.to_str(result)) except OSError as exc: raise CommandExecutionError( ""Unable to write file '{}'. Contents may "" ""be truncated. Temporary file contains copy "" ""at '{}'. "" ""Exception: {}"".format(path, temp_file, exc) ) except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) if not found and (append_if_not_found or prepend_if_not_found): if not_found_content is None: not_found_content = repl if prepend_if_not_found: new_file.insert( 0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep) ) else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): if not new_file[-1].endswith( salt.utils.stringutils.to_bytes(os.linesep) ): new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep) new_file.append( not_found_content + salt.utils.stringutils.to_bytes(os.linesep) ) has_changes = True if not dry_run: try: # Create a copy to read from and for later use as a backup temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except OSError as exc: raise CommandExecutionError(""Exception: {}"".format(exc)) # write new content in the file while avoiding partial reads try: fh_ = salt.utils.atomicfile.atomic_open(path, ""wb"") for line in new_file: fh_.write(salt.utils.stringutils.to_bytes(line)) finally: fh_.close() if backup and has_changes and not dry_run: # keep the backup only if it was requested # and only if there were any changes backup_name = ""{}{}"".format(path, backup) try: shutil.move(temp_file, backup_name) except OSError as exc: raise CommandExecutionError( ""Unable to move the temp file '{}' to the "" ""backup file '{}'. "" ""Exception: {}"".format(path, temp_file, exc) ) if symlink: symlink_backup = ""{}{}"".format(given_path, backup) target_backup = ""{}{}"".format(target_path, backup) # Always clobber any existing symlink backup # to match the behaviour of the 'backup' option try: os.symlink(target_backup, symlink_backup) except OSError: os.remove(symlink_backup) os.symlink(target_backup, symlink_backup) except Exception: # pylint: disable=broad-except raise CommandExecutionError( ""Unable create backup symlink '{}'. "" ""Target was '{}'. 
"" ""Exception: {}"".format(symlink_backup, target_backup, exc) ) elif temp_file: try: os.remove(temp_file) except OSError as exc: raise CommandExecutionError( ""Unable to delete temp file '{}'. Exception: {}"".format(temp_file, exc) ) if not dry_run and not salt.utils.platform.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) differences = __utils__[""stringutils.get_diff""](orig_file, new_file) if show_changes: return differences # We may have found a regex line match but don't need to change the line # (for situations where the pattern also matches the repl). Revert the # has_changes flag to False if the final result is unchanged. if not differences: has_changes = False return has_changes " 22563,"def _handle_offline_mode(code, return_value): msg = ( ""This script is being executed in offline mode and cannot connect to the database. "" f""Therefore, `{code}` returns `{return_value}` by default."" ) log.debug(msg) return return_value ","def _handle_offline_mode(code, return_value): msg = ( ""This script is being executed in offline mode and cannot connect to the database. "" f""Therefore, `{code}` returns `{return_value}` by default."" ) log.warning(msg) return return_value " 40450,"def test_attention_aggregation(): channels, dim_size = (32, 10) gate_nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, 1)) nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, channels)) aggr = AttentionAggregation(gate_nn, nn) assert aggr.__repr__() == ( 'AttentionAggregation(gate_nn=Sequential(\n' ' (0): Linear(in_features=32, out_features=32, bias=True)\n' ' (1): ReLU()\n' ' (2): Linear(in_features=32, out_features=1, bias=True)\n' '), nn=Sequential(\n' ' (0): Linear(in_features=32, out_features=32, bias=True)\n' ' (1): ReLU()\n' ' (2): Linear(in_features=32, out_features=32, bias=True)\n' '))') x = torch.randn((dim_size**2, channels)) index = torch.arange(dim_size, dtype=torch.long) index = index.view(-1, 1).repeat(1, dim_size).view(-1) assert aggr(x, index).size() == (dim_size, channels) assert aggr(x, index, dim_size=dim_size + 1).size() == (dim_size + 1, channels) # test depreciated aggr = GlobalAttention(gate_nn, nn) assert aggr(x, index).size() == (dim_size, channels) assert aggr(x, index, dim_size + 1).size() == (dim_size + 1, channels) ","def test_attention_aggregation(): channels, dim_size = (32, 10) gate_nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, 1)) nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, channels)) aggr = AttentionAggregation(gate_nn, nn) assert str(aggr) == ( 'AttentionAggregation(gate_nn=Sequential(\n' ' (0): Linear(in_features=32, out_features=32, bias=True)\n' ' (1): ReLU()\n' ' (2): Linear(in_features=32, out_features=1, bias=True)\n' '), nn=Sequential(\n' ' (0): Linear(in_features=32, out_features=32, bias=True)\n' ' (1): ReLU()\n' ' (2): Linear(in_features=32, out_features=32, bias=True)\n' '))') x = torch.randn((dim_size**2, channels)) index = torch.arange(dim_size, dtype=torch.long) index = index.view(-1, 1).repeat(1, dim_size).view(-1) assert aggr(x, index).size() == (dim_size, channels) assert aggr(x, index, dim_size=dim_size + 1).size() == (dim_size + 1, channels) # test depreciated aggr = GlobalAttention(gate_nn, nn) assert aggr(x, index).size() == (dim_size, channels) assert aggr(x, index, dim_size + 1).size() == (dim_size + 1, channels) " 15568,"def recorder_save_worker(file_out: str, segments: Deque[Segment]): """"""Handle saving stream."""""" if not segments: _LOGGER.error(""Recording failed to capture anything."") return 
if not os.path.exists(os.path.dirname(file_out)): os.makedirs(os.path.dirname(file_out), exist_ok=True) pts_adjuster = {""video"": None, ""audio"": None} output = None output_v = None output_a = None last_stream_id = None # The running duration of processed segments. Note that this is in av.time_base # units which seem to be defined inversely to how stream time_bases are defined running_duration = 0 last_sequence = float(""-inf"") for segment in segments: # Because the stream_worker is in a different thread from the record service, # the lookback segments may still have some overlap with the recorder segments if segment.sequence <= last_sequence: continue last_sequence = segment.sequence # Open segment source = av.open(segment.segment, ""r"", format=SEGMENT_CONTAINER_FORMAT) source_v = source.streams.video[0] source_a = source.streams.audio[0] if len(source.streams.audio) > 0 else None # Create output on first segment if not output: output = av.open( file_out, ""w"", format=RECORDER_CONTAINER_FORMAT, container_options={ ""video_track_timescale"": str(int(1 / source_v.time_base)) }, ) # Add output streams if necessary if not output_v: output_v = output.add_stream(template=source_v) context = output_v.codec_context context.flags |= ""GLOBAL_HEADER"" if source_a and not output_a: output_a = output.add_stream(template=source_a) # Recalculate pts adjustments on first segment and on any discontinuity # We are assuming time base is the same across all discontinuities if last_stream_id != segment.stream_id: last_stream_id = segment.stream_id pts_adjuster[""video""] = int( (running_duration - source.start_time) / (av.time_base * source_v.time_base) ) if source_a: pts_adjuster[""audio""] = int( (running_duration - source.start_time) / (av.time_base * source_a.time_base) ) # Remux video for packet in source.demux(): if packet.dts is None: continue packet.pts += pts_adjuster[packet.stream.type] packet.dts += pts_adjuster[packet.stream.type] packet.stream = output_v if packet.stream.type == ""video"" else output_a output.mux(packet) running_duration += source.duration - source.start_time source.close() output.close() ","def recorder_save_worker(file_out: str, segments: Deque[Segment]): """"""Handle saving stream."""""" if not segments: _LOGGER.error(""Recording failed to capture anything"") return if not os.path.exists(os.path.dirname(file_out)): os.makedirs(os.path.dirname(file_out), exist_ok=True) pts_adjuster = {""video"": None, ""audio"": None} output = None output_v = None output_a = None last_stream_id = None # The running duration of processed segments. 
Note that this is in av.time_base # units which seem to be defined inversely to how stream time_bases are defined running_duration = 0 last_sequence = float(""-inf"") for segment in segments: # Because the stream_worker is in a different thread from the record service, # the lookback segments may still have some overlap with the recorder segments if segment.sequence <= last_sequence: continue last_sequence = segment.sequence # Open segment source = av.open(segment.segment, ""r"", format=SEGMENT_CONTAINER_FORMAT) source_v = source.streams.video[0] source_a = source.streams.audio[0] if len(source.streams.audio) > 0 else None # Create output on first segment if not output: output = av.open( file_out, ""w"", format=RECORDER_CONTAINER_FORMAT, container_options={ ""video_track_timescale"": str(int(1 / source_v.time_base)) }, ) # Add output streams if necessary if not output_v: output_v = output.add_stream(template=source_v) context = output_v.codec_context context.flags |= ""GLOBAL_HEADER"" if source_a and not output_a: output_a = output.add_stream(template=source_a) # Recalculate pts adjustments on first segment and on any discontinuity # We are assuming time base is the same across all discontinuities if last_stream_id != segment.stream_id: last_stream_id = segment.stream_id pts_adjuster[""video""] = int( (running_duration - source.start_time) / (av.time_base * source_v.time_base) ) if source_a: pts_adjuster[""audio""] = int( (running_duration - source.start_time) / (av.time_base * source_a.time_base) ) # Remux video for packet in source.demux(): if packet.dts is None: continue packet.pts += pts_adjuster[packet.stream.type] packet.dts += pts_adjuster[packet.stream.type] packet.stream = output_v if packet.stream.type == ""video"" else output_a output.mux(packet) running_duration += source.duration - source.start_time source.close() output.close() " 39132,"def dcshift( waveform: Tensor, shift: float, limiter_gain: float = 0. ) -> Tensor: r""""""Apply a DC shift to the audio. Similar to SoX implementation. This can be useful to remove a DC offset (caused perhaps by a hardware problem in the recording chain) from the audio Args: waveform (Tensor): audio waveform of dimension of `(..., time)` shift (float): indicates the amount to shift the audio Allowed range of values for shift : -2.0 to +2.0 limiter_gain (float): It is used only on peaks to prevent clipping It should have a value much less than 1 (e.g. 0.05 or 0.02) Returns: Tensor: Waveform of dimension of `(..., time)` References: http://sox.sourceforge.net/sox.html """""" output_waveform = waveform limiter_threshold = 0. use_limiter = False if limiter_gain != 0.0: use_limiter = True limiter_threshold = 1.0 - (abs(shift) - limiter_gain) if use_limiter and shift > 0: mask = waveform > limiter_threshold temp = (waveform[mask] - limiter_threshold) * limiter_gain / (1 - limiter_threshold) output_waveform[mask] = (temp + limiter_threshold + shift).clamp(max=limiter_threshold) output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=+1) elif use_limiter and shift < 0: mask = waveform < -limiter_threshold temp = (waveform[mask] + limiter_threshold) * limiter_gain / (1 - limiter_threshold) output_waveform[mask] = (temp - limiter_threshold + shift).clamp(min=-limiter_threshold) output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=+1) else: output_waveform = (waveform + shift).clamp(min=-1, max=+1) return output_waveform ","def dcshift( waveform: Tensor, shift: float, limiter_gain: float = 0. 
) -> Tensor: r""""""Apply a DC shift to the audio. Similar to SoX implementation. This can be useful to remove a DC offset (caused perhaps by a hardware problem in the recording chain) from the audio Args: waveform (Tensor): audio waveform of dimension of `(..., time)` shift (float): indicates the amount to shift the audio Allowed range of values for shift : -2.0 to +2.0 limiter_gain (float): It is used only on peaks to prevent clipping It should have a value much less than 1 (e.g. 0.05 or 0.02) Returns: Tensor: Waveform of dimension of `(..., time)` References: http://sox.sourceforge.net/sox.html """""" output_waveform = waveform limiter_threshold = 0. use_limiter = False if limiter_gain != 0.0: use_limiter = True limiter_threshold = 1.0 - (abs(shift) - limiter_gain) if use_limiter and shift > 0: mask = waveform > limiter_threshold temp = (waveform[mask] - limiter_threshold) * limiter_gain / (1 - limiter_threshold) output_waveform[mask] = (temp + limiter_threshold + shift).clamp(max=limiter_threshold) output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) elif use_limiter and shift < 0: mask = waveform < -limiter_threshold temp = (waveform[mask] + limiter_threshold) * limiter_gain / (1 - limiter_threshold) output_waveform[mask] = (temp - limiter_threshold + shift).clamp(min=-limiter_threshold) output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=+1) else: output_waveform = (waveform + shift).clamp(min=-1, max=+1) return output_waveform " 28109,"def generate_canonical_request(method, parsed_url, headers, signed_headers, content_sha256): """""" Generate canonical request. :param method: HTTP method. :param parsed_url: Parsed url is input from :func:`urlsplit` :param headers: HTTP header dictionary. :param content_sha256: Content sha256 hexdigest string. """""" # Use url encoded path with ~ as an exception parsed_url_path = parsed_url.path.replace(""%7E"", ""~"") lines = [method, parsed_url_path, parsed_url.query] # Headers added to canonical request. header_lines = [] for header in signed_headers: value = headers[header.title()] value = str(value).strip() header_lines.append(header + ':' + str(value)) lines = lines + header_lines lines.append('') lines.append(';'.join(signed_headers)) lines.append(content_sha256) return '\n'.join(lines) ","def generate_canonical_request(method, parsed_url, headers, signed_headers, content_sha256): """""" Generate canonical request. :param method: HTTP method. :param parsed_url: Parsed url is input from :func:`urlsplit` :param headers: HTTP header dictionary. :param content_sha256: Content sha256 hexdigest string. """""" # Should not encode ~. Decode it back if present. parsed_url_path = parsed_url.path.replace(""%7E"", ""~"") lines = [method, parsed_url_path, parsed_url.query] # Headers added to canonical request. header_lines = [] for header in signed_headers: value = headers[header.title()] value = str(value).strip() header_lines.append(header + ':' + str(value)) lines = lines + header_lines lines.append('') lines.append(';'.join(signed_headers)) lines.append(content_sha256) return '\n'.join(lines) " 14034,"def _convert_to_ewkb(gdf, geom_name, srid): """"""Convert geometries to ewkb. 
"""""" if compat.USE_PYGEOS: from pygeos import set_srid, to_wkb geoms = to_wkb( set_srid(gdf[geom_name].values.data, srid=srid), hex=True, include_srid=True ) else: from shapely.wkb import dumps geoms = [dumps(geom, srid=srid, hex=True) for geom in gdf[geom_name]] # The gdf will warn that the geometry column doesn't hold in-memory geometries # now that they are EWKB. Ignore this warning. with warnings.catch_warnings(): warnings.simplefilter(""ignore"", category=UserWarning) gdf[geom_name] = geoms return gdf ","def _convert_to_ewkb(gdf, geom_name, srid): """"""Convert geometries to ewkb. """""" if compat.USE_PYGEOS: from pygeos import set_srid, to_wkb geoms = to_wkb( set_srid(gdf[geom_name].values.data, srid=srid), hex=True, include_srid=True ) else: from shapely.wkb import dumps geoms = [dumps(geom, srid=srid, hex=True) for geom in gdf[geom_name]] # The gdf will warn that the geometry column doesn't hold in-memory geometries # now that they are EWKB. Ignore this warning. with warnings.catch_warnings(): warnings.filterwarnings(""ignore"", ""Geometry column does not contain geometry."", UserWarning) gdf[geom_name] = geoms return gdf " 54927,"def AmplitudeEmbedding(features, wires, pad): r""""""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits. If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To avail this, the argument ``pad`` should be set to ``True``. The absolute square of all elements in ``features`` has to add up to one. .. note:: AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with devices that implement this function. Args: features (array): Input array of shape ``(2**n,)`` wires (Sequence[int]): sequence of qubit indices that the template acts on pad (Boolean): controls the activation of the padding option """""" if not isinstance(wires, Iterable): raise ValueError(""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires)) if pad==True and 2**len(wires) != len(features): features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant') if pad==False and 2**len(wires) != len(features): raise ValueError(""AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "" ""got {}."".format(2 ** len(wires), len(features))) if np.linalg.norm(features,2) != 1: raise ValueError(""AmplitudeEmbedding requires a normalized feature vector."") QubitStateVector(features, wires=wires) ","def AmplitudeEmbedding(features, wires, pad): r""""""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits. If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To enable this, the argument ``pad`` should be set to ``True``. The absolute square of all elements in ``features`` has to add up to one. .. note:: AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with devices that implement this function. 
Args: features (array): Input array of shape ``(2**n,)`` wires (Sequence[int]): sequence of qubit indices that the template acts on pad (Boolean): controls the activation of the padding option """""" if not isinstance(wires, Iterable): raise ValueError(""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires)) if pad==True and 2**len(wires) != len(features): features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant') if pad==False and 2**len(wires) != len(features): raise ValueError(""AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "" ""got {}."".format(2 ** len(wires), len(features))) if np.linalg.norm(features,2) != 1: raise ValueError(""AmplitudeEmbedding requires a normalized feature vector."") QubitStateVector(features, wires=wires) " 46064,"def torch_meshgrid(*tensors): """"""A wrapper of torch.meshgrid to compat different PyTorch versions. Since PyTorch 1.10.0a0, torch.meshgrid supports the arguments ``indexing``. So we implement a wrapper here to avoid warning when using high-version PyTorch and avoid compatibility issues when using previous versions of PyTorch. Args: tensors (list of Tensor): List of scalars or 1 dimensional tensors. Returns: seq (sequence of Tensors): Sequence of meshgrid tensors. """""" if _torch_version_meshgrid_indexing: return torch.meshgrid(*tensors, indexing='ij') else: return torch.meshgrid(*tensors) # Uses indexing='ij' by default ","def torch_meshgrid(*tensors): """"""A wrapper of torch.meshgrid to compat different PyTorch versions. Since PyTorch 1.10.0a0, torch.meshgrid supports the arguments ``indexing``. So we implement a wrapper here to avoid warning when using high-version PyTorch and avoid compatibility issues when using previous versions of PyTorch. Args: tensors (list of Tensor): List of scalars or 1 dimensional tensors. Returns: Sequence[Tensor]: Sequence of meshgrid tensors. """""" if _torch_version_meshgrid_indexing: return torch.meshgrid(*tensors, indexing='ij') else: return torch.meshgrid(*tensors) # Uses indexing='ij' by default " 42925,"def orbits(photon_number: int) -> Generator[list, None, None]: """"""Generate all the possible orbits for a given photon number. Provides a generator over the integer partitions of ``photon_number``. Code derived from `website `__ of Jerome Kelleher's, which is based upon an algorithm from Ref. :cite:`kelleher2009generating`. **Example usage:** >>> o = orbits(5) >>> list(o) [[1, 1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1], [2, 2, 1], [4, 1], [3, 2], [5]] Args: photon_number (int): number of photons to generate orbits from Returns: Generator[list[int]]: orbits with total photon number adding up to ``photon_number`` """""" a = [0] * (photon_number + 1) k = 1 y = photon_number - 1 while k != 0: x = a[k - 1] + 1 k -= 1 while 2 * x <= y: a[k] = x y -= x k += 1 l = k + 1 while x <= y: a[k] = x a[l] = y yield sorted(a[: k + 2], reverse=True) x += 1 y -= 1 a[k] = x + y y = x + y - 1 yield sorted(a[: k + 1], reverse=True) ","def orbits(photon_number: int) -> Generator[list, None, None]: """"""Generate all the possible orbits for a given photon number. Provides a generator over the integer partitions of ``photon_number``. Code derived from `website `__ of Jerome Kelleher's, which is based upon an algorithm from Ref. :cite:`kelleher2009generating`. 
**Example usage:** >>> o = orbits(5) >>> list(o) [[1, 1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1], [2, 2, 1], [4, 1], [3, 2], [5]] Args: photon_number (int): number of photons to generate orbits from Returns: Generator[list[int]]: orbits with total photon number adding up to ``photon_number`` """""" a = [0] * (photon_number + 1) k = 1 y = photon_number - 1 n_samples = len(samples) while k != 0: x = a[k - 1] + 1 k -= 1 while 2 * x <= y: a[k] = x y -= x k += 1 l = k + 1 while x <= y: a[k] = x a[l] = y yield sorted(a[: k + 2], reverse=True) x += 1 y -= 1 a[k] = x + y y = x + y - 1 yield sorted(a[: k + 1], reverse=True) " 7248,"def threshold_otsu(image, nbins=256): """"""Return threshold value based on Otsu's method. Parameters ---------- image : (N, M) ndarray Grayscale input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. Raises ------ ValueError If ``image`` only contains a single grayscale value. References ---------- .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_otsu(image) >>> binary = image <= thresh Notes ----- The input image must be grayscale. """""" if image.ndim > 2 and image.shape[-1] in (3, 4): msg = ""threshold_otsu is expected to work correctly only for "" \ ""grayscale images; image shape {0} looks like an RGB image"" warn(msg.format(image.shape)) # Check if the image is multi-colored or not if np.all(image == image[0]): raise ValueError(""Cannot threshold single-valued images"") hist, bin_centers = histogram(image.ravel(), nbins, source_range='image') hist = hist.astype(float) # class probabilities for all possible thresholds weight1 = np.cumsum(hist) weight2 = np.cumsum(hist[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(hist * bin_centers) / weight1 mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of ``weight1``/``mean1`` should pair with zero values in # ``weight2``/``mean2``, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 idx = np.argmax(variance12) threshold = bin_centers[:-1][idx] return threshold ","def threshold_otsu(image, nbins=256): """"""Return threshold value based on Otsu's method. Parameters ---------- image : (N, M) ndarray Grayscale input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. Raises ------ ValueError If ``image`` only contains a single grayscale value. References ---------- .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_otsu(image) >>> binary = image <= thresh Notes ----- The input image must be grayscale. 
"""""" if image.ndim > 2 and image.shape[-1] in (3, 4): msg = ""threshold_otsu is expected to work correctly only for "" \ ""grayscale images; image shape {0} looks like an RGB image"" warn(msg.format(image.shape)) # Check if the image is multi-colored or not if np.all(image == image.reshape(-1)[0]): raise ValueError(""Cannot threshold single-valued images"") hist, bin_centers = histogram(image.ravel(), nbins, source_range='image') hist = hist.astype(float) # class probabilities for all possible thresholds weight1 = np.cumsum(hist) weight2 = np.cumsum(hist[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(hist * bin_centers) / weight1 mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of ``weight1``/``mean1`` should pair with zero values in # ``weight2``/``mean2``, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 idx = np.argmax(variance12) threshold = bin_centers[:-1][idx] return threshold " 7177,"def _get_fourier_filter(size, filter_name): """"""Construct the Fourier filter This computation lessens artifacts and removes a small bias as explained in [1], Chap 3. Equation 61 Parameters ---------- size: int filter size. filter_name: str, optional Filter used in frequency domain filtering. Ramp filter used by default. Filters available: ramp, shepp-logan, cosine, hamming, hann. Assign None to use no filter. Returns ------- fourier_filter: ndarray The computed Fourier filter. References ---------- .. [1] AC Kak, M Slaney, ""Principles of Computerized Tomographic Imaging"", IEEE Press 1988. """""" n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=np.int), np.arange(size / 2 - 1, 0, -2, dtype=np.int))) f = np.zeros(size) f[0] = 0.25 f[1::2] = -1 / (np.pi * n) ** 2 # Computing the ramp filter from the fourier transform of its # frequency domain representation lessens artifacts and removes a # small bias as explained in [1], Chap 3. Equation 61 fourier_filter = 2 * np.real(fft(f)) # ramp filter if filter_name == ""ramp"": pass elif filter_name == ""shepp-logan"": # Start from first element to avoid divide by zero omega = np.pi * fftmodule.fftfreq(size)[1:] fourier_filter[1:] *= np.sin(omega) / omega elif filter_name == ""cosine"": freq = np.pi * np.linspace(0, 1, size, endpoint=False) cosine_filter = fftmodule.fftshift(np.sin(freq)) fourier_filter *= cosine_filter elif filter_name == ""hamming"": fourier_filter *= fftmodule.fftshift(np.hamming(size)) elif filter_name == ""hann"": fourier_filter *= fftmodule.fftshift(np.hanning(size)) elif filter_name is None: fourier_filter[:] = 1 return fourier_filter[:, np.newaxis] ","def _get_fourier_filter(size, filter_name): """"""Construct the Fourier filter This computation lessens artifacts and removes a small bias as explained in [1], Chap 3. Equation 61 Parameters ---------- size: int filter size. filter_name: str, optional Filter used in frequency domain filtering. Ramp filter used by default. Filters available: ramp, shepp-logan, cosine, hamming, hann. Assign None to use no filter. Returns ------- fourier_filter: ndarray The computed Fourier filter. References ---------- .. [1] AC Kak, M Slaney, ""Principles of Computerized Tomographic Imaging"", IEEE Press 1988. 
"""""" n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=np.int), np.arange(size / 2 - 1, 0, -2, dtype=np.int))) f = np.zeros(size) f[0] = 0.25 f[1::2] = -1 / (np.pi * n) ** 2 # Computing the ramp filter from the fourier transform of its # frequency domain representation lessens artifacts and removes a # small bias as explained in [1], Chap 3. Equation 61 fourier_filter = 2 * np.real(fft(f)) # ramp filter if filter_name == ""ramp"": pass elif filter_name == ""shepp-logan"": # Start from first element to avoid divide by zero omega = np.pi * fftmodule.fftfreq(size)[1:] fourier_filter[1:] *= np.sin(omega) / omega elif filter_name == ""cosine"": freq = np.pi * np.linspace(0, 1, size, endpoint=False) cosine_filter = fftmodule.fftshift(np.sin(freq)) fourier_filter *= cosine_filter elif filter_name == ""hamming"": fourier_filter *= fftmodule.fftshift(np.hamming(size)) elif filter_name == ""hann"": fourier_filter *= fftmodule.fftshift(np.hanning(size)) elif filter_name is None: fourier_filter[:] = 1 return fourier_filter[:, np.newaxis] " 20225,"def _get_deploy_environment(): return getattr(settings, 'DEPLOY_ENVIRONMENT', None) ","def _get_deploy_environment(): return getattr(settings, 'DEPLOY_ENVIRONMENT', None) or 'local' " 10643,"def _list_plugins_from_paths(ptype, dirs, collection, depth=0): plugins = {} for path in dirs: display.debug(""Searching '{0}'s '{1}' for {2} plugins"".format(collection, path, ptype)) b_path = to_bytes(path) if os.path.basename(b_path).startswith((b'.', b'__')): # skip hidden/special dirs continue if os.path.exists(b_path): if os.path.isdir(b_path): bkey = ptype.lower() for plugin_file in os.listdir(b_path): if plugin_file.startswith((b'.', b'__')): # hidden or python internal file/dir continue display.debug(""Found possible plugin: '{0}'"".format(plugin_file)) b_plugin, ext = os.path.splitext(plugin_file) plugin = to_native(b_plugin) full_path = os.path.join(b_path, plugin_file) if os.path.isdir(full_path): # its a dir, recurse if collection in C.SYNTHETIC_COLLECTIONS: if not os.path.exists(os.path.join(full_path, b'__init__.py')): # dont recurse for synthetic unless init.py present continue # actually recurse dirs plugins.update(_list_plugins_from_paths(ptype, [to_native(full_path)], collection, depth=depth + 1)) else: if any([ plugin in C.IGNORE_FILES, # general files to ignore ext in C.REJECT_EXTS, # general extensions to ignore ext in (b'.yml', b'.yaml', b'.json'), # ignore docs files TODO: constant! 
plugin in IGNORE.get(bkey, ()), # plugin in reject list os.path.islink(full_path), # skip aliases, author should document in 'aliaes' field ]): continue if ptype in ('test', 'filter'): try: file_plugins = _list_j2_plugins_from_file(collection, full_path, ptype, plugin) except KeyError as e: display.warning('Skipping file %s: %s' % (full_path, to_native(e))) continue for plugin in file_plugins: if plugin._load_name.startswith(collection): plugin_name = plugin._load_name else: plugin_name = get_composite_name(collection, plugin._load_name, full_path, depth) plugins[plugin_name] = full_path else: plugin = get_composite_name(collection, plugin, path, depth) plugins[plugin] = full_path else: display.debug(""Skip listing plugins in '{0}' as it is not a directory"".format(path)) else: display.debug(""Skip listing plugins in '{0}' as it does not exist"".format(path)) return plugins ","def _list_plugins_from_paths(ptype, dirs, collection, depth=0): plugins = {} for path in dirs: display.debug(""Searching '{0}'s '{1}' for {2} plugins"".format(collection, path, ptype)) b_path = to_bytes(path) if os.path.basename(b_path).startswith((b'.', b'__')): # skip hidden/special dirs continue if os.path.exists(b_path): if os.path.isdir(b_path): bkey = ptype.lower() for plugin_file in os.listdir(b_path): if plugin_file.startswith((b'.', b'__')): # hidden or python internal file/dir continue display.debug(""Found possible plugin: '{0}'"".format(plugin_file)) b_plugin, ext = os.path.splitext(plugin_file) plugin = to_native(b_plugin) full_path = os.path.join(b_path, plugin_file) if os.path.isdir(full_path): # its a dir, recurse if collection in C.SYNTHETIC_COLLECTIONS: if not os.path.exists(os.path.join(full_path, b'__init__.py')): # dont recurse for synthetic unless init.py present continue # actually recurse dirs plugins.update(_list_plugins_from_paths(ptype, [to_native(full_path)], collection, depth=depth + 1)) else: if any([ plugin in C.IGNORE_FILES, # general files to ignore ext in C.REJECT_EXTS, # general extensions to ignore ext in (b'.yml', b'.yaml', b'.json'), # ignore docs files TODO: constant! plugin in IGNORE.get(bkey, ()), # plugin in reject list os.path.islink(full_path), # skip aliases, author should document in 'aliaes' field ]): continue if ptype in ('test', 'filter'): try: file_plugins = _list_j2_plugins_from_file(collection, full_path, ptype, plugin) except KeyError as e: display.warning('Skipping file %s: %s' % (full_path, to_native(e))) continue for plugin in file_plugins: plugin_name = get_composite_name(collection, plugin._load_name, full_path, depth) plugins[plugin_name] = full_path else: plugin = get_composite_name(collection, plugin, path, depth) plugins[plugin] = full_path else: display.debug(""Skip listing plugins in '{0}' as it is not a directory"".format(path)) else: display.debug(""Skip listing plugins in '{0}' as it does not exist"".format(path)) return plugins " 5770,"def roots_legendre(n, mu=False): r""""""Gauss-Legendre quadrature. Compute the sample points and weights for Gauss-Legendre quadrature [GL]_. The sample points are the roots of the nth degree Legendre polynomial :math:`P_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = 1`. See 2.2.10 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. 
Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. [GL] Gauss-Legendre quadrature, Wikipedia, https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature Examples -------- >>> from scipy.special import roots_legendre, eval_legendre >>> roots, weights = roots_legendre(9) ``roots`` holds the roots, and ``weights`` holds the weights for Gauss-Legendre quadrature. >>> roots array([-0.96816024, -0.83603111, -0.61337143, -0.32425342, 0. , 0.32425342, 0.61337143, 0.83603111, 0.96816024]) >>> weights array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936, 0.31234708, 0.2606107 , 0.18064816, 0.08127439]) Verify that we have the roots by evaluating the degree 9 Legendre polynomial at ``roots``. All the values are approximately zero: >>> eval_legendre(9, roots) array([-8.88178420e-16, -2.22044605e-16, 1.11022302e-16, 1.11022302e-16, 0.00000000e+00, -5.55111512e-17, -1.94289029e-16, 1.38777878e-16, -8.32667268e-17]) Here we'll show how the above values can be used to estimate the integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre quadrature [GL]_. First define the function and the integration limits. >>> def f(t): ... return t + 1/t ... >>> a = 1 >>> b = 2 We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral of f from t=a to t=b. The sample points in ``roots`` are from the interval [-1, 1], so we'll rewrite the integral with the simple change of variable:: x = 2/(b - a) * t - (a + b)/(b - a) with inverse:: t = (b - a)/2 * x + (a + 2)/2 Then:: integral(f(t), a, b) = (b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1) We can approximate the latter integral with the values returned by `roots_legendre`. Map the roots computed above from [-1, 1] to [a, b]. >>> t = (b - a)/2 * roots + (a + b)/2 Approximate the integral as the weighted sum of the function values. >>> (b - a)/2 * f(t).dot(weights) 2.1931471805599276 Compare that to the exact result, which is 3/2 + log(2): >>> 1.5 + np.log(2) 2.1931471805599454 """""" m = int(n) if n < 1 or n != m: raise ValueError(""n must be a positive integer."") mu0 = 2.0 an_func = lambda k: 0.0 * k bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) f = cephes.eval_legendre df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + n*cephes.eval_legendre(n-1, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) ","def roots_legendre(n, mu=False): r""""""Gauss-Legendre quadrature. Compute the sample points and weights for Gauss-Legendre quadrature [GL]_. The sample points are the roots of the nth degree Legendre polynomial :math:`P_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = 1`. See 2.2.10 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. 
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. [GL] Gauss-Legendre quadrature, Wikipedia, https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature Examples -------- >>> from scipy.special import roots_legendre, eval_legendre >>> roots, weights = roots_legendre(9) ``roots`` holds the roots, and ``weights`` holds the weights for Gauss-Legendre quadrature. >>> roots array([-0.96816024, -0.83603111, -0.61337143, -0.32425342, 0. , 0.32425342, 0.61337143, 0.83603111, 0.96816024]) >>> weights array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936, 0.31234708, 0.2606107 , 0.18064816, 0.08127439]) Verify that we have the roots by evaluating the degree 9 Legendre polynomial at ``roots``. All the values are approximately zero: >>> eval_legendre(9, roots) array([-8.88178420e-16, -2.22044605e-16, 1.11022302e-16, 1.11022302e-16, 0.00000000e+00, -5.55111512e-17, -1.94289029e-16, 1.38777878e-16, -8.32667268e-17]) Here we'll show how the above values can be used to estimate the integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre quadrature [GL]_. First define the function and the integration limits. >>> def f(t): ... return t + 1/t ... >>> a = 1 >>> b = 2 We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral of f from t=a to t=b. The sample points in ``roots`` are from the interval [-1, 1], so we'll rewrite the integral with the simple change of variable:: x = 2/(b - a) * t - (a + b)/(b - a) with inverse:: t = (b - a)/2 * x + (a + 2)/2 Then:: integral(f(t), a, b) = (b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1) We can approximate the latter integral with the values returned by `roots_legendre`. Map the roots computed above from [-1, 1] to [a, b]. >>> t = (b - a)/2 * roots + (a + b)/2 Approximate the integral as the weighted sum of the function values. 
>>> (b - a)/2 * f(t).dot(weights) 2.1931471805599276 Compare that to the exact result, which is :math:`3/2 + log(2)`: >>> 1.5 + np.log(2) 2.1931471805599454 """""" m = int(n) if n < 1 or n != m: raise ValueError(""n must be a positive integer."") mu0 = 2.0 an_func = lambda k: 0.0 * k bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) f = cephes.eval_legendre df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + n*cephes.eval_legendre(n-1, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) " 46599,"def _resolve(cfg: Any) -> Any: if isinstance(cfg, DictConfig): for k in cfg.keys(): node = cfg._get_node(k) cfg[k] = _resolve(node) if isinstance(cfg, ListConfig): for i in range(len(cfg)): node = cfg._get_node(i) cfg[i] = _resolve(node) elif isinstance(cfg, ValueNode): try: cfg = cfg._dereference_node() except InterpolationToMissingValueError: cfg = MISSING return cfg ","def _resolve(cfg: Any) -> Any: if isinstance(cfg, DictConfig): for k in cfg.keys(): node = cfg._get_node(k) cfg[k] = _resolve(node) elif isinstance(cfg, ListConfig): for i in range(len(cfg)): node = cfg._get_node(i) cfg[i] = _resolve(node) elif isinstance(cfg, ValueNode): try: cfg = cfg._dereference_node() except InterpolationToMissingValueError: cfg = MISSING return cfg " 1521,"def test_make_blobs_return_centers(): n_samples = [10, 20] n_features = 3 X, y, centers = make_blobs(n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0) assert_array_equal(centers.shape, [len(n_samples), n_features]) ","def test_make_blobs_return_centers(): n_samples = [10, 20] n_features = 3 X, y, centers = make_blobs(n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0) assert centers.shape == (len(n_samples), n_features) " 5998,"def get_simd_group_size(dev, type_size): """"""Return an estimate of how many work items will be executed across SIMD lanes. This returns the size of what Nvidia calls a warp and what AMD calls a wavefront. Only refers to implicit SIMD. :arg type_size: number of bytes in vector entry type. """""" try: return dev.warp_size_nv except Exception: pass lc_vendor = dev.platform.vendor.lower() lc_real_vendor = dev.vendor.lower() if ""nvidia"" in lc_vendor or ""nvidia"" in lc_real_vendor: return 32 if (""advanced micro"" in lc_vendor or ""ati"" in lc_vendor or ""advanced micro"" in lc_real_vendor or ""ati"" in lc_real_vendor): if dev.type & cl.device_type.GPU: # Tomasz Rybak says, in response to reduction misbehaving on the AMD # 'Loveland' APU: # # Like in CUDA reduction bug (related to Fermi) it again seems # to be related to too eager concurrency when reducing results. # According to http://oscarbg.blogspot.com/2009/10/news-from-web.html # ""Actually the wavefront size is only 64 for the highend cards(48XX, # 58XX, 57XX), but 32 for the middleend cards and 16 for the lowend # cards."" # IMO we should use PREFERRED_WORK_GROUP_SIZE_MULTIPLE to get # non_sync_size. At the same size we lose SIMD CPU optimisation, # but I do not know for now how to fix those two at the same time. # Attached patch fixes problem on Loveland, not breaking anything on # NVIDIA ION. # This is therefore our best guess as to the SIMD group size. return reasonable_work_group_size_multiple(dev) elif dev.type & cl.device_type.CPU: return 1 else: raise RuntimeError(""unexpected AMD device type"") if dev.type & cl.device_type.CPU: # implicit assumption: Impl. 
will vectorize return 1 return None ","def get_simd_group_size(dev, type_size): """"""Return an estimate of how many work items will be executed across SIMD lanes. This returns the size of what Nvidia calls a warp and what AMD calls a wavefront. Only refers to implicit SIMD. :arg type_size: number of bytes in vector entry type. """""" try: return dev.warp_size_nv except Exception: pass lc_plat_vendor = dev.platform.vendor.lower() lc_real_vendor = dev.vendor.lower() if ""nvidia"" in lc_vendor or ""nvidia"" in lc_real_vendor: return 32 if (""advanced micro"" in lc_vendor or ""ati"" in lc_vendor or ""advanced micro"" in lc_real_vendor or ""ati"" in lc_real_vendor): if dev.type & cl.device_type.GPU: # Tomasz Rybak says, in response to reduction misbehaving on the AMD # 'Loveland' APU: # # Like in CUDA reduction bug (related to Fermi) it again seems # to be related to too eager concurrency when reducing results. # According to http://oscarbg.blogspot.com/2009/10/news-from-web.html # ""Actually the wavefront size is only 64 for the highend cards(48XX, # 58XX, 57XX), but 32 for the middleend cards and 16 for the lowend # cards."" # IMO we should use PREFERRED_WORK_GROUP_SIZE_MULTIPLE to get # non_sync_size. At the same size we lose SIMD CPU optimisation, # but I do not know for now how to fix those two at the same time. # Attached patch fixes problem on Loveland, not breaking anything on # NVIDIA ION. # This is therefore our best guess as to the SIMD group size. return reasonable_work_group_size_multiple(dev) elif dev.type & cl.device_type.CPU: return 1 else: raise RuntimeError(""unexpected AMD device type"") if dev.type & cl.device_type.CPU: # implicit assumption: Impl. will vectorize return 1 return None " 57931,"def list_attached_group_policies(args, aws_client): client = aws_client.aws_session( service=SERVICE, role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration'), ) group_name = args.get('groupName', """") marker = args.get('marker', None) limit, is_manual, page_size = get_limit(args) kwargs = { 'GroupName': group_name, 'MaxItems': limit } if marker: kwargs.update({'Marker': marker}) response = client.list_attached_group_policies(**kwargs) data = response.get('AttachedPolicies', []) marker = response.get('Marker', None) if is_manual and page_size and len(data) > page_size: data = data[-1 * args.get('page_size'):] policy_data = [] for policy in data: policy_data.append({ 'GroupName': group_name, 'PolicyArn': policy.get('PolicyArn', ''), 'PolicyName': policy.get('PolicyName', '') }) ec = {'AWS.IAM.AttachedGroupPolicies(val.PolicyArn && val.GroupName && val.PolicyArn === obj.PolicyArn && ' 'val.GroupName === obj.GroupName)': policy_data, 'AWS.IAM.Groups(val.GroupName === \'{}\').AttachedPoliciesMarker'.format(group_name): marker} human_readable = tableToMarkdown('AWS IAM Attached Policies for group {}'.format(group_name), headers=['PolicyName', 'PolicyArn'], headerTransform=pascalToSpace, t=data) return_outputs(human_readable, ec) ","def list_attached_group_policies(args, aws_client): client = aws_client.aws_session( service=SERVICE, role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration'), ) group_name = args.get('groupName') marker = args.get('marker') limit, is_manual, page_size = get_limit(args) kwargs = { 'GroupName': group_name, 'MaxItems': limit } if marker: kwargs.update({'Marker': marker}) response = 
client.list_attached_group_policies(**kwargs) data = response.get('AttachedPolicies', []) marker = response.get('Marker', None) if is_manual and page_size and len(data) > page_size: data = data[-1 * args.get('page_size'):] policy_data = [] for policy in data: policy_data.append({ 'GroupName': group_name, 'PolicyArn': policy.get('PolicyArn', ''), 'PolicyName': policy.get('PolicyName', '') }) ec = {'AWS.IAM.AttachedGroupPolicies(val.PolicyArn && val.GroupName && val.PolicyArn === obj.PolicyArn && ' 'val.GroupName === obj.GroupName)': policy_data, 'AWS.IAM.Groups(val.GroupName === \'{}\').AttachedPoliciesMarker'.format(group_name): marker} human_readable = tableToMarkdown('AWS IAM Attached Policies for group {}'.format(group_name), headers=['PolicyName', 'PolicyArn'], headerTransform=pascalToSpace, t=data) return_outputs(human_readable, ec) " 10673,"def do_vcs_install(manifest_in, versionfile_source, ipy): """"""Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """""" GITS = [""git""] if sys.platform == ""win32"": GITS = [""git.cmd"", ""git.exe""] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith("".pyc"") or me.endswith("".pyo""): me = os.path.splitext(me)[0] + "".py"" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = ""versioneer.py"" files.append(versioneer_file) present = False try: with open("".gitattributes"", ""r"") as f: for line in f.readlines(): if line.strip().startswith(versionfile_source): if ""export-subst"" in line.strip().split()[1:]: present = True except EnvironmentError: pass if not present: f = open("".gitattributes"", ""a+"") f.write(""%s export-subst\n"" % versionfile_source) f.close() files.append("".gitattributes"") run_command(GITS, [""add"", ""--""] + files) ","def do_vcs_install(manifest_in, versionfile_source, ipy): """"""Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """""" GITS = [""git""] if sys.platform == ""win32"": GITS = [""git.cmd"", ""git.exe""] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith("".pyc"") or me.endswith("".pyo""): me = os.path.splitext(me)[0] + "".py"" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = ""versioneer.py"" files.append(versioneer_file) present = False try: with open("".gitattributes"", ""r"") as f: for line in f.readlines(): if line.strip().startswith(versionfile_source): if ""export-subst"" in line.strip().split()[1:]: present = True except EnvironmentError: pass if not present: f = open("".gitattributes"", ""a+"") f.write(""%s export-subst\n"" % versionfile_source) f.close() files.append("".gitattributes"") run_command(GITS, [""add"", ""--""] + files) " 22506,"def get_rules_specification(): return yaml.safe_load(resource_string(__name__.rsplit('.', 1)[0], 'rules_dsl_spec.yml')) ","def get_rules_specification(): return yaml.safe_load(resource_string(__package__, ""rules_dsl_spec.yml"")) " 21991,"def update_local_associations( sydent, db: sqlite3.Connection, send_email: bool, dry_run: bool, test=False, ): """"""Update the DB table local_threepid_associations so that all stored emails are casefolded, and any duplicate mxid's associated with the given email are deleted. 
:return: None """""" cur = db.cursor() res = cur.execute( ""SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'"" ""ORDER BY ts DESC"" ) # a dict that associates an email address with correspoinding mxids and lookup hashes associations: Dict[str, List[Tuple[str, str, str]]] = {} # iterate through selected associations, casefold email, rehash it, and add to # associations dict for address, mxid in res.fetchall(): casefold_address = address.casefold() # rehash email since hashes are case-sensitive lookup_hash = calculate_lookup_hash(sydent, casefold_address) if casefold_address in associations: associations[casefold_address].append((address, mxid, lookup_hash)) else: associations[casefold_address] = [(address, mxid, lookup_hash)] # list of arguments to update db with db_update_args: List[Tuple[str, str, str, str]] = [] # list of mxids to delete to_delete: List[Tuple[str]] = [] # The MXIDs associated with rows we're about to delete, indexed by the casefolded # address they're associated with. to_delete_mxids: Dict[str, Set[str]] = {} # The MXIDs associated with rows we're not going to delete, so we can compare the one # associated with a given casefolded address with the one(s) we want to delete for the # same address and figure out if we want to send them an email. to_keep_mxids: Dict[str, str] = {} for casefold_address, assoc_tuples in associations.items(): db_update_args.append( ( casefold_address, assoc_tuples[0][2], assoc_tuples[0][0], assoc_tuples[0][1], ) ) if len(assoc_tuples) > 1: # Iterate over all associations except for the first one, since we've already # processed it. to_delete_mxids[casefold_address] = set() to_keep_mxids[casefold_address] = assoc_tuples[0][1].lower() for address, mxid, _ in assoc_tuples[1:]: to_delete.append((address,)) to_delete_mxids[casefold_address].add(mxid.lower()) if not test: print( f""{len(to_delete)} rows to delete, {len(db_update_args)} rows to update in local_threepid_associations"" ) # Update the database before sending the emails, that way if the update fails the # affected users haven't been notified. if not dry_run: if len(to_delete) > 0: cur.executemany( ""DELETE FROM local_threepid_associations WHERE address = ?"", to_delete ) if len(db_update_args) > 0: cur.executemany( ""UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE address = ? AND mxid = ?"", db_update_args, ) # We've finished updating the database, committing the transaction. db.commit() # iterate through the mxids and send emails if send_email and not dry_run: for address, mxids in to_delete_mxids.items(): for mxid in mxids: # If the MXID is one that will still be associated with this email address # after this run, don't send an email for it. if mxid == to_keep_mxids[address]: continue # Send the email with exponential backoff - that way we don't stop # sending halfway through if the SMTP server rejects our email (e.g. # because of rate limiting). The alternative would mean the first # addresses of the list receive duplicate emails. 
def sendWithBackoff(backoff): time.sleep(backoff) try: templateFile = sydent.get_branded_template( None, ""migration_template.eml"", (""email"", ""email.template""), ) sendEmail( sydent, templateFile, address, {""mxid"": mxid, ""subject_header_value"": EMAIL_SUBJECT}, log_send_errors=False, ) if not test: print(""Sent email to %s"" % address) except EmailSendException: if not test: print( ""Failed to send email to %s, retrying in %ds"" % (address, backoff * 2) ) sendWithBackoff(backoff * 2) sendWithBackoff(1 if not test else 0) ","def update_local_associations( sydent, db: sqlite3.Connection, send_email: bool, dry_run: bool, test: bool = False, ): """"""Update the DB table local_threepid_associations so that all stored emails are casefolded, and any duplicate mxid's associated with the given email are deleted. :return: None """""" cur = db.cursor() res = cur.execute( ""SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'"" ""ORDER BY ts DESC"" ) # a dict that associates an email address with correspoinding mxids and lookup hashes associations: Dict[str, List[Tuple[str, str, str]]] = {} # iterate through selected associations, casefold email, rehash it, and add to # associations dict for address, mxid in res.fetchall(): casefold_address = address.casefold() # rehash email since hashes are case-sensitive lookup_hash = calculate_lookup_hash(sydent, casefold_address) if casefold_address in associations: associations[casefold_address].append((address, mxid, lookup_hash)) else: associations[casefold_address] = [(address, mxid, lookup_hash)] # list of arguments to update db with db_update_args: List[Tuple[str, str, str, str]] = [] # list of mxids to delete to_delete: List[Tuple[str]] = [] # The MXIDs associated with rows we're about to delete, indexed by the casefolded # address they're associated with. to_delete_mxids: Dict[str, Set[str]] = {} # The MXIDs associated with rows we're not going to delete, so we can compare the one # associated with a given casefolded address with the one(s) we want to delete for the # same address and figure out if we want to send them an email. to_keep_mxids: Dict[str, str] = {} for casefold_address, assoc_tuples in associations.items(): db_update_args.append( ( casefold_address, assoc_tuples[0][2], assoc_tuples[0][0], assoc_tuples[0][1], ) ) if len(assoc_tuples) > 1: # Iterate over all associations except for the first one, since we've already # processed it. to_delete_mxids[casefold_address] = set() to_keep_mxids[casefold_address] = assoc_tuples[0][1].lower() for address, mxid, _ in assoc_tuples[1:]: to_delete.append((address,)) to_delete_mxids[casefold_address].add(mxid.lower()) if not test: print( f""{len(to_delete)} rows to delete, {len(db_update_args)} rows to update in local_threepid_associations"" ) # Update the database before sending the emails, that way if the update fails the # affected users haven't been notified. if not dry_run: if len(to_delete) > 0: cur.executemany( ""DELETE FROM local_threepid_associations WHERE address = ?"", to_delete ) if len(db_update_args) > 0: cur.executemany( ""UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE address = ? AND mxid = ?"", db_update_args, ) # We've finished updating the database, committing the transaction. 
db.commit() # iterate through the mxids and send emails if send_email and not dry_run: for address, mxids in to_delete_mxids.items(): for mxid in mxids: # If the MXID is one that will still be associated with this email address # after this run, don't send an email for it. if mxid == to_keep_mxids[address]: continue # Send the email with exponential backoff - that way we don't stop # sending halfway through if the SMTP server rejects our email (e.g. # because of rate limiting). The alternative would mean the first # addresses of the list receive duplicate emails. def sendWithBackoff(backoff): time.sleep(backoff) try: templateFile = sydent.get_branded_template( None, ""migration_template.eml"", (""email"", ""email.template""), ) sendEmail( sydent, templateFile, address, {""mxid"": mxid, ""subject_header_value"": EMAIL_SUBJECT}, log_send_errors=False, ) if not test: print(""Sent email to %s"" % address) except EmailSendException: if not test: print( ""Failed to send email to %s, retrying in %ds"" % (address, backoff * 2) ) sendWithBackoff(backoff * 2) sendWithBackoff(1 if not test else 0) " 45015,"def list_run_artifact_links( account_id: int, run_id: int, token: str, domain: str = None, ) -> List[Tuple[str, str]]: """""" Lists URLs that can be used to download artifacts from a dbt run Args: - account_id (int): dbt Cloud account ID - run_id (int): dbt Cloud job run ID - token (str): dbt Cloud token - domain (str): The domain the function should call, normally cloud.getdbt.com Returns: - List of artifact download URLs Raises: - DbtCloudListArtifactsFailed: if API to list dbt artifacts fails """""" list_run_artifact_response = requests.get( url=__DBT_CLOUD_LIST_RUN_ARTIFACTS_ENDPOINT_V2.format( accountId=account_id, runId=run_id, apiDomain=domain ), headers={""Authorization"": f""Bearer {token}""}, ) if list_run_artifact_response.status_code != 200: raise DbtCloudListArtifactsFailed(list_run_artifact_response.reason) artifact_paths = list_run_artifact_response.json().get(""data"") return [ ( __DBT_CLOUD_GET_RUN_ARTIFACT_ENDPOINT_V2.format( accountId=account_id, runId=run_id, path=artifact_path, apiDomain=domain ), artifact_path, ) for artifact_path in artifact_paths ] ","def list_run_artifact_links( account_id: int, run_id: int, token: str, domain: str = None, ) -> List[Tuple[str, str]]: """""" Lists URLs that can be used to download artifacts from a dbt run Args: - account_id (int): dbt Cloud account ID - run_id (int): dbt Cloud job run ID - token (str): dbt Cloud token - domain (str): The domain the function should call (e.g. `cloud.getdbt.com`). 
Returns: - List of artifact download URLs Raises: - DbtCloudListArtifactsFailed: if API to list dbt artifacts fails """""" list_run_artifact_response = requests.get( url=__DBT_CLOUD_LIST_RUN_ARTIFACTS_ENDPOINT_V2.format( accountId=account_id, runId=run_id, apiDomain=domain ), headers={""Authorization"": f""Bearer {token}""}, ) if list_run_artifact_response.status_code != 200: raise DbtCloudListArtifactsFailed(list_run_artifact_response.reason) artifact_paths = list_run_artifact_response.json().get(""data"") return [ ( __DBT_CLOUD_GET_RUN_ARTIFACT_ENDPOINT_V2.format( accountId=account_id, runId=run_id, path=artifact_path, apiDomain=domain ), artifact_path, ) for artifact_path in artifact_paths ] " 37519,"def assemble( experiments: Union[ QuantumCircuit, List[QuantumCircuit], Schedule, List[Schedule], ScheduleBlock, ScheduleBlock, ], backend: Optional[Union[Backend, BaseBackend]] = None, qobj_id: Optional[str] = None, qobj_header: Optional[Union[QobjHeader, Dict]] = None, shots: Optional[int] = None, memory: Optional[bool] = False, max_credits: Optional[int] = None, seed_simulator: Optional[int] = None, qubit_lo_freq: Optional[List[float]] = None, meas_lo_freq: Optional[List[float]] = None, qubit_lo_range: Optional[List[float]] = None, meas_lo_range: Optional[List[float]] = None, schedule_los: Optional[ Union[ List[Union[Dict[PulseChannel, float], LoConfig]], Union[Dict[PulseChannel, float], LoConfig], ] ] = None, meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED, meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE, meas_map: Optional[List[List[Qubit]]] = None, memory_slot_size: int = 100, rep_time: Optional[int] = None, rep_delay: Optional[float] = None, parameter_binds: Optional[List[Dict[Parameter, float]]] = None, parametric_pulses: Optional[List[str]] = None, init_qubits: bool = True, **run_config: Dict, ) -> Qobj: """"""Assemble a list of circuits or pulse schedules into a ``Qobj``. This function serializes the payloads, which could be either circuits or schedules, to create ``Qobj`` ""experiments"". It further annotates the experiment payload with header and configurations. Args: experiments: Circuit(s) or pulse schedule(s) to execute backend: If set, some runtime options are automatically grabbed from ``backend.configuration()`` and ``backend.defaults()``. If any other option is explicitly set (e.g., ``rep_time``), it will override the backend's. If any other options is set in the run_config, it will also override the backend's. qobj_id: String identifier to annotate the ``Qobj`` qobj_header: User input that will be inserted in ``Qobj`` header, and will also be copied to the corresponding Result header. Headers do not affect the run. shots: Number of repetitions of each circuit, for sampling. Default: 1024 or ``max_shots`` from the backend configuration, whichever is smaller memory: If ``True``, per-shot measurement bitstrings are returned as well (provided the backend supports it). For OpenPulse jobs, only measurement level 2 supports this option. max_credits: Maximum credits to spend on job. Default: 10 seed_simulator: Random seed to control sampling, for when backend is a simulator qubit_lo_freq: List of job level qubit drive LO frequencies in Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits.`` meas_lo_freq: List of measurement LO frequencies in Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits.`` qubit_lo_range: List of job level drive LO ranges each of form ``[range_min, range_max]`` in Hz. 
Used to validate ``qubit_lo_freq``. Must have length ``n_qubits.`` meas_lo_range: List of job level measurement LO ranges each of form ``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have length ``n_qubits.`` schedule_los: Experiment level (ie circuit or schedule) LO frequency configurations for qubit drive and measurement channels. These values override the job level values from ``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm and pulse jobs. meas_level: Set the appropriate level of the measurement output for pulse experiments. meas_return: Level of measurement data for the backend to return. For ``meas_level`` 0 and 1: * ``single`` returns information from every shot. * ``avg`` returns average measurement output (averaged over number of shots). meas_map: List of lists, containing qubits that must be measured together. memory_slot_size: Size of each memory slot if the output is Level 0. rep_time (int): Time per program execution in seconds. Must be from the list provided by the backend (``backend.configuration().rep_times``). Defaults to the first entry. rep_delay (float): Delay between programs in seconds. Only supported on certain backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported, ``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied by the backend (``backend.configuration().rep_delay_range``). Default is given by ``backend.configuration().default_rep_delay``. parameter_binds: List of Parameter bindings over which the set of experiments will be executed. Each list element (bind) should be of the form {Parameter1: value1, Parameter2: value2, ...}. All binds will be executed across all experiments; e.g., if parameter_binds is a length-n list, and there are m experiments, a total of m x n experiments will be run (one for each experiment/bind pair). parametric_pulses: A list of pulse shapes which are supported internally on the backend. Example:: ['gaussian', 'constant'] init_qubits: Whether to reset the qubits to the ground state for each shot. Default: ``True``. **run_config: Extra arguments used to configure the run (e.g., for Aer configurable backends). Refer to the backend documentation for details on these arguments. Returns: A ``Qobj`` that can be run on a backend. Depending on the type of input, this will be either a ``QasmQobj`` or a ``PulseQobj``. 
Raises: QiskitError: if the input cannot be interpreted as either circuits or schedules """""" start_time = time() experiments = experiments if isinstance(experiments, list) else [experiments] qobj_id, qobj_header, run_config_common_dict = _parse_common_args( backend, qobj_id, qobj_header, shots, memory, max_credits, seed_simulator, init_qubits, rep_delay, qubit_lo_freq, meas_lo_freq, qubit_lo_range, meas_lo_range, schedule_los, **run_config, ) # assemble either circuits or schedules if all(isinstance(exp, QuantumCircuit) for exp in experiments): run_config = _parse_circuit_args( parameter_binds, backend, meas_level, meas_return, parametric_pulses, **run_config_common_dict, ) # If circuits are parameterized, bind parameters and remove from run_config bound_experiments, run_config = _expand_parameters( circuits=experiments, run_config=run_config ) end_time = time() _log_assembly_time(start_time, end_time) return assemble_circuits( circuits=bound_experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config, ) elif all(isinstance(exp, (ScheduleBlock, Schedule, Instruction)) for exp in experiments): run_config = _parse_pulse_args( backend, meas_level, meas_return, meas_map, memory_slot_size, rep_time, parametric_pulses, **run_config_common_dict, ) end_time = time() _log_assembly_time(start_time, end_time) return assemble_schedules( schedules=experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config ) else: raise QiskitError(""bad input to assemble() function; must be either circuits or schedules"") ","def assemble( experiments: Union[ QuantumCircuit, List[QuantumCircuit], Schedule, List[Schedule], ScheduleBlock, List[ScheduleBlock], ], backend: Optional[Union[Backend, BaseBackend]] = None, qobj_id: Optional[str] = None, qobj_header: Optional[Union[QobjHeader, Dict]] = None, shots: Optional[int] = None, memory: Optional[bool] = False, max_credits: Optional[int] = None, seed_simulator: Optional[int] = None, qubit_lo_freq: Optional[List[float]] = None, meas_lo_freq: Optional[List[float]] = None, qubit_lo_range: Optional[List[float]] = None, meas_lo_range: Optional[List[float]] = None, schedule_los: Optional[ Union[ List[Union[Dict[PulseChannel, float], LoConfig]], Union[Dict[PulseChannel, float], LoConfig], ] ] = None, meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED, meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE, meas_map: Optional[List[List[Qubit]]] = None, memory_slot_size: int = 100, rep_time: Optional[int] = None, rep_delay: Optional[float] = None, parameter_binds: Optional[List[Dict[Parameter, float]]] = None, parametric_pulses: Optional[List[str]] = None, init_qubits: bool = True, **run_config: Dict, ) -> Qobj: """"""Assemble a list of circuits or pulse schedules into a ``Qobj``. This function serializes the payloads, which could be either circuits or schedules, to create ``Qobj`` ""experiments"". It further annotates the experiment payload with header and configurations. Args: experiments: Circuit(s) or pulse schedule(s) to execute backend: If set, some runtime options are automatically grabbed from ``backend.configuration()`` and ``backend.defaults()``. If any other option is explicitly set (e.g., ``rep_time``), it will override the backend's. If any other options is set in the run_config, it will also override the backend's. qobj_id: String identifier to annotate the ``Qobj`` qobj_header: User input that will be inserted in ``Qobj`` header, and will also be copied to the corresponding Result header. 
Headers do not affect the run. shots: Number of repetitions of each circuit, for sampling. Default: 1024 or ``max_shots`` from the backend configuration, whichever is smaller memory: If ``True``, per-shot measurement bitstrings are returned as well (provided the backend supports it). For OpenPulse jobs, only measurement level 2 supports this option. max_credits: Maximum credits to spend on job. Default: 10 seed_simulator: Random seed to control sampling, for when backend is a simulator qubit_lo_freq: List of job level qubit drive LO frequencies in Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits.`` meas_lo_freq: List of measurement LO frequencies in Hz. Overridden by ``schedule_los`` if specified. Must have length ``n_qubits.`` qubit_lo_range: List of job level drive LO ranges each of form ``[range_min, range_max]`` in Hz. Used to validate ``qubit_lo_freq``. Must have length ``n_qubits.`` meas_lo_range: List of job level measurement LO ranges each of form ``[range_min, range_max]`` in Hz. Used to validate ``meas_lo_freq``. Must have length ``n_qubits.`` schedule_los: Experiment level (ie circuit or schedule) LO frequency configurations for qubit drive and measurement channels. These values override the job level values from ``default_qubit_los`` and ``default_meas_los``. Frequencies are in Hz. Settable for qasm and pulse jobs. meas_level: Set the appropriate level of the measurement output for pulse experiments. meas_return: Level of measurement data for the backend to return. For ``meas_level`` 0 and 1: * ``single`` returns information from every shot. * ``avg`` returns average measurement output (averaged over number of shots). meas_map: List of lists, containing qubits that must be measured together. memory_slot_size: Size of each memory slot if the output is Level 0. rep_time (int): Time per program execution in seconds. Must be from the list provided by the backend (``backend.configuration().rep_times``). Defaults to the first entry. rep_delay (float): Delay between programs in seconds. Only supported on certain backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported, ``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied by the backend (``backend.configuration().rep_delay_range``). Default is given by ``backend.configuration().default_rep_delay``. parameter_binds: List of Parameter bindings over which the set of experiments will be executed. Each list element (bind) should be of the form {Parameter1: value1, Parameter2: value2, ...}. All binds will be executed across all experiments; e.g., if parameter_binds is a length-n list, and there are m experiments, a total of m x n experiments will be run (one for each experiment/bind pair). parametric_pulses: A list of pulse shapes which are supported internally on the backend. Example:: ['gaussian', 'constant'] init_qubits: Whether to reset the qubits to the ground state for each shot. Default: ``True``. **run_config: Extra arguments used to configure the run (e.g., for Aer configurable backends). Refer to the backend documentation for details on these arguments. Returns: A ``Qobj`` that can be run on a backend. Depending on the type of input, this will be either a ``QasmQobj`` or a ``PulseQobj``. 
Raises: QiskitError: if the input cannot be interpreted as either circuits or schedules """""" start_time = time() experiments = experiments if isinstance(experiments, list) else [experiments] qobj_id, qobj_header, run_config_common_dict = _parse_common_args( backend, qobj_id, qobj_header, shots, memory, max_credits, seed_simulator, init_qubits, rep_delay, qubit_lo_freq, meas_lo_freq, qubit_lo_range, meas_lo_range, schedule_los, **run_config, ) # assemble either circuits or schedules if all(isinstance(exp, QuantumCircuit) for exp in experiments): run_config = _parse_circuit_args( parameter_binds, backend, meas_level, meas_return, parametric_pulses, **run_config_common_dict, ) # If circuits are parameterized, bind parameters and remove from run_config bound_experiments, run_config = _expand_parameters( circuits=experiments, run_config=run_config ) end_time = time() _log_assembly_time(start_time, end_time) return assemble_circuits( circuits=bound_experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config, ) elif all(isinstance(exp, (ScheduleBlock, Schedule, Instruction)) for exp in experiments): run_config = _parse_pulse_args( backend, meas_level, meas_return, meas_map, memory_slot_size, rep_time, parametric_pulses, **run_config_common_dict, ) end_time = time() _log_assembly_time(start_time, end_time) return assemble_schedules( schedules=experiments, qobj_id=qobj_id, qobj_header=qobj_header, run_config=run_config ) else: raise QiskitError(""bad input to assemble() function; must be either circuits or schedules"") " 12997,"def update_order_prices(order, discounts): """"""Update prices in order with given discounts and proper taxes."""""" manager = get_plugins_manager() channel = order.channel for line in order: # type: OrderLine if line.variant: product = line.variant.product channel_listing = line.variant.channel_listings.get(channel=channel) collections = product.collections.all() unit_price = line.variant.get_price( product, collections, channel, channel_listing, discounts ) unit_price = TaxedMoney(unit_price, unit_price) line.unit_price = unit_price line.save( update_fields=[ ""currency"", ""unit_price_net_amount"", ""unit_price_gross_amount"", ] ) price = manager.calculate_order_line_unit(line) if price != line.unit_price: line.unit_price = price if price.tax and price.net: line.tax_rate = manager.get_order_tax_rate( order, product, None, price ) line.save() if order.shipping_method: order.shipping_price = manager.calculate_order_shipping(order) order.save( update_fields=[ ""shipping_price_net_amount"", ""shipping_price_gross_amount"", ""currency"", ] ) recalculate_order(order) ","def update_order_prices(order, discounts): """"""Update prices in order with given discounts and proper taxes."""""" manager = get_plugins_manager() channel = order.channel for line in order: # type: OrderLine if line.variant: product = line.variant.product channel_listing = line.variant.channel_listings.get(channel=channel) collections = product.collections.all() unit_price = line.variant.get_price( product, collections, channel, channel_listing, discounts ) unit_price = TaxedMoney(unit_price, unit_price) line.unit_price = unit_price line.save( update_fields=[ ""currency"", ""unit_price_net_amount"", ""unit_price_gross_amount"", ] ) price = manager.calculate_order_line_unit(line) if price != line.unit_price: line.unit_price = price if price.tax and price.net: line.tax_rate = manager.get_order_line_tax_rate( order, product, None, price ) line.save() if order.shipping_method: 
order.shipping_price = manager.calculate_order_shipping(order) order.save( update_fields=[ ""shipping_price_net_amount"", ""shipping_price_gross_amount"", ""currency"", ] ) recalculate_order(order) " 35107,"def test_fake_quantize_resize_bilinear(): x = relay.var(""x"", shape=[1, 3, 224, 224], dtype=""int8"") zero = relay.const(0) x = relay.qnn.op.dequantize(x, relay.const(2.0), zero) op = relay.image.resize2d(x, size=[4, 4], method=""linear"") op = relay.qnn.op.quantize(op, relay.const(2.0), zero) x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=""int8"") compare_fq_to_int(op, [x_np], allow_rounding_error=True) ","def test_fake_quantize_image_resize_bilinear(): x = relay.var(""x"", shape=[1, 3, 224, 224], dtype=""int8"") zero = relay.const(0) x = relay.qnn.op.dequantize(x, relay.const(2.0), zero) op = relay.image.resize2d(x, size=[4, 4], method=""linear"") op = relay.qnn.op.quantize(op, relay.const(2.0), zero) x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype=""int8"") compare_fq_to_int(op, [x_np], allow_rounding_error=True) " 6949,"def make_boilerplate(dest, app_name, no_git_init=False): if not os.path.exists(dest): print(""Destination directory does not exist"") return # app_name should be in snake_case app_name = frappe.scrub(app_name) hooks = frappe._dict() hooks.app_name = app_name app_title = hooks.app_name.replace(""_"", "" "").title() for key in (""App Title (default: {0})"".format(app_title), ""App Description"", ""App Publisher"", ""App Email"", ""App Icon (default 'octicon octicon-file-directory')"", ""App Color (default 'grey')"", ""App License (default 'MIT')""): hook_key = key.split("" ("")[0].lower().replace("" "", ""_"") hook_val = None while not hook_val: hook_val = cstr(input(key + "": "")) if not hook_val: defaults = { ""app_title"": app_title, ""app_icon"": ""octicon octicon-file-directory"", ""app_color"": ""grey"", ""app_license"": ""MIT"" } if hook_key in defaults: hook_val = defaults[hook_key] if hook_key==""app_name"" and hook_val.lower().replace("" "", ""_"") != hook_val: print(""App Name must be all lowercase and without spaces"") hook_val = """" elif hook_key==""app_title"" and not re.match(r""^(?![\W])[^\d_\s][\w -]+$"", hook_val, re.UNICODE): print(""App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores"") hook_val = """" hooks[hook_key] = hook_val frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""templates""), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""www"")) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""templates"", ""pages""), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""templates"", ""includes"")) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""config""), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""public"", ""css"")) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""public"", ""js"")) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""__init__.py""), ""w"") as f: f.write(frappe.as_unicode(init_template)) with open(os.path.join(dest, hooks.app_name, ""MANIFEST.in""), ""w"") as f: f.write(frappe.as_unicode(manifest_template.format(**hooks))) with open(os.path.join(dest, hooks.app_name, ""requirements.txt""), 
""w"") as f: f.write(""# frappe -- https://github.com/frappe/frappe is installed via 'bench init'"") with open(os.path.join(dest, hooks.app_name, ""README.md""), ""w"") as f: f.write(frappe.as_unicode(""## {0}\n\n{1}\n\n#### License\n\n{2}"".format(hooks.app_title, hooks.app_description, hooks.app_license))) with open(os.path.join(dest, hooks.app_name, ""license.txt""), ""w"") as f: f.write(frappe.as_unicode(""License: "" + hooks.app_license)) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""modules.txt""), ""w"") as f: f.write(frappe.as_unicode(hooks.app_title)) # These values could contain quotes and can break string declarations # So escaping them before setting variables in setup.py and hooks.py for key in (""app_publisher"", ""app_description"", ""app_license""): hooks[key] = hooks[key].replace(""\\"", ""\\\\"").replace(""'"", ""\\'"").replace(""\"""", ""\\\"""") with open(os.path.join(dest, hooks.app_name, ""setup.py""), ""w"") as f: f.write(frappe.as_unicode(setup_template.format(**hooks))) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""hooks.py""), ""w"") as f: f.write(frappe.as_unicode(hooks_template.format(**hooks))) touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, ""patches.txt"")) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""config"", ""desktop.py""), ""w"") as f: f.write(frappe.as_unicode(desktop_template.format(**hooks))) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""config"", ""docs.py""), ""w"") as f: f.write(frappe.as_unicode(docs_template.format(**hooks))) app_directory = os.path.join(dest, hooks.app_name) if not no_git_init: with open(os.path.join(dest, hooks.app_name, "".gitignore""), ""w"") as f: f.write(frappe.as_unicode(gitignore_template.format(app_name = hooks.app_name))) # initialize git repository app_repo = git.Repo.init(app_directory) app_repo.git.add(A=True) app_repo.index.commit(""feat: Initialize App"") print(""'{app}' created at {path}"".format(app=app_name, path=app_directory)) ","def make_boilerplate(dest, app_name, no_git=False): if not os.path.exists(dest): print(""Destination directory does not exist"") return # app_name should be in snake_case app_name = frappe.scrub(app_name) hooks = frappe._dict() hooks.app_name = app_name app_title = hooks.app_name.replace(""_"", "" "").title() for key in (""App Title (default: {0})"".format(app_title), ""App Description"", ""App Publisher"", ""App Email"", ""App Icon (default 'octicon octicon-file-directory')"", ""App Color (default 'grey')"", ""App License (default 'MIT')""): hook_key = key.split("" ("")[0].lower().replace("" "", ""_"") hook_val = None while not hook_val: hook_val = cstr(input(key + "": "")) if not hook_val: defaults = { ""app_title"": app_title, ""app_icon"": ""octicon octicon-file-directory"", ""app_color"": ""grey"", ""app_license"": ""MIT"" } if hook_key in defaults: hook_val = defaults[hook_key] if hook_key==""app_name"" and hook_val.lower().replace("" "", ""_"") != hook_val: print(""App Name must be all lowercase and without spaces"") hook_val = """" elif hook_key==""app_title"" and not re.match(r""^(?![\W])[^\d_\s][\w -]+$"", hook_val, re.UNICODE): print(""App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores"") hook_val = """" hooks[hook_key] = hook_val frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""templates""), 
with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""www"")) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""templates"", ""pages""), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""templates"", ""includes"")) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""config""), with_init=True) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""public"", ""css"")) frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, ""public"", ""js"")) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""__init__.py""), ""w"") as f: f.write(frappe.as_unicode(init_template)) with open(os.path.join(dest, hooks.app_name, ""MANIFEST.in""), ""w"") as f: f.write(frappe.as_unicode(manifest_template.format(**hooks))) with open(os.path.join(dest, hooks.app_name, ""requirements.txt""), ""w"") as f: f.write(""# frappe -- https://github.com/frappe/frappe is installed via 'bench init'"") with open(os.path.join(dest, hooks.app_name, ""README.md""), ""w"") as f: f.write(frappe.as_unicode(""## {0}\n\n{1}\n\n#### License\n\n{2}"".format(hooks.app_title, hooks.app_description, hooks.app_license))) with open(os.path.join(dest, hooks.app_name, ""license.txt""), ""w"") as f: f.write(frappe.as_unicode(""License: "" + hooks.app_license)) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""modules.txt""), ""w"") as f: f.write(frappe.as_unicode(hooks.app_title)) # These values could contain quotes and can break string declarations # So escaping them before setting variables in setup.py and hooks.py for key in (""app_publisher"", ""app_description"", ""app_license""): hooks[key] = hooks[key].replace(""\\"", ""\\\\"").replace(""'"", ""\\'"").replace(""\"""", ""\\\"""") with open(os.path.join(dest, hooks.app_name, ""setup.py""), ""w"") as f: f.write(frappe.as_unicode(setup_template.format(**hooks))) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""hooks.py""), ""w"") as f: f.write(frappe.as_unicode(hooks_template.format(**hooks))) touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, ""patches.txt"")) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""config"", ""desktop.py""), ""w"") as f: f.write(frappe.as_unicode(desktop_template.format(**hooks))) with open(os.path.join(dest, hooks.app_name, hooks.app_name, ""config"", ""docs.py""), ""w"") as f: f.write(frappe.as_unicode(docs_template.format(**hooks))) app_directory = os.path.join(dest, hooks.app_name) if not no_git_init: with open(os.path.join(dest, hooks.app_name, "".gitignore""), ""w"") as f: f.write(frappe.as_unicode(gitignore_template.format(app_name = hooks.app_name))) # initialize git repository app_repo = git.Repo.init(app_directory) app_repo.git.add(A=True) app_repo.index.commit(""feat: Initialize App"") print(""'{app}' created at {path}"".format(app=app_name, path=app_directory)) " 59190,"def filterwarnings(action, message="""", category=Warning, module="""", lineno=0, append=False): """"""Insert an entry into the list of warnings filters (at the front). 
'action' -- one of ""error"", ""ignore"", ""always"", ""default"", ""module"", or ""once"" 'message' -- a regex that the warning message must match 'category' -- a class that the warning must be a subclass of 'module' -- a regex that the module name must match 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters """""" if action not in (""error"", ""ignore"", ""always"", ""default"", ""module"", ""once""): raise ValueError(f""invalid action: {action!r}"") if not isinstance(message, str): raise TypeError(""message must be a string"") if not isinstance(category, type): raise TypeError(""category must be a class"") if not issubclass(category, Warning): raise TypeError(""category must be a Warning subclass"") if not isinstance(module, str): raise TypeError(""module must be a string"") if not isinstance(lineno, int): raise TypeError(""lineno must be an int"") if lineno < 0: raise ValueError(""lineno must be an int >= 0"") if message or module: import re if message: message = re.compile(message, re.I) else: message = None if module: module = re.compile(module) else: module = None _add_filter(action, message, category, module, lineno, append=append) ","def filterwarnings(action, message="""", category=Warning, module="""", lineno=0, append=False): """"""Insert an entry into the list of warnings filters (at the front). 'action' -- one of ""error"", ""ignore"", ""always"", ""default"", ""module"", or ""once"" 'message' -- a regex that the warning message must match 'category' -- a class that the warning must be a subclass of 'module' -- a regex that the module name must match 'lineno' -- an integer line number, 0 matches all warnings 'append' -- if true, append to the list of filters """""" if action not in (""error"", ""ignore"", ""always"", ""default"", ""module"", ""once""): raise ValueError(f""invalid action: {action!r}"") if not isinstance(message, str): raise TypeError(""message must be a string"") if not isinstance(category, type) or not issubclass(category, Warning): raise TypeError(""category must be a Warning subclass"") if not isinstance(module, str): raise TypeError(""module must be a string"") if not isinstance(lineno, int): raise TypeError(""lineno must be an int"") if lineno < 0: raise ValueError(""lineno must be an int >= 0"") if message or module: import re if message: message = re.compile(message, re.I) else: message = None if module: module = re.compile(module) else: module = None _add_filter(action, message, category, module, lineno, append=append) " 32410,"def generate_dbotscore(response: Dict) -> List: """"""Creates CommandResult object based on the contents of 'response' argument and provides DBotScore objects. Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- List A list of CommandResults objects. 
"""""" data = response.get('data', {}) analysis = data.get('analysis', {}) main_object = analysis.get('content', {}).get('mainObject', {}) submission_type = main_object.get('type') submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold() reputation_map = { ""shared"": Common.DBotScore.NONE, ""unknown"": Common.DBotScore.NONE, ""whitelisted"": Common.DBotScore.GOOD, ""malicious"": Common.DBotScore.BAD, ""suspicious"": Common.DBotScore.SUSPICIOUS } returned_data = [] main_entity = None main_entity_type = None # Add the hash or URL first if submission_type == 'hash': hashes = main_object.get('hashes', {}) info = main_object.get('info', {}) file_type = info.get('file') exif = info.get('exif', {}) main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5') main_entity_type = FeedIndicatorType.File dbot_score = Common.DBotScore( indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'), indicator_type=DBotScoreType.FILE, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) returned_data.append(CommandResults( indicator=Common.File( dbot_score=dbot_score, md5=hashes.get('md5'), sha1=hashes.get('sha1'), sha256=hashes.get('sha256'), file_type=file_type, associated_file_names=exif.get('OriginalFileName') ) )) else: main_entity = main_object.get('url') main_entity_type = FeedIndicatorType.URL url_outputs = { 'Data': main_object.get('url') } dbot_score = Common.DBotScore( indicator=main_object.get('url'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) if dbot_score.score >= 2: url_outputs['Malicious'] = { 'Vendor': 'ANYRUN', 'Description': threat_text } returned_data.append(CommandResults( outputs_prefix='URL', outputs_key_field=['Data'], outputs=url_outputs, indicator=Common.URL( url=main_object.get('url'), dbot_score=dbot_score, ) )) # Check if network information is available in the report if 'network' in data: network_data = data.get('network') # Then add all the network-related indicators - 'connections' if 'connections' in network_data: connections = network_data.get('connections') for current_connection in connections: reputation = current_connection.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_connection.get('IP'), indicator_type=DBotScoreType.IP, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_connection.get('IP'), entity_b_type=FeedIndicatorType.IP, brand=""ANYRUN"" )] ip_indicator = Common.IP( ip=current_connection.get('IP'), asn=current_connection.get('ASN'), port=current_connection.get('Port'), geo_country=current_connection.get('Country'), dbot_score=current_dbot_score, relationships=relationships ) if current_connection.get('IP') not in [ x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{current_connection.get('IP')}"", [{ ""Description"": f""This IP was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=ip_indicator, relationships=relationships )) # Then add all the network-related indicators - 'dnsRequests' if 'dnsRequests' 
in network_data: for current_dnsRequests in network_data.get('dnsRequests'): reputation = current_dnsRequests.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_dnsRequests.get('Domain'), indicator_type=DBotScoreType.DOMAIN, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_dnsRequests.get('Domain'), entity_b_type=FeedIndicatorType.Domain, brand=""ANYRUN"" )] if ""IP"" in current_dnsRequests: for ip in current_dnsRequests.get('IP', []): relationships.append( EntityRelationship( name=EntityRelationship.Relationships.RESOLVES_TO, entity_a=current_dnsRequests.get('Domain'), entity_a_type=FeedIndicatorType.Domain, entity_b=ip, entity_b_type=FeedIndicatorType.IP ) ) domain_ip_dbot_score = Common.DBotScore( indicator=ip, indicator_type=DBotScoreType.IP, integration_name=""ANYRUN"", score=Common.DBotScore.NONE ) domain_ip_indicator = Common.IP( ip=ip, dbot_score=domain_ip_dbot_score ) returned_data.append(CommandResults( indicator=domain_ip_indicator, readable_output=tableToMarkdown( f""{ip}"", [{ ""Description"": f""This IP was resovled from {current_dnsRequests.get('Domain')}"" }] ) )) domain_indicator = Common.Domain( domain=current_dnsRequests.get('Domain'), dbot_score=current_dbot_score, relationships=relationships ) if current_dnsRequests.get('Domain') not in [ x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{current_dnsRequests.get('Domain')}"", [{ ""Description"": f""This domain was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=domain_indicator, relationships=relationships )) # Then add all the network-related indicators - 'httpRequests' if 'httpRequests' in network_data: for current_httpRequests in network_data.get('httpRequests'): reputation = current_httpRequests['Reputation'] if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_httpRequests.get('URL'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_httpRequests.get('URL'), entity_b_type=FeedIndicatorType.URL, brand=""ANYRUN"" )] url_indicator = Common.URL( url=current_httpRequests.get('URL'), geo_country=current_httpRequests.get('Country'), port=current_httpRequests.get('Port'), dbot_score=current_dbot_score, relationships=relationships ) if current_httpRequests.get('URL') not in [ x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{current_httpRequests.get('URL')}"", [{ ""Description"": f""This URL was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=url_indicator, relationships=relationships )) if 'mitre' in data: mitre_data = data.get('mitre') for item in mitre_data: relationships = [EntityRelationship( name=EntityRelationship.Relationships.RELATED_TO, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=item.get('name'), entity_b_type='Attack Pattern' )] attack_indicator = Common.AttackPattern( stix_id=None, value=item.get('name'), mitre_id=item.get('id') ) 
returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{item.get('name')}"", [{ ""Description"": f""This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=attack_indicator, relationships=relationships )) return returned_data ","def generate_dbotscore(response: Dict) -> List: """"""Creates CommandResult object based on the contents of 'response' argument and provides DBotScore objects. Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- List A list of CommandResults objects. """""" data = response.get('data', {}) analysis = data.get('analysis', {}) main_object = analysis.get('content', {}).get('mainObject', {}) submission_type = main_object.get('type') submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold() reputation_map = { ""shared"": Common.DBotScore.NONE, ""unknown"": Common.DBotScore.NONE, ""whitelisted"": Common.DBotScore.GOOD, ""malicious"": Common.DBotScore.BAD, ""suspicious"": Common.DBotScore.SUSPICIOUS } returned_data = [] main_entity = None main_entity_type = None # Add the hash or URL first if submission_type == 'hash': hashes = main_object.get('hashes', {}) info = main_object.get('info', {}) file_type = info.get('file') exif = info.get('exif', {}) main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5') main_entity_type = FeedIndicatorType.File dbot_score = Common.DBotScore( indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'), indicator_type=DBotScoreType.FILE, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) returned_data.append(CommandResults( indicator=Common.File( dbot_score=dbot_score, md5=hashes.get('md5'), sha1=hashes.get('sha1'), sha256=hashes.get('sha256'), file_type=file_type, associated_file_names=exif.get('OriginalFileName') ) )) else: main_entity = main_object.get('url') main_entity_type = FeedIndicatorType.URL url_outputs = { 'Data': main_object.get('url') } dbot_score = Common.DBotScore( indicator=main_object.get('url'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE ) if dbot_score.score >= 2: url_outputs['Malicious'] = { 'Vendor': 'ANYRUN', 'Description': threat_text } returned_data.append(CommandResults( outputs_prefix='URL', outputs_key_field=['Data'], outputs=url_outputs, indicator=Common.URL( url=main_object.get('url'), dbot_score=dbot_score, ) )) # Check if network information is available in the report if 'network' in data: network_data = data.get('network', {}) # Then add all the network-related indicators - 'connections' if 'connections' in network_data: connections = network_data.get('connections') for current_connection in connections: reputation = current_connection.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_connection.get('IP'), indicator_type=DBotScoreType.IP, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_connection.get('IP'), entity_b_type=FeedIndicatorType.IP, brand=""ANYRUN"" )] ip_indicator = Common.IP( ip=current_connection.get('IP'), 
asn=current_connection.get('ASN'), port=current_connection.get('Port'), geo_country=current_connection.get('Country'), dbot_score=current_dbot_score, relationships=relationships ) if current_connection.get('IP') not in [ x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{current_connection.get('IP')}"", [{ ""Description"": f""This IP was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=ip_indicator, relationships=relationships )) # Then add all the network-related indicators - 'dnsRequests' if 'dnsRequests' in network_data: for current_dnsRequests in network_data.get('dnsRequests'): reputation = current_dnsRequests.get('Reputation') if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_dnsRequests.get('Domain'), indicator_type=DBotScoreType.DOMAIN, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_dnsRequests.get('Domain'), entity_b_type=FeedIndicatorType.Domain, brand=""ANYRUN"" )] if ""IP"" in current_dnsRequests: for ip in current_dnsRequests.get('IP', []): relationships.append( EntityRelationship( name=EntityRelationship.Relationships.RESOLVES_TO, entity_a=current_dnsRequests.get('Domain'), entity_a_type=FeedIndicatorType.Domain, entity_b=ip, entity_b_type=FeedIndicatorType.IP ) ) domain_ip_dbot_score = Common.DBotScore( indicator=ip, indicator_type=DBotScoreType.IP, integration_name=""ANYRUN"", score=Common.DBotScore.NONE ) domain_ip_indicator = Common.IP( ip=ip, dbot_score=domain_ip_dbot_score ) returned_data.append(CommandResults( indicator=domain_ip_indicator, readable_output=tableToMarkdown( f""{ip}"", [{ ""Description"": f""This IP was resovled from {current_dnsRequests.get('Domain')}"" }] ) )) domain_indicator = Common.Domain( domain=current_dnsRequests.get('Domain'), dbot_score=current_dbot_score, relationships=relationships ) if current_dnsRequests.get('Domain') not in [ x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain) ]: returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{current_dnsRequests.get('Domain')}"", [{ ""Description"": f""This domain was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=domain_indicator, relationships=relationships )) # Then add all the network-related indicators - 'httpRequests' if 'httpRequests' in network_data: for current_httpRequests in network_data.get('httpRequests'): reputation = current_httpRequests['Reputation'] if reputation in reputation_map.keys(): current_dbot_score = Common.DBotScore( indicator=current_httpRequests.get('URL'), indicator_type=DBotScoreType.URL, integration_name='ANYRUN', score=reputation_map[reputation] ) relationships = [EntityRelationship( name=EntityRelationship.Relationships.COMMUNICATED_WITH, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=current_httpRequests.get('URL'), entity_b_type=FeedIndicatorType.URL, brand=""ANYRUN"" )] url_indicator = Common.URL( url=current_httpRequests.get('URL'), geo_country=current_httpRequests.get('Country'), port=current_httpRequests.get('Port'), dbot_score=current_dbot_score, relationships=relationships ) if current_httpRequests.get('URL') not in [ x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL) ]: 
returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{current_httpRequests.get('URL')}"", [{ ""Description"": f""This URL was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=url_indicator, relationships=relationships )) if 'mitre' in data: mitre_data = data.get('mitre') for item in mitre_data: relationships = [EntityRelationship( name=EntityRelationship.Relationships.RELATED_TO, entity_a=main_entity, entity_a_type=main_entity_type, entity_b=item.get('name'), entity_b_type='Attack Pattern' )] attack_indicator = Common.AttackPattern( stix_id=None, value=item.get('name'), mitre_id=item.get('id') ) returned_data.append(CommandResults( readable_output=tableToMarkdown( f""{item.get('name')}"", [{ ""Description"": f""This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"" }] ), indicator=attack_indicator, relationships=relationships )) return returned_data " 42734,"def test_upgrades_list_is_sane(): idx = None for list_index, entry in enumerate(UPGRADES_LIST): # When support for older versions was dropped we removed them from the list of assets. # Adjust the test to account for this shift. idx = list_index + 25 msg = ( f'{idx} upgrade record was expected to have {idx + 1} ' f'from_version but has {entry.from_version}' ) assert entry.from_version == idx + 1, msg assert idx + 2 == ROTKEHLCHEN_DB_VERSION, 'the final version + 1 should be current version' ","def test_upgrades_list_is_sane(): idx = None for idx, entry in enumerate(UPGRADES_LIST, start=MIN_USER_DB_SUPPORTED_VERSION): msg = ( f'{idx} upgrade record was expected to have {idx + 1} ' f'from_version but has {entry.from_version}' ) assert entry.from_version == idx + 1, msg assert idx + 2 == ROTKEHLCHEN_DB_VERSION, 'the final version + 1 should be current version' " 27612,"def validate_pem_format(param_name, param_argument): """"""Validate that an argument is a PEM-formatted public key or certificate :param param_name: The name of the parameter being validate. Used in any resulting exception messages. :type param_name: str | unicode :param param_argument: The argument to validate :type param_argument: str | unicode :return: True if the argument is validate False otherwise :rtype: bool """""" def _check_pem(arg): arg = arg.strip() if not arg.startswith(""-----BEGIN CERTIFICATE-----"") or not arg.endswith( ""-----END CERTIFICATE-----"" ): return False return True if isinstance(param_argument, str): param_argument = [param_argument] if not isinstance(param_argument, list) or not all( _check_pem(p) for p in param_argument ): error_msg = (f""unsupported {param_name} public key / certificate format, required type: PEM"") raise exceptions.ParamValidationError(error_msg) ","def validate_pem_format(param_name, param_argument): """"""Validate that an argument is a PEM-formatted public key or certificate :param param_name: The name of the parameter being validate. Used in any resulting exception messages. 
:type param_name: str | unicode :param param_argument: The argument to validate :type param_argument: str | unicode :return: True if the argument is validate False otherwise :rtype: bool """""" def _check_pem(arg): arg = arg.strip() if not arg.startswith(""-----BEGIN CERTIFICATE-----"") or not arg.endswith( ""-----END CERTIFICATE-----"" ): return False return True if isinstance(param_argument, str): param_argument = [param_argument] if not isinstance(param_argument, list) or not all( _check_pem(p) for p in param_argument ): error_msg = f""unsupported {param_name} public key / certificate format, required type: PEM"" raise exceptions.ParamValidationError(error_msg) " 32533,"def fetch_events_command(client, first_fetch, last_run, fetch_limit, fetch_delta, incidents_states): """""" Fetches incidents from the ProofPoint API. """""" last_fetch = last_run.get('last_fetch', {}) last_fetched_id = last_run.get('last_fetched_incident_id', {}) for state in incidents_states: if not last_fetch.get(state): last_fetch[state] = first_fetch if not last_fetched_id.get(state): last_fetched_id[state] = '0' incidents = [] for state in incidents_states: request_params = { 'created_after': last_fetch[state], 'last_fetched_id': last_fetched_id[state], 'fetch_delta': fetch_delta, 'state': state, 'fetch_limit': fetch_limit } id = last_fetched_id[state] incidents = get_incidents_batch_by_time_request(client, request_params) if incidents: id = incidents[-1].get('id') last_fetch_time = incidents[-1]['created_at'] last_fetch[state] = \ (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split('.')[0] + 'Z' last_fetched_id[state] = id demisto.debug(f""End of current fetch function with last_fetch {str(last_fetch)} and last_fetched_id"" f"" {str(last_fetched_id)}"") last_run = { 'last_fetch': last_fetch, 'last_fetched_incident_id': last_fetched_id } demisto.info(f'extracted {len(incidents)} events') return incidents, last_run ","def fetch_events_command(client, first_fetch, last_run, fetch_limit, fetch_delta, incidents_states): """""" Fetches incidents from the ProofPoint API. 
"""""" last_fetch = last_run.get('last_fetch', {}) last_fetched_id = last_run.get('last_fetched_incident_id', {}) for state in incidents_states: if not last_fetch.get(state): last_fetch[state] = first_fetch if not last_fetched_id.get(state): last_fetched_id[state] = '0' events = [] for state in incidents_states: request_params = { 'created_after': last_fetch[state], 'last_fetched_id': last_fetched_id[state], 'fetch_delta': fetch_delta, 'state': state, 'fetch_limit': fetch_limit } id = last_fetched_id[state] incidents = get_incidents_batch_by_time_request(client, request_params) if incidents: id = incidents[-1].get('id') last_fetch_time = incidents[-1]['created_at'] last_fetch[state] = \ (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split('.')[0] + 'Z' last_fetched_id[state] = id demisto.debug(f""End of current fetch function with last_fetch {str(last_fetch)} and last_fetched_id"" f"" {str(last_fetched_id)}"") last_run = { 'last_fetch': last_fetch, 'last_fetched_incident_id': last_fetched_id } demisto.info(f'extracted {len(incidents)} events') return incidents, last_run " 34661,"def test_model_finetuning_core_new_domain_label( tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text, ): mocked_core_training = AsyncMock() monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) (tmp_path / ""models"").mkdir() output = str(tmp_path / ""models"") old_domain = rasa.shared.utils.io.read_yaml_file(""examples/moodbot/domain.yml"") old_domain[""intents""].append(""a_new_one"") new_domain_path = tmp_path / ""new_domain.yml"" rasa.shared.utils.io.write_yaml(old_domain, new_domain_path) with pytest.raises(SystemExit): train_core( domain=str(new_domain_path), config=""examples/moodbot/config.yml"", stories=""examples/moodbot/data/stories.yml"", output=output, model_to_finetune=trained_moodbot_path, ) mocked_core_training.assert_not_called() ","def test_model_finetuning_core_new_domain_label( tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text, ): mocked_core_training = AsyncMock() monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) (tmp_path / ""models"").mkdir() output = str(tmp_path / ""models"") # Simulate addition to training data old_domain = rasa.shared.utils.io.read_yaml_file(""examples/moodbot/domain.yml"") old_domain[""intents""].append(""a_new_one"") new_domain_path = tmp_path / ""new_domain.yml"" rasa.shared.utils.io.write_yaml(old_domain, new_domain_path) with pytest.raises(SystemExit): train_core( domain=str(new_domain_path), config=""examples/moodbot/config.yml"", stories=""examples/moodbot/data/stories.yml"", output=output, model_to_finetune=trained_moodbot_path, ) mocked_core_training.assert_not_called() " 47280,"def postprocess_qa_predictions_with_beam_search( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, start_n_top: int = 5, end_n_top: int = 5, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """""" Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). 
features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """""" if not len(predictions) == 5: raise ValueError(""`predictions` should be a tuple with five elements."") start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions if not len(predictions[0]) == len(features): raise ValueError(f""Got {len(predictions[0])} predictions and {len(features)} features."") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples[""id""])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature[""example_id""]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() if version_2_with_negative else None # Logging. logger.setLevel(log_level) logger.info(f""Post-processing {len(examples)} example predictions split into {len(features)} features."") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_log_prob = start_top_log_probs[feature_index] start_indexes = start_top_index[feature_index] end_log_prob = end_top_log_probs[feature_index] end_indexes = end_top_index[feature_index] feature_null_score = cls_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. 
offset_mapping = features[feature_index][""offset_mapping""] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get(""token_is_max_context"", None) # Update minimum null prediction if min_null_score is None or feature_null_score < min_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits. for i in range(start_n_top): for j in range(end_n_top): start_index = int(start_indexes[i]) j_index = i * end_n_top + j end_index = int(end_indexes[j_index]) # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the # p_mask but let's not take any risk) if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length negative or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { ""offsets"": (offset_mapping[start_index][0], offset_mapping[end_index][1]), ""score"": start_log_prob[i] + end_log_prob[j_index], ""start_log_prob"": start_log_prob[i], ""end_log_prob"": end_log_prob[j_index], } ) # Only keep the best `n_best_size` predictions. predictions = sorted(prelim_predictions, key=lambda x: x[""score""], reverse=True)[:n_best_size] # Use the offsets to gather the answer text in the original context. context = example[""context""] for pred in predictions: offsets = pred.pop(""offsets"") pred[""text""] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0: predictions.insert(0, {""text"": """", ""start_logit"": -1e-6, ""end_logit"": -1e-6, ""score"": -2e-6}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop(""score"") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred[""probability""] = prob # Pick the best prediction and set the probability for the null answer. all_predictions[example[""id""]] = predictions[0][""text""] if version_2_with_negative: scores_diff_json[example[""id""]] = float(min_null_score) # Make `predictions` JSON-serializable by casting np.float back to float. all_nbest_json[example[""id""]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. 
if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f""{output_dir} is not a directory."") prediction_file = os.path.join( output_dir, ""predictions.json"" if prefix is None else f""{prefix}_predictions.json"" ) nbest_file = os.path.join( output_dir, ""nbest_predictions.json"" if prefix is None else f""{prefix}_nbest_predictions.json"" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, ""null_odds.json"" if prefix is None else f""{prefix}_null_odds.json"" ) logger.info(f""Saving predictions to {prediction_file}."") with open(prediction_file, ""w"") as writer: writer.write(json.dumps(all_predictions, indent=4) + ""\n"") logger.info(f""Saving nbest_preds to {nbest_file}."") with open(nbest_file, ""w"") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + ""\n"") if version_2_with_negative: logger.info(f""Saving null_odds to {null_odds_file}."") with open(null_odds_file, ""w"") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + ""\n"") return all_predictions, scores_diff_json ","def postprocess_qa_predictions_with_beam_search( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, start_n_top: int = 5, end_n_top: int = 5, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """""" Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. 
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """""" if len(predictions) != 5: raise ValueError(""`predictions` should be a tuple with five elements."") start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions if not len(predictions[0]) == len(features): raise ValueError(f""Got {len(predictions[0])} predictions and {len(features)} features."") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples[""id""])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature[""example_id""]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() if version_2_with_negative else None # Logging. logger.setLevel(log_level) logger.info(f""Post-processing {len(examples)} example predictions split into {len(features)} features."") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_log_prob = start_top_log_probs[feature_index] start_indexes = start_top_index[feature_index] end_log_prob = end_top_log_probs[feature_index] end_indexes = end_top_index[feature_index] feature_null_score = cls_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index][""offset_mapping""] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get(""token_is_max_context"", None) # Update minimum null prediction if min_null_score is None or feature_null_score < min_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits. for i in range(start_n_top): for j in range(end_n_top): start_index = int(start_indexes[i]) j_index = i * end_n_top + j end_index = int(end_indexes[j_index]) # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the # p_mask but let's not take any risk) if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length negative or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { ""offsets"": (offset_mapping[start_index][0], offset_mapping[end_index][1]), ""score"": start_log_prob[i] + end_log_prob[j_index], ""start_log_prob"": start_log_prob[i], ""end_log_prob"": end_log_prob[j_index], } ) # Only keep the best `n_best_size` predictions. 
predictions = sorted(prelim_predictions, key=lambda x: x[""score""], reverse=True)[:n_best_size] # Use the offsets to gather the answer text in the original context. context = example[""context""] for pred in predictions: offsets = pred.pop(""offsets"") pred[""text""] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0: predictions.insert(0, {""text"": """", ""start_logit"": -1e-6, ""end_logit"": -1e-6, ""score"": -2e-6}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop(""score"") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred[""probability""] = prob # Pick the best prediction and set the probability for the null answer. all_predictions[example[""id""]] = predictions[0][""text""] if version_2_with_negative: scores_diff_json[example[""id""]] = float(min_null_score) # Make `predictions` JSON-serializable by casting np.float back to float. all_nbest_json[example[""id""]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f""{output_dir} is not a directory."") prediction_file = os.path.join( output_dir, ""predictions.json"" if prefix is None else f""{prefix}_predictions.json"" ) nbest_file = os.path.join( output_dir, ""nbest_predictions.json"" if prefix is None else f""{prefix}_nbest_predictions.json"" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, ""null_odds.json"" if prefix is None else f""{prefix}_null_odds.json"" ) logger.info(f""Saving predictions to {prediction_file}."") with open(prediction_file, ""w"") as writer: writer.write(json.dumps(all_predictions, indent=4) + ""\n"") logger.info(f""Saving nbest_preds to {nbest_file}."") with open(nbest_file, ""w"") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + ""\n"") if version_2_with_negative: logger.info(f""Saving null_odds to {null_odds_file}."") with open(null_odds_file, ""w"") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + ""\n"") return all_predictions, scores_diff_json " 211,"def test_get_builds_json(bodhi_container, db_container): """"""Test ``/builds`` path"""""" # Fetch builds(of latest update) from DB query_updates = ( ""SELECT "" "" id, "" "" alias "" ""FROM updates "" ""ORDER BY date_submitted DESC LIMIT 1"" ) query_builds = ( ""SELECT "" "" nvr, "" "" release_id, "" "" signed, "" "" type, "" "" epoch "" ""FROM builds "" ""WHERE update_id = %s LIMIT 1"" ) db_ip = db_container.get_IPv4s()[0] conn = psycopg2.connect(""dbname=bodhi2 user=postgres host={}"".format(db_ip)) with conn: with conn.cursor() as curs: curs.execute(query_updates) row = curs.fetchone() update_id = row[0] update_alias = row[1] curs.execute(query_builds, (update_id, )) builds = [] for row in curs.fetchall(): build = {} for value, description in zip(row, curs.description): build[description.name] = value builds.append(build) conn.close() # GET on builds of lates update with bodhi_container.http_client(port=""8080"") as c: http_response = c.get(f""/builds/?updates={update_alias}"") 
default_rows_per_page = 20 expected_json = { ""builds"": builds, ""page"": 1, ""pages"": int(math.ceil(len(builds) / float(default_rows_per_page))), ""rows_per_page"": default_rows_per_page, ""total"": len(builds), } try: assert http_response.ok assert expected_json == http_response.json() except AssertionError: print(http_response) print(http_response.text) with read_file(bodhi_container, ""/httpdir/errorlog"") as log: print(log.read()) raise ","def test_get_builds_json(bodhi_container, db_container): """"""Test ``/builds`` path"""""" # Fetch builds (of latest update) from DB query_updates = ( ""SELECT "" "" id, "" "" alias "" ""FROM updates "" ""ORDER BY date_submitted DESC LIMIT 1"" ) query_builds = ( ""SELECT "" "" nvr, "" "" release_id, "" "" signed, "" "" type, "" "" epoch "" ""FROM builds "" ""WHERE update_id = %s LIMIT 1"" ) db_ip = db_container.get_IPv4s()[0] conn = psycopg2.connect(""dbname=bodhi2 user=postgres host={}"".format(db_ip)) with conn: with conn.cursor() as curs: curs.execute(query_updates) row = curs.fetchone() update_id = row[0] update_alias = row[1] curs.execute(query_builds, (update_id, )) builds = [] for row in curs.fetchall(): build = {} for value, description in zip(row, curs.description): build[description.name] = value builds.append(build) conn.close() # GET on builds of lates update with bodhi_container.http_client(port=""8080"") as c: http_response = c.get(f""/builds/?updates={update_alias}"") default_rows_per_page = 20 expected_json = { ""builds"": builds, ""page"": 1, ""pages"": int(math.ceil(len(builds) / float(default_rows_per_page))), ""rows_per_page"": default_rows_per_page, ""total"": len(builds), } try: assert http_response.ok assert expected_json == http_response.json() except AssertionError: print(http_response) print(http_response.text) with read_file(bodhi_container, ""/httpdir/errorlog"") as log: print(log.read()) raise " 32609,"def branch_create_command(client: Client, args: Dict) -> CommandResults: repo = args.get('repo', None) name = args.get('name', None) target_branch = args.get('target_branch', None) if not repo: repo = client.repository response = client.branch_create_request(name, target_branch, repo) return CommandResults( readable_output=f'The branch {name} was created successfully.', outputs_prefix='Bitbucket.Branch', outputs=response, raw_response=response ) ","def branch_create_command(client: Client, args: Dict) -> CommandResults: repo = args.get('repo', None) name = args.get('name') target_branch = args.get('target_branch', None) if not repo: repo = client.repository response = client.branch_create_request(name, target_branch, repo) return CommandResults( readable_output=f'The branch {name} was created successfully.', outputs_prefix='Bitbucket.Branch', outputs=response, raw_response=response ) " 47464,"def find_labels(model_class): """""" Find the labels used by a given model. Args: model_class (`type`): The class of the model. """""" model_name = model_class.__name__ if model_name.startswith(""TF""): signature = inspect.signature(model_class.call) elif model_name.startswith(""Flax""): signature = inspect.signature(model_class.__call__) else: signature = inspect.signature(model_class.forward) if ""QuestionAnswering"" in model_name: return [p for p in signature.parameters if ""label"" in p or p == ""start_positions"" or p == ""end_positions""] else: return [p for p in signature.parameters if ""label"" in p] ","def find_labels(model_class): """""" Find the labels used by a given model. 
Args: model_class (`type`): The class of the model. """""" model_name = model_class.__name__ if model_name.startswith(""TF""): signature = inspect.signature(model_class.call) elif model_name.startswith(""Flax""): signature = inspect.signature(model_class.__call__) else: signature = inspect.signature(model_class.forward) if ""QuestionAnswering"" in model_name: return [p for p in signature.parameters if ""label"" in p or p in (""start_positions"", ""end_positions"")] else: return [p for p in signature.parameters if ""label"" in p] " 22711,"def check_owner(file_path): # type: (str) -> bool """""" Check if given file is owner by current user. :param str file_path: File path to check :rtype: bool :return: True if given file is owned by current user, False otherwise. """""" if POSIX_MODE: return os.stat(file_path).st_uid == os.getuid() # Get owner sid of the file security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION) user = security.GetSecurityDescriptorOwner() # Compare sids return _get_current_user() == user ","def check_owner(file_path): # type: (str) -> bool """""" Check if given file is owned by current user. :param str file_path: File path to check :rtype: bool :return: True if given file is owned by current user, False otherwise. """""" if POSIX_MODE: return os.stat(file_path).st_uid == os.getuid() # Get owner sid of the file security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION) user = security.GetSecurityDescriptorOwner() # Compare sids return _get_current_user() == user " 25921,"def load_arguments(self, _): from azure.mgmt.redis.models import RebootType, RedisKeyType, SkuName, TlsVersion, ReplicationRole from azure.cli.command_modules.redis._validators import JsonString, ScheduleEntryList, validate_list_of_integers from azure.cli.command_modules.redis.custom import allowed_c_family_sizes, allowed_p_family_sizes from azure.cli.core.commands.parameters import get_enum_type, tags_type, zones_type from azure.cli.core.commands.parameters import get_resource_name_completion_list with self.argument_context('redis') as c: cache_name = CLIArgumentType(options_list=['--name', '-n'], help='Name of the Redis cache.', id_part='name', completer=get_resource_name_completion_list('Microsoft.Cache/redis')) format_type = CLIArgumentType(options_list=['--file-format'], help='Format of the blob (Currently rdb is the only supported format, with other formats expected in the future)') c.argument('name', arg_type=cache_name) c.argument('redis_configuration', help='JSON encoded configuration settings. Use @{file} to load from a file.', type=JsonString) c.argument('reboot_type', arg_type=get_enum_type(RebootType)) c.argument('key_type', arg_type=get_enum_type(RedisKeyType)) c.argument('files', help='SAS url for blobs that needs to be imported', nargs='+') c.argument('format', arg_type=format_type) c.argument('file_format', arg_type=format_type) c.argument('container', help='SAS url for container where data needs to be exported to') c.argument('prefix', help='Prefix to use for exported files') c.argument('cache_name', arg_type=cache_name) c.argument('shard_count', type=int, help='The number of shards to be created on a Premium Cluster Cache.') c.argument('subnet_id', help='The full resource ID of a subnet in a virtual network to deploy the redis cache in. 
Example format /subscriptions/{subid}/resourceGroups/{resourceGroupName}/providers/Microsoft.{Network|ClassicNetwork}/virtualNetworks/vnet1/subnets/subnet1') c.argument('static_ip', help='Specify a static ip if required for the VNET. If you do not specify a static IP then an IP address is chosen automatically') c.argument('tenant_settings', arg_type=tags_type, help='Space-separated tenant settings in key[=value] format') c.argument('tags', arg_type=tags_type) c.argument('zones', arg_type=zones_type) c.argument('shard_id', type=int) c.argument('sku', help='Type of Redis cache.', arg_type=get_enum_type(SkuName)) c.argument('minimum_tls_version', help='Specifies the TLS version required by clients to connect to cache', arg_type=get_enum_type(TlsVersion)) c.argument('vm_size', arg_type=get_enum_type(allowed_c_family_sizes + allowed_p_family_sizes), help='Size of Redis cache to deploy. Basic and Standard Cache sizes start with C. Premium Cache sizes start with P') c.argument('enable_non_ssl_port', action='store_true', help='If specified, then the non-ssl redis server port (6379) will be enabled.') c.argument('replicas_per_master', help='The number of replicas to be created per master.', is_preview=True) c.argument('ports', type=validate_list_of_integers, help='Specifies the ports of nodes to be rebooted. Comma separated list of integers.', is_preview=True) with self.argument_context('redis firewall-rules list') as c: c.argument('cache_name', arg_type=cache_name, id_part=None) c.argument('rule_name', help='Name of the firewall rule') with self.argument_context('redis server-link') as c: c.argument('name', arg_type=cache_name, id_part=None) c.argument('server_to_link', help='Resource ID or name of the redis cache to be linked') c.argument('replication_role', help='Role of the redis cache to be linked', arg_type=get_enum_type(ReplicationRole)) c.argument('linked_server_name', help='Name of the linked redis cache') with self.argument_context('redis patch-schedule') as c: c.argument('name', arg_type=cache_name, id_part=None) c.argument('schedule_entries', help=""List of Patch schedule entries. Example Value:[{\""dayOfWeek\"":\""Monday\"",\""startHourUtc\"":\""00\"",\""maintenanceWindow\"":\""PT5H\""}]"", type=ScheduleEntryList) ","def load_arguments(self, _): from azure.mgmt.redis.models import RebootType, RedisKeyType, SkuName, TlsVersion, ReplicationRole from azure.cli.command_modules.redis._validators import JsonString, ScheduleEntryList, validate_list_of_integers from azure.cli.command_modules.redis.custom import allowed_c_family_sizes, allowed_p_family_sizes from azure.cli.core.commands.parameters import get_enum_type, tags_type, zones_type from azure.cli.core.commands.parameters import get_resource_name_completion_list with self.argument_context('redis') as c: cache_name = CLIArgumentType(options_list=['--name', '-n'], help='Name of the Redis cache.', id_part='name', completer=get_resource_name_completion_list('Microsoft.Cache/redis')) format_type = CLIArgumentType(options_list=['--file-format'], help='Format of the blob (Currently rdb is the only supported format, with other formats expected in the future)') c.argument('name', arg_type=cache_name) c.argument('redis_configuration', help='JSON encoded configuration settings. 
Use @{file} to load from a file.', type=JsonString) c.argument('reboot_type', arg_type=get_enum_type(RebootType)) c.argument('key_type', arg_type=get_enum_type(RedisKeyType)) c.argument('files', help='SAS url for blobs that needs to be imported', nargs='+') c.argument('format', arg_type=format_type) c.argument('file_format', arg_type=format_type) c.argument('container', help='SAS url for container where data needs to be exported to') c.argument('prefix', help='Prefix to use for exported files') c.argument('cache_name', arg_type=cache_name) c.argument('shard_count', type=int, help='The number of shards to be created on a Premium Cluster Cache.') c.argument('subnet_id', help='The full resource ID of a subnet in a virtual network to deploy the redis cache in. Example format /subscriptions/{subid}/resourceGroups/{resourceGroupName}/providers/Microsoft.{Network|ClassicNetwork}/virtualNetworks/vnet1/subnets/subnet1') c.argument('static_ip', help='Specify a static ip if required for the VNET. If you do not specify a static IP then an IP address is chosen automatically') c.argument('tenant_settings', arg_type=tags_type, help='Space-separated tenant settings in key[=value] format') c.argument('tags', arg_type=tags_type) c.argument('zones', arg_type=zones_type) c.argument('shard_id', type=int) c.argument('sku', help='Type of Redis cache.', arg_type=get_enum_type(SkuName)) c.argument('minimum_tls_version', help='Specifies the TLS version required by clients to connect to cache', arg_type=get_enum_type(TlsVersion)) c.argument('vm_size', arg_type=get_enum_type(allowed_c_family_sizes + allowed_p_family_sizes), help='Size of Redis cache to deploy. Basic and Standard Cache sizes start with C. Premium Cache sizes start with P') c.argument('enable_non_ssl_port', action='store_true', help='If specified, then the non-ssl redis server port (6379) will be enabled.') c.argument('replicas_per_master', help='The number of replicas to be created per master.', is_preview=True) c.argument('ports', type=validate_list_of_integers, help='Specify the ports of nodes to be rebooted. Comma separated list of integers.', is_preview=True) with self.argument_context('redis firewall-rules list') as c: c.argument('cache_name', arg_type=cache_name, id_part=None) c.argument('rule_name', help='Name of the firewall rule') with self.argument_context('redis server-link') as c: c.argument('name', arg_type=cache_name, id_part=None) c.argument('server_to_link', help='Resource ID or name of the redis cache to be linked') c.argument('replication_role', help='Role of the redis cache to be linked', arg_type=get_enum_type(ReplicationRole)) c.argument('linked_server_name', help='Name of the linked redis cache') with self.argument_context('redis patch-schedule') as c: c.argument('name', arg_type=cache_name, id_part=None) c.argument('schedule_entries', help=""List of Patch schedule entries. Example Value:[{\""dayOfWeek\"":\""Monday\"",\""startHourUtc\"":\""00\"",\""maintenanceWindow\"":\""PT5H\""}]"", type=ScheduleEntryList) " 3578,"def _create_intersphinx_data(version, commit, build): """""" Create intersphinx data for this version. 
:param version: Version instance :param commit: Commit that updated path :param build: Build id """""" if not version.is_sphinx_type: return html_storage_path = version.project.get_storage_path( type_='html', version_slug=version.slug, include_file=False ) json_storage_path = version.project.get_storage_path( type_='json', version_slug=version.slug, include_file=False ) object_file = build_media_storage.join(html_storage_path, 'objects.inv') if not build_media_storage.exists(object_file): log.debug('No objects.inv, skipping intersphinx indexing.') return type_file = build_media_storage.join(json_storage_path, 'readthedocs-sphinx-domain-names.json') types = {} titles = {} if build_media_storage.exists(type_file): try: data = json.load(build_media_storage.open(type_file)) types = data['types'] titles = data['titles'] except Exception: log.exception('Exception parsing readthedocs-sphinx-domain-names.json') # These classes are copied from Sphinx # https://github.com/sphinx-doc/sphinx/blob/d79d041f4f90818e0b495523fdcc28db12783caf/sphinx/ext/intersphinx.py#L400-L403 # noqa class MockConfig: intersphinx_timeout = None tls_verify = False user_agent = None class MockApp: srcdir = '' config = MockConfig() def warn(self, msg): log.warning('Sphinx MockApp.', msg=msg) # Re-create all objects from the new build of the version object_file_url = build_media_storage.url(object_file) if object_file_url.startswith('/'): # Filesystem backed storage simply prepends MEDIA_URL to the path to get the URL # This can cause an issue if MEDIA_URL is not fully qualified object_file_url = settings.RTD_INTERSPHINX_URL + object_file_url invdata = intersphinx.fetch_inventory(MockApp(), '', object_file_url) for key, value in sorted(invdata.items() or {}): domain, _type = key.split(':', 1) for name, einfo in sorted(value.items()): # project, version, url, display_name # ('Sphinx', '1.7.9', 'faq.html#epub-faq', 'Epub info') try: url = einfo[2] if '#' in url: doc_name, anchor = url.split( '#', # The anchor can contain ``#`` characters maxsplit=1 ) else: doc_name, anchor = url, '' display_name = einfo[3] except Exception: log.exception( 'Error while getting sphinx domain information. Skipping...', project_slug=version.project.slug, version_slug=version.slug, sphinx_domain=f'{domain}->{name}', ) continue # HACK: This is done because the difference between # ``sphinx.builders.html.StandaloneHTMLBuilder`` # and ``sphinx.builders.dirhtml.DirectoryHTMLBuilder``. # They both have different ways of generating HTML Files, # and therefore the doc_name generated is different. # More info on: http://www.sphinx-doc.org/en/master/usage/builders/index.html#builders # Also see issue: https://github.com/readthedocs/readthedocs.org/issues/5821 if doc_name.endswith('/'): doc_name += 'index.html' html_file = HTMLFile.objects.filter( project=version.project, version=version, path=doc_name, build=build, ).first() if not html_file: log.debug( 'HTMLFile object not found.', project_slug=version.project.slug, version_slug=version.slug, build_id=build, doc_name=doc_name ) # Don't create Sphinx Domain objects # if the HTMLFile object is not found. 
continue SphinxDomain.objects.create( project=version.project, version=version, html_file=html_file, domain=domain, name=name, display_name=display_name, type=_type, type_display=types.get(f'{domain}:{_type}', ''), doc_name=doc_name, doc_display=titles.get(doc_name, ''), anchor=anchor, commit=commit, build=build, ) ","def _create_intersphinx_data(version, commit, build): """""" Create intersphinx data for this version. :param version: Version instance :param commit: Commit that updated path :param build: Build id """""" if not version.is_sphinx_type: return html_storage_path = version.project.get_storage_path( type_='html', version_slug=version.slug, include_file=False ) json_storage_path = version.project.get_storage_path( type_='json', version_slug=version.slug, include_file=False ) object_file = build_media_storage.join(html_storage_path, 'objects.inv') if not build_media_storage.exists(object_file): log.debug('No objects.inv, skipping intersphinx indexing.') return type_file = build_media_storage.join(json_storage_path, 'readthedocs-sphinx-domain-names.json') types = {} titles = {} if build_media_storage.exists(type_file): try: data = json.load(build_media_storage.open(type_file)) types = data['types'] titles = data['titles'] except Exception: log.exception('Exception parsing readthedocs-sphinx-domain-names.json') # These classes are copied from Sphinx # https://github.com/sphinx-doc/sphinx/blob/d79d041f4f90818e0b495523fdcc28db12783caf/sphinx/ext/intersphinx.py#L400-L403 # noqa class MockConfig: intersphinx_timeout = None tls_verify = False user_agent = None class MockApp: srcdir = '' config = MockConfig() def warn(self, msg): log.warning('Sphinx MockApp.', msg=msg) # Re-create all objects from the new build of the version object_file_url = build_media_storage.url(object_file) if object_file_url.startswith('/'): # Filesystem backed storage simply prepends MEDIA_URL to the path to get the URL # This can cause an issue if MEDIA_URL is not fully qualified object_file_url = settings.RTD_INTERSPHINX_URL + object_file_url invdata = intersphinx.fetch_inventory(MockApp(), '', object_file_url) for key, value in sorted(invdata.items() or {}): domain, _type = key.split(':', 1) for name, einfo in sorted(value.items()): # project, version, url, display_name # ('Sphinx', '1.7.9', 'faq.html#epub-faq', 'Epub info') try: url = einfo[2] if '#' in url: doc_name, anchor = url.split( '#', # The anchor can contain ``#`` characters maxsplit=1 ) else: doc_name, anchor = url, '' display_name = einfo[3] except Exception: log.exception( 'Error while getting sphinx domain information. Skipping...', project_slug=version.project.slug, version_slug=version.slug, sphinx_domain=f""{domain}->{name}"", ) continue # HACK: This is done because the difference between # ``sphinx.builders.html.StandaloneHTMLBuilder`` # and ``sphinx.builders.dirhtml.DirectoryHTMLBuilder``. # They both have different ways of generating HTML Files, # and therefore the doc_name generated is different. 
# More info on: http://www.sphinx-doc.org/en/master/usage/builders/index.html#builders # Also see issue: https://github.com/readthedocs/readthedocs.org/issues/5821 if doc_name.endswith('/'): doc_name += 'index.html' html_file = HTMLFile.objects.filter( project=version.project, version=version, path=doc_name, build=build, ).first() if not html_file: log.debug( 'HTMLFile object not found.', project_slug=version.project.slug, version_slug=version.slug, build_id=build, doc_name=doc_name ) # Don't create Sphinx Domain objects # if the HTMLFile object is not found. continue SphinxDomain.objects.create( project=version.project, version=version, html_file=html_file, domain=domain, name=name, display_name=display_name, type=_type, type_display=types.get(f'{domain}:{_type}', ''), doc_name=doc_name, doc_display=titles.get(doc_name, ''), anchor=anchor, commit=commit, build=build, ) " 1790,"def classification_report(y_true, y_pred, labels=None, target_names=None, sample_weight=None, digits=2, output_dict=False, zero_division=""warn""): """"""Build a text report showing the main classification metrics. Read more in the :ref:`User Guide `. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array, shape = [n_labels] Optional list of label indices to include in the report. target_names : list of strings Optional display names matching the labels (same order). sample_weight : array-like of shape (n_samples,), default=None Sample weights. digits : int Number of digits for formatting output floating point values. When ``output_dict`` is ``True``, this will be ignored and the returned values will not be rounded. output_dict : bool (default = False) If True, return output as dict zero_division : ""warn"", 0 or 1, default=""warn"" Sets the value to return when there is a zero division. If set to ""warn"", this acts as 0, but warnings are also raised. Returns ------- report : string / dict Text summary of the precision, recall, F1 score for each class. Dictionary returned if output_dict is True. Dictionary has the following structure:: {'label 1': {'precision':0.5, 'recall':1.0, 'f1-score':0.67, 'support':1}, 'label 2': { ... }, ... } The reported averages include macro average (averaging the unweighted mean per label), weighted average (averaging the support-weighted mean per label), and sample average (only for multilabel classification). Micro average (averaging the total true positives, false negatives and false positives) is only shown for multi-label or multi-class with a subset of classes, because it corresponds to accuracy otherwise and would be constant for all metrics. See also :func:`precision_recall_fscore_support` for more details on averages. Note that in binary classification, recall of the positive class is also known as ""sensitivity""; recall of the negative class is ""specificity"". 
See also -------- precision_recall_fscore_support, confusion_matrix, multilabel_confusion_matrix Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 accuracy 0.60 5 macro avg 0.50 0.56 0.49 5 weighted avg 0.70 0.60 0.61 5 >>> y_pred = [1, 1, 0] >>> y_true = [1, 1, 1] >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3])) precision recall f1-score support 1 1.00 0.67 0.80 3 2 0.00 0.00 0.00 0 3 0.00 0.00 0.00 0 micro avg 1.00 0.67 0.80 3 macro avg 0.33 0.22 0.27 3 weighted avg 1.00 0.67 0.80 3 """""" y_type, y_true, y_pred = _check_targets(y_true, y_pred) if labels is None: labels = unique_labels(y_true, y_pred) labels_given = False else: labels = np.asarray(labels) labels_given = True # labelled micro average micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and (not labels_given or (set(labels) == set(unique_labels(y_true, y_pred))))) if target_names is not None and len(labels) != len(target_names): if labels_given: warnings.warn( ""labels size, {0}, does not match size of target_names, {1}"" .format(len(labels), len(target_names)) ) else: raise ValueError( ""Number of classes, {0}, does not match size of "" ""target_names, {1}. Try specifying the labels "" ""parameter"".format(len(labels), len(target_names)) ) if target_names is None: target_names = ['%s' % l for l in labels] headers = [""precision"", ""recall"", ""f1-score"", ""support""] # compute per-class results without averaging p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight, zero_division=zero_division) rows = zip(target_names, p, r, f1, s) if y_type.startswith('multilabel'): average_options = ('micro', 'macro', 'weighted', 'samples') else: average_options = ('micro', 'macro', 'weighted') if output_dict: report_dict = {label[0]: label[1:] for label in rows} for label, scores in report_dict.items(): report_dict[label] = dict(zip(headers, [i.item() for i in scores])) else: longest_last_line_heading = 'weighted avg' name_width = max(len(cn) for cn in target_names) width = max(name_width, len(longest_last_line_heading), digits) head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers) report = head_fmt.format('', *headers, width=width) report += '\n\n' row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n' for row in rows: report += row_fmt.format(*row, width=width, digits=digits) report += '\n' # compute all applicable averages for average in average_options: if average.startswith('micro') and micro_is_accuracy: line_heading = 'accuracy' else: line_heading = average + ' avg' # compute averages with specified averaging method avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support( y_true, y_pred, labels=labels, average=average, sample_weight=sample_weight, zero_division=zero_division) avg = [avg_p, avg_r, avg_f1, np.sum(s)] if output_dict: report_dict[line_heading] = dict( zip(headers, [i.item() for i in avg])) else: if line_heading == 'accuracy': row_fmt_accuracy = '{:>{width}s} ' + \ ' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \ ' {:>9}\n' report += row_fmt_accuracy.format(line_heading, '', '', *avg[2:], width=width, digits=digits) else: report += row_fmt.format(line_heading, *avg, width=width, digits=digits) if 
output_dict: if 'accuracy' in report_dict.keys(): report_dict['accuracy'] = report_dict['accuracy']['precision'] return report_dict else: return report ","def classification_report(y_true, y_pred, labels=None, target_names=None, sample_weight=None, digits=2, output_dict=False, zero_division=""warn""): """"""Build a text report showing the main classification metrics. Read more in the :ref:`User Guide `. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array, shape = [n_labels] Optional list of label indices to include in the report. target_names : list of strings Optional display names matching the labels (same order). sample_weight : array-like of shape (n_samples,), default=None Sample weights. digits : int Number of digits for formatting output floating point values. When ``output_dict`` is ``True``, this will be ignored and the returned values will not be rounded. output_dict : bool (default = False) If True, return output as dict zero_division : ""warn"", 0 or 1, default=""warn"" Sets the value to return when there is a zero division. If set to ""warn"", this acts as 0, but warnings are also raised. Returns ------- report : string / dict Text summary of the precision, recall, F1 score for each class. Dictionary returned if output_dict is True. Dictionary has the following structure:: {'label 1': {'precision':0.5, 'recall':1.0, 'f1-score':0.67, 'support':1}, 'label 2': { ... }, ... } The reported averages include macro average (averaging the unweighted mean per label), weighted average (averaging the support-weighted mean per label), and sample average (only for multilabel classification). Micro average (averaging the total true positives, false negatives and false positives) is only shown for multi-label or multi-class with a subset of classes, because it corresponds to accuracy otherwise and would be the same for all metrics. See also :func:`precision_recall_fscore_support` for more details on averages. Note that in binary classification, recall of the positive class is also known as ""sensitivity""; recall of the negative class is ""specificity"". 
See also -------- precision_recall_fscore_support, confusion_matrix, multilabel_confusion_matrix Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 accuracy 0.60 5 macro avg 0.50 0.56 0.49 5 weighted avg 0.70 0.60 0.61 5 >>> y_pred = [1, 1, 0] >>> y_true = [1, 1, 1] >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3])) precision recall f1-score support 1 1.00 0.67 0.80 3 2 0.00 0.00 0.00 0 3 0.00 0.00 0.00 0 micro avg 1.00 0.67 0.80 3 macro avg 0.33 0.22 0.27 3 weighted avg 1.00 0.67 0.80 3 """""" y_type, y_true, y_pred = _check_targets(y_true, y_pred) if labels is None: labels = unique_labels(y_true, y_pred) labels_given = False else: labels = np.asarray(labels) labels_given = True # labelled micro average micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and (not labels_given or (set(labels) == set(unique_labels(y_true, y_pred))))) if target_names is not None and len(labels) != len(target_names): if labels_given: warnings.warn( ""labels size, {0}, does not match size of target_names, {1}"" .format(len(labels), len(target_names)) ) else: raise ValueError( ""Number of classes, {0}, does not match size of "" ""target_names, {1}. Try specifying the labels "" ""parameter"".format(len(labels), len(target_names)) ) if target_names is None: target_names = ['%s' % l for l in labels] headers = [""precision"", ""recall"", ""f1-score"", ""support""] # compute per-class results without averaging p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight, zero_division=zero_division) rows = zip(target_names, p, r, f1, s) if y_type.startswith('multilabel'): average_options = ('micro', 'macro', 'weighted', 'samples') else: average_options = ('micro', 'macro', 'weighted') if output_dict: report_dict = {label[0]: label[1:] for label in rows} for label, scores in report_dict.items(): report_dict[label] = dict(zip(headers, [i.item() for i in scores])) else: longest_last_line_heading = 'weighted avg' name_width = max(len(cn) for cn in target_names) width = max(name_width, len(longest_last_line_heading), digits) head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers) report = head_fmt.format('', *headers, width=width) report += '\n\n' row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n' for row in rows: report += row_fmt.format(*row, width=width, digits=digits) report += '\n' # compute all applicable averages for average in average_options: if average.startswith('micro') and micro_is_accuracy: line_heading = 'accuracy' else: line_heading = average + ' avg' # compute averages with specified averaging method avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support( y_true, y_pred, labels=labels, average=average, sample_weight=sample_weight, zero_division=zero_division) avg = [avg_p, avg_r, avg_f1, np.sum(s)] if output_dict: report_dict[line_heading] = dict( zip(headers, [i.item() for i in avg])) else: if line_heading == 'accuracy': row_fmt_accuracy = '{:>{width}s} ' + \ ' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \ ' {:>9}\n' report += row_fmt_accuracy.format(line_heading, '', '', *avg[2:], width=width, digits=digits) else: report += row_fmt.format(line_heading, *avg, width=width, digits=digits) if 
output_dict: if 'accuracy' in report_dict.keys(): report_dict['accuracy'] = report_dict['accuracy']['precision'] return report_dict else: return report " 31783,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" args = demisto.args() params = demisto.params() api_key = params.get('apikey') api_key_id = params.get('apikey_id') base_url = urljoin(params['url'], '/public_api/v1') verify_cert = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: nonce = """".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)]) timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000) auth_key = ""%s%s%s"" % (api_key, nonce, timestamp) api_key_hash = hashlib.sha256(auth_key.encode(""utf-8"")).hexdigest() headers = { ""x-xdr-timestamp"": str(timestamp), ""x-xdr-nonce"": nonce, ""x-xdr-auth-id"": str(api_key_id), ""Authorization"": api_key_hash } client = Client( base_url=base_url, verify=verify_cert, headers=headers, proxy=proxy) generic_commands = init_generic_commands() built_in_commands = init_built_in_commands() if command in generic_commands: return_results(generic_commands[command](client, args)) elif command in built_in_commands: return_results(get_built_in_query_results_polling_command(client, args)) else: raise NotImplementedError(f'Command {command} does not exist.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError: {str(e)}') finally: get_integration_context().clear() ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" args = demisto.args() params = demisto.params() api_key = params.get('apikey') api_key_id = params.get('apikey_id') base_url = urljoin(params['url'], '/public_api/v1') verify_cert = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: nonce = """".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)]) timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000) auth_key = ""%s%s%s"" % (api_key, nonce, timestamp) api_key_hash = hashlib.sha256(auth_key.encode(""utf-8"")).hexdigest() headers = { ""x-xdr-timestamp"": timestamp, ""x-xdr-nonce"": nonce, ""x-xdr-auth-id"": str(api_key_id), ""Authorization"": api_key_hash } client = Client( base_url=base_url, verify=verify_cert, headers=headers, proxy=proxy) generic_commands = init_generic_commands() built_in_commands = init_built_in_commands() if command in generic_commands: return_results(generic_commands[command](client, args)) elif command in built_in_commands: return_results(get_built_in_query_results_polling_command(client, args)) else: raise NotImplementedError(f'Command {command} does not exist.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError: {str(e)}') finally: get_integration_context().clear() " 21162,"def git_sparse_checkout(repo, subpath, dest, branch): # We're using Git, partial clone and sparse checkout to # only clone the files we need # This ends up being RIDICULOUS. omg. # So, every tutorial and SO post talks about 'sparse checkout'...But they # go and *clone* the whole repo. Worthless. 
And cloning part of a repo # turns out to be completely broken. The only way to specify a ""path"" is.. # a path *on the server*? The contents of which, specifies the paths. Wat. # Obviously this is hopelessly broken and insecure, because you can query # arbitrary paths on the server! So nobody enables this. # What we have to do is disable *all* files. We could then just checkout # the path, and it'd ""work"", but be hopelessly slow...Because it goes and # transfers every missing object one-by-one. So the final piece is that we # need to use some weird git internals to fetch the missings in bulk, and # *that* we can do by path. # We're using Git and sparse checkout to only clone the files we need with make_tempdir() as tmp_dir: # This is the ""clone, but don't download anything"" part. cmd = ( f""git clone {repo} {tmp_dir} --no-checkout --depth 1 "" f""-b {branch} --filter=blob:none"" ) run_command(cmd) # Now we need to find the missing filenames for the subpath we want. # Looking for this 'rev-list' command in the git --help? Hah. cmd = f""git -C {tmp_dir} rev-list --objects --all --missing=print -- {subpath}"" ret = run_command(cmd, capture=True) git_repo = _http_to_git(repo) # Now pass those missings into another bit of git internals missings = "" "".join([x[1:] for x in ret.stdout.split() if x.startswith(""?"")]) if not missings: err = ( f""Could not find any relevant files for '{subpath}'. "" f""Did you specify a correct and complete path within repo '{repo}' "" f""and branch {branch}?"" ) msg.fail(err, exits=1) cmd = f""git -C {tmp_dir} fetch-pack {git_repo} {missings}"" run_command(cmd, capture=True) # And finally, we can checkout our subpath cmd = f""git -C {tmp_dir} checkout {branch} {subpath}"" run_command(cmd, capture=True) # Get a subdirectory of the cloned path, if approriate source_path = tmp_dir / Path(subpath) if not is_subpath_of(tmp_dir, source_path): err = f""{subpath!r} is a path outside of the cloned repository."" msg.fail(err, repo, exits=1) shutil.move(str(source_path), str(dest)) ","def git_sparse_checkout(repo, subpath, dest, branch): # We're using Git, partial clone and sparse checkout to # only clone the files we need # This ends up being RIDICULOUS. omg. # So, every tutorial and SO post talks about 'sparse checkout'...But they # go and *clone* the whole repo. Worthless. And cloning part of a repo # turns out to be completely broken. The only way to specify a ""path"" is.. # a path *on the server*? The contents of which, specifies the paths. Wat. # Obviously this is hopelessly broken and insecure, because you can query # arbitrary paths on the server! So nobody enables this. # What we have to do is disable *all* files. We could then just checkout # the path, and it'd ""work"", but be hopelessly slow...Because it goes and # transfers every missing object one-by-one. So the final piece is that we # need to use some weird git internals to fetch the missings in bulk, and # *that* we can do by path. # We're using Git and sparse checkout to only clone the files we need with make_tempdir() as tmp_dir: # This is the ""clone, but don't download anything"" part. cmd = ( f""git clone {repo} {tmp_dir} --no-checkout --depth 1 "" f""-b {branch} --filter=blob:none"" ) run_command(cmd) # Now we need to find the missing filenames for the subpath we want. # Looking for this 'rev-list' command in the git --help? Hah. 
cmd = f""git -C {tmp_dir} rev-list --objects --all --missing=print -- {subpath}"" ret = run_command(cmd, capture=True) git_repo = _http_to_git(repo) # Now pass those missings into another bit of git internals missings = "" "".join([x[1:] for x in ret.stdout.split() if x.startswith(""?"")]) if not missings: err = ( f""Could not find any relevant files for '{subpath}'. "" f""Did you specify a correct and complete path within repo '{repo}' "" f""and branch {branch}?"" ) msg.fail(err, exits=1) cmd = f""git -C {tmp_dir} fetch-pack {git_repo} {missings}"" run_command(cmd, capture=True) # And finally, we can checkout our subpath cmd = f""git -C {tmp_dir} checkout {branch} {subpath}"" run_command(cmd, capture=True) # Get a subdirectory of the cloned path, if appropriate source_path = tmp_dir / Path(subpath) if not is_subpath_of(tmp_dir, source_path): err = f""{subpath!r} is a path outside of the cloned repository."" msg.fail(err, repo, exits=1) shutil.move(str(source_path), str(dest)) " 47772,"def test__cli__command_extra_config_fail(): """"""Check the script raises the right exception on a # noqa: D415 non-existant extra config path. """""" result = invoke_assert_code( ret_code=66, args=[ lint, [ ""--config"", ""test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd"", ""test/fixtures/cli/extra_config_tsql.sql"", ], ], ) assert ( ""Extra config 'test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd' does not "" ""exist."" in result.stdout ) ","def test__cli__command_extra_config_fail(): """"""Check the script raises the right exception on a # noqa: D415 non-existent extra config path. """""" result = invoke_assert_code( ret_code=66, args=[ lint, [ ""--config"", ""test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd"", ""test/fixtures/cli/extra_config_tsql.sql"", ], ], ) assert ( ""Extra config 'test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd' does not "" ""exist."" in result.stdout ) " 33772,"def unflatten_list_dict(dt, delimiter=""/""): """"""Unflatten nested dict and list."""""" out_type = list if list(dt)[0].split(delimiter, 1)[0].isdigit() else dict out = out_type() for key, val in dt.items(): path = key.split(delimiter) item = out for i, k in enumerate(path[:-1]): next_type = list if path[i+1].isdigit() else dict if isinstance(item, dict): item = item.setdefault(k, next_type()) elif isinstance(item, list): if int(k) >= len(item): item.append(next_type()) assert int(k) == len(item) - 1 item = item[int(k)] if isinstance(item, dict): item[path[-1]] = val elif isinstance(item, list): item.append(val) assert int(path[-1]) == len(item) - 1 return out ","def unflatten_list_dict(dt, delimiter=""/""): """"""Unflatten nested dict and list."""""" out_type = list if list(dt)[0].split(delimiter, 1)[0].isdigit() else dict out = out_type() for key, val in dt.items(): path = key.split(delimiter) item = out for i, k in enumerate(path[:-1]): next_type = list if path[i+1].isdigit() else dict if isinstance(item, dict): item = item.setdefault(k, next_type()) elif isinstance(item, (list, tuple)): if int(k) >= len(item): item.append(next_type()) assert int(k) == len(item) - 1 item = item[int(k)] if isinstance(item, dict): item[path[-1]] = val elif isinstance(item, list): item.append(val) assert int(path[-1]) == len(item) - 1 return out " 32067,"def test_module( client: Client, ): URL_SUFFIX = '/restapi/json/v1/resources/resourcetypes' headers = { 'APP_AUTHTOKEN': client._app_token, 'APP_TYPE': '17' } r = requests.request(""GET"", client._base_url + URL_SUFFIX, headers=headers, verify=client._verify) if r.status_code 
!= 200: return 'Failed to connect to server' else: return 'ok' ","def test_module( client: Client, ): URL_SUFFIX = '/restapi/json/v1/resources/resourcetypes' headers = { 'APP_AUTHTOKEN': client._app_token, 'APP_TYPE': '17' } r = requests.request(""GET"", client._base_url + URL_SUFFIX, headers=headers, verify=client._verify) if r.status_code != 200: return_results('Failed to connect to server') else: return 'ok' " 8476,"def add_qt5_dependencies(hook_file): # Accumulate all dependencies in a set to avoid duplicates. hiddenimports = set() translations_base = set() plugins = set() # Find the module underlying this Qt hook: change # ``/path/to/hook-PyQt5.blah.py`` to ``PyQt5.blah``. hook_name, hook_ext = os.path.splitext(os.path.basename(hook_file)) assert hook_ext.startswith('.py') assert hook_name.startswith('hook-') module_name = hook_name[5:] namespace = module_name.split('.')[0] if namespace not in ('PyQt5', 'PySide2'): raise Exception('Invalid namespace: {0}'.format(namespace)) is_PyQt5 = namespace == 'PyQt5' # Exit if the requested library can't be imported. if ((is_PyQt5 and not pyqt5_library_info.version) or (not is_PyQt5 and not pyside2_library_info.version)): return [], [], [] # Look up the module returned by this import. module = get_module_file_attribute(module_name) logger.debug('add_qt5_dependencies: Examining %s, based on hook of %s.', module, hook_file) # Walk through all the static dependencies of a dynamically-linked library # (``.so``/``.dll``/``.dylib``). imports = set(getImports(module)) while imports: imp = imports.pop() # On Windows, find this library; other platforms already provide the # full path. if is_win: imp = getfullnameof(imp, # First, look for Qt binaries in the local Qt install. pyqt5_library_info.location['BinariesPath'] if is_PyQt5 else pyside2_library_info.location['BinariesPath'] ) # Strip off the extension and ``lib`` prefix (Linux/Mac) to give the raw # name. Lowercase (since Windows always normalized names to lowercase). lib_name = os.path.splitext(os.path.basename(imp))[0].lower() # Linux libraries sometimes have a dotted version number -- # ``libfoo.so.3``. It's now ''libfoo.so``, but the ``.so`` must also be # removed. if is_linux and os.path.splitext(lib_name)[1] == '.so': lib_name = os.path.splitext(lib_name)[0] if lib_name.startswith('lib'): lib_name = lib_name[3:] if is_cygwin: if os.path.splitext(lib_name)[1] == '.dll': lib_name = os.path.splitext(lib_name)[0] if lib_name.startswith('cyg'): lib_name = lib_name[3:] if re.findall('-[0-9]', lib_name) != []: lib_name = re.split('-[0-9]', lib_name)[0] # Mac: rename from ``qt`` to ``qt5`` to match names in Windows/Linux. if is_darwin and lib_name.startswith('qt'): lib_name = 'qt5' + lib_name[2:] # match libs with QT_LIBINFIX set to '_conda', i.e. conda-forge builds if lib_name.endswith('_conda'): lib_name = lib_name[:-6] logger.debug('add_qt5_dependencies: raw lib %s -> parsed lib %s', imp, lib_name) # Follow only Qt dependencies. if lib_name in _qt_dynamic_dependencies_dict: # Follow these to find additional dependencies. logger.debug('add_qt5_dependencies: Import of %s.', imp) imports.update(getImports(imp)) # Look up which plugins and translations are needed. dd = _qt_dynamic_dependencies_dict[lib_name] lib_name_hiddenimports, lib_name_translations_base = dd[:2] lib_name_plugins = dd[2:] # Add them in. 
if lib_name_hiddenimports: hiddenimports.update([namespace + lib_name_hiddenimports]) plugins.update(lib_name_plugins) if lib_name_translations_base: translations_base.update([lib_name_translations_base]) # Change plugins into binaries. binaries = [] for plugin in plugins: more_binaries = qt_plugins_binaries(plugin, namespace=namespace) binaries.extend(more_binaries) # Change translation_base to datas. tp = ( pyqt5_library_info.location['TranslationsPath'] if is_PyQt5 else pyside2_library_info.location['TranslationsPath'] ) datas = [] for tb in translations_base: src = os.path.join(tp, tb + '_*.qm') # Not all PyQt5 installations include translations. See # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359479893 # and # https://github.com/pyinstaller/pyinstaller/issues/2857#issuecomment-368744341. if glob.glob(src): datas.append(( src, os.path.join( # The PySide2 Windows wheels place translations in a # different location. namespace, '' if not is_PyQt5 and is_win else 'Qt', 'translations' ) )) else: logger.warning('Unable to find Qt5 translations %s. These ' 'translations were not packaged.', src) # Change hiddenimports to a list. hiddenimports = list(hiddenimports) logger.debug('add_qt5_dependencies: imports from %s:\n' ' hiddenimports = %s\n' ' binaries = %s\n' ' datas = %s', hook_name, hiddenimports, binaries, datas) return hiddenimports, binaries, datas ","def add_qt5_dependencies(hook_file): # Accumulate all dependencies in a set to avoid duplicates. hiddenimports = set() translations_base = set() plugins = set() # Find the module underlying this Qt hook: change # ``/path/to/hook-PyQt5.blah.py`` to ``PyQt5.blah``. hook_name, hook_ext = os.path.splitext(os.path.basename(hook_file)) assert hook_ext.startswith('.py') assert hook_name.startswith('hook-') module_name = hook_name[5:] namespace = module_name.split('.')[0] if namespace not in ('PyQt5', 'PySide2'): raise Exception('Invalid namespace: {0}'.format(namespace)) is_PyQt5 = namespace == 'PyQt5' # Exit if the requested library can't be imported. if ((is_PyQt5 and not pyqt5_library_info.version) or (not is_PyQt5 and not pyside2_library_info.version)): return [], [], [] # Look up the module returned by this import. module = get_module_file_attribute(module_name) logger.debug('add_qt5_dependencies: Examining %s, based on hook of %s.', module, hook_file) # Walk through all the static dependencies of a dynamically-linked library # (``.so``/``.dll``/``.dylib``). imports = set(getImports(module)) while imports: imp = imports.pop() # On Windows, find this library; other platforms already provide the # full path. if is_win: imp = getfullnameof(imp, # First, look for Qt binaries in the local Qt install. pyqt5_library_info.location['BinariesPath'] if is_PyQt5 else pyside2_library_info.location['BinariesPath'] ) # Strip off the extension and ``lib`` prefix (Linux/Mac) to give the raw # name. Lowercase (since Windows always normalized names to lowercase). lib_name = os.path.splitext(os.path.basename(imp))[0].lower() # Linux libraries sometimes have a dotted version number -- # ``libfoo.so.3``. It's now ''libfoo.so``, but the ``.so`` must also be # removed. 
if is_linux and os.path.splitext(lib_name)[1] == '.so': lib_name = os.path.splitext(lib_name)[0] if lib_name.startswith('lib'): lib_name = lib_name[3:] if is_cygwin: if lib_name.endswith('.dll'): lib_name = os.path.splitext(lib_name)[0] if lib_name.startswith('cyg'): lib_name = lib_name[3:] if re.findall('-[0-9]', lib_name) != []: lib_name = re.split('-[0-9]', lib_name)[0] # Mac: rename from ``qt`` to ``qt5`` to match names in Windows/Linux. if is_darwin and lib_name.startswith('qt'): lib_name = 'qt5' + lib_name[2:] # match libs with QT_LIBINFIX set to '_conda', i.e. conda-forge builds if lib_name.endswith('_conda'): lib_name = lib_name[:-6] logger.debug('add_qt5_dependencies: raw lib %s -> parsed lib %s', imp, lib_name) # Follow only Qt dependencies. if lib_name in _qt_dynamic_dependencies_dict: # Follow these to find additional dependencies. logger.debug('add_qt5_dependencies: Import of %s.', imp) imports.update(getImports(imp)) # Look up which plugins and translations are needed. dd = _qt_dynamic_dependencies_dict[lib_name] lib_name_hiddenimports, lib_name_translations_base = dd[:2] lib_name_plugins = dd[2:] # Add them in. if lib_name_hiddenimports: hiddenimports.update([namespace + lib_name_hiddenimports]) plugins.update(lib_name_plugins) if lib_name_translations_base: translations_base.update([lib_name_translations_base]) # Change plugins into binaries. binaries = [] for plugin in plugins: more_binaries = qt_plugins_binaries(plugin, namespace=namespace) binaries.extend(more_binaries) # Change translation_base to datas. tp = ( pyqt5_library_info.location['TranslationsPath'] if is_PyQt5 else pyside2_library_info.location['TranslationsPath'] ) datas = [] for tb in translations_base: src = os.path.join(tp, tb + '_*.qm') # Not all PyQt5 installations include translations. See # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359479893 # and # https://github.com/pyinstaller/pyinstaller/issues/2857#issuecomment-368744341. if glob.glob(src): datas.append(( src, os.path.join( # The PySide2 Windows wheels place translations in a # different location. namespace, '' if not is_PyQt5 and is_win else 'Qt', 'translations' ) )) else: logger.warning('Unable to find Qt5 translations %s. These ' 'translations were not packaged.', src) # Change hiddenimports to a list. 
hiddenimports = list(hiddenimports) logger.debug('add_qt5_dependencies: imports from %s:\n' ' hiddenimports = %s\n' ' binaries = %s\n' ' datas = %s', hook_name, hiddenimports, binaries, datas) return hiddenimports, binaries, datas " 23133,"def test_roll_bug(): # This bug was exposed in GitHub Issue #8723 x = da.arange(2, 3) y = da.roll(x, 1) y[0] = 0 assert x[0].compute() == 2 ","def test_roll_bug(): # This bug was exposed in GitHub Issue #8723 x = da.arange(2, 3) y = da.roll(x, 1) assert y is not x " 37820,"def get_build_identifiers( platform: PlatformName, build_selector: BuildSelector, architectures: Set[Architecture] ) -> List[str]: python_configurations: Union[ List[cibuildwheel.linux.PythonConfiguration], List[cibuildwheel.windows.PythonConfiguration], List[cibuildwheel.macos.PythonConfiguration], ] if platform == ""linux"" or platform == ""crosslinux"": python_configurations = cibuildwheel.linux.get_python_configurations( build_selector, architectures ) elif platform == ""windows"": python_configurations = cibuildwheel.windows.get_python_configurations( build_selector, architectures ) elif platform == ""macos"": python_configurations = cibuildwheel.macos.get_python_configurations( build_selector, architectures ) else: assert_never(platform) return [config.identifier for config in python_configurations] ","def get_build_identifiers( platform: PlatformName, build_selector: BuildSelector, architectures: Set[Architecture] ) -> List[str]: python_configurations: Union[ List[cibuildwheel.linux.PythonConfiguration], List[cibuildwheel.windows.PythonConfiguration], List[cibuildwheel.macos.PythonConfiguration], ] if platform in {""linux"", ""crosslinux""}: python_configurations = cibuildwheel.linux.get_python_configurations( build_selector, architectures ) elif platform == ""windows"": python_configurations = cibuildwheel.windows.get_python_configurations( build_selector, architectures ) elif platform == ""macos"": python_configurations = cibuildwheel.macos.get_python_configurations( build_selector, architectures ) else: assert_never(platform) return [config.identifier for config in python_configurations] " 19997,"def test_plantcv_transform_find_color_card_optional_parameters(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD)) # Test cache directory cache_dir = os.path.join(TEST_TMPDIR, ""test_plantcv_transform_find_color_card"") os.mkdir(cache_dir) pcv.params.debug_outdir = cache_dir # Test with threshold ='normal' df1, start1, space1 = pcv.transform.find_color_card(img=rgb_img, threshold='normal', blurry=True, background='light') _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start1, spacing=space1, nrows=6, ncols=4, exclude=[20, 0]) # Test with threshold='otsu' df2, start2, space2 = pcv.transform.find_color_card(img=rgb_img, threshold='otsu', blurry=True) _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = None pcv.params.debug = None mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220], dtype=np.uint8))) ","def test_plantcv_transform_find_color_card_optional_parameters(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, 
TEST_TARGET_IMG_COLOR_CARD)) # Test cache directory cache_dir = os.path.join(TEST_TMPDIR, ""test_plantcv_transform_find_color_card"") os.mkdir(cache_dir) pcv.params.debug_outdir = cache_dir # Test with threshold ='normal' df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold='normal', blurry=True, background='light') _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start1, spacing=space1, nrows=6, ncols=4, exclude=[20, 0]) # Test with threshold='otsu' df2, start2, space2 = pcv.transform.find_color_card(img=rgb_img, threshold='otsu', blurry=True) _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = None pcv.params.debug = None mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start2, spacing=space2, nrows=6, ncols=4, exclude=[20, 0]) assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220], dtype=np.uint8))) " 3235,"def selector_func(method, callargs, switchover_timestamp): spec = method_specifications.get(method) if spec is None: return ""redis"" # default backend (possibly invoke base directly instead?) if switchover_timestamp and time.time() < switchover_timestamp: return ""redis"" # snuba does not yet have all data operation_type, model_extractor = spec backends = {model_backends[model][operation_type] for model in model_extractor(callargs)} assert len(backends) == 1, ""request was not directed to a single backend"" return backends.pop() ","def selector_func(method, callargs, switchover_timestamp=None): spec = method_specifications.get(method) if spec is None: return ""redis"" # default backend (possibly invoke base directly instead?) if switchover_timestamp and time.time() < switchover_timestamp: return ""redis"" # snuba does not yet have all data operation_type, model_extractor = spec backends = {model_backends[model][operation_type] for model in model_extractor(callargs)} assert len(backends) == 1, ""request was not directed to a single backend"" return backends.pop() " 39360,"def PlatonicSolid(kind='tetrahedron', radius=1.0, center=(0.0, 0.0, 0.0)): """"""Create a Platonic solid of a given size. Parameters ---------- kind : str or int, optional The kind of Platonic solid to create. Either the name of the polyhedron or an integer index: * ``'tetrahedron'`` or ``0`` * ``'cube'`` or ``1`` * ``'octahedron'`` or ``2`` * ``'icosahedron'`` or ``3`` * ``'dodecahedron'`` or ``4`` radius : float, optional The radius of the circumscribed sphere for the solid to create. center : sequence, optional Three-length sequence defining the center of the solid to create. Returns ------- pyvista.PolyData One of the five Platonic solids. Cell scalars are defined that assign integer labels to each face (with array name ``""FaceIndex""``). 
Examples -------- Create and plot a dodecahedron >>> import pyvista >>> dodeca = pyvista.PlatonicSolid('dodecahedron') >>> dodeca.plot(categories=True) """""" kinds = { 'tetrahedron': 0, 'cube': 1, 'octahedron': 2, 'icosahedron': 3, 'dodecahedron': 4, } if isinstance(kind, str): if kind not in kinds: raise ValueError(f'Invalid Platonic solid kind ""{kind}"".') kind = kinds[kind] elif isinstance(kind, int) and kind not in range(5): raise ValueError(f'Invalid Platonic solid index ""{kind}"".') elif not isinstance(kind, int): raise ValueError('Invalid Platonic solid index type ' f'""{type(kind).__name__}"".') check_valid_vector(center, 'center') solid = _vtk.vtkPlatonicSolidSource() solid.SetSolidType(kind) solid.Update() solid = pyvista.wrap(solid.GetOutput()) solid.scale(radius) solid.points += np.asanyarray(center) - solid.center # rename and activate cell scalars cell_data = solid.get_array(0) solid.clear_data() solid.cell_data['FaceIndex'] = cell_data return solid ","def PlatonicSolid(kind='tetrahedron', radius=1.0, center=(0.0, 0.0, 0.0)): """"""Create a Platonic solid of a given size. Parameters ---------- kind : str or int, optional The kind of Platonic solid to create. Either the name of the polyhedron or an integer index: * ``'tetrahedron'`` or ``0`` * ``'cube'`` or ``1`` * ``'octahedron'`` or ``2`` * ``'icosahedron'`` or ``3`` * ``'dodecahedron'`` or ``4`` radius : float, optional The radius of the circumscribed sphere for the solid to create. center : sequence, optional Three-length sequence defining the center of the solid to create. Returns ------- pyvista.PolyData One of the five Platonic solids. Cell scalars are defined that assign integer labels to each face (with array name ``""FaceIndex""``). Examples -------- Create and plot a dodecahedron. >>> import pyvista >>> dodeca = pyvista.PlatonicSolid('dodecahedron') >>> dodeca.plot(categories=True) """""" kinds = { 'tetrahedron': 0, 'cube': 1, 'octahedron': 2, 'icosahedron': 3, 'dodecahedron': 4, } if isinstance(kind, str): if kind not in kinds: raise ValueError(f'Invalid Platonic solid kind ""{kind}"".') kind = kinds[kind] elif isinstance(kind, int) and kind not in range(5): raise ValueError(f'Invalid Platonic solid index ""{kind}"".') elif not isinstance(kind, int): raise ValueError('Invalid Platonic solid index type ' f'""{type(kind).__name__}"".') check_valid_vector(center, 'center') solid = _vtk.vtkPlatonicSolidSource() solid.SetSolidType(kind) solid.Update() solid = pyvista.wrap(solid.GetOutput()) solid.scale(radius) solid.points += np.asanyarray(center) - solid.center # rename and activate cell scalars cell_data = solid.get_array(0) solid.clear_data() solid.cell_data['FaceIndex'] = cell_data return solid " 33941,"def scrub_traceback(ex): assert isinstance(ex, str) print(ex) ex = ex.strip(""\n"") ex = re.sub(""pid=[0-9]+,"", ""pid=XXX,"", ex) ex = re.sub(""ip=[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"", ""ip=YYY"", ex) ex = re.sub(""repr=.*\)"", ""repr=ZZZ)"", ex) ex = re.sub(""line .*,"", ""line ZZ,"", ex) ex = re.sub('"".*""', '""FILE""', ex) # These are used to coloring the string. ex = re.sub(""\\x1b\[36m"", """", ex) ex = re.sub(""\\x1b\[39m"", """", ex) # When running bazel test with pytest 6.x, the module name becomes # ""python.ray.tests.test_traceback"" instead of just ""test_traceback"" ex = re.sub(""python.ray.tests.test_traceback"", ""test_traceback"", ex) # Clean up object address. 
ex = re.sub(""object at .*>"", ""object at ADDRESS>"", ex) return ex ","def scrub_traceback(ex): assert isinstance(ex, str) print(ex) ex = ex.strip(""\n"") ex = re.sub(""pid=[0-9]+,"", ""pid=XXX,"", ex) ex = re.sub(""ip=[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"", ""ip=YYY"", ex) ex = re.sub(""repr=.*\)"", ""repr=ZZZ)"", ex) ex = re.sub(""line .*,"", ""line ZZ,"", ex) ex = re.sub('"".*""', '""FILE""', ex) # These are used to coloring the string. ex = re.sub(""\\x1b\[36m"", """", ex) ex = re.sub(""\\x1b\[39m"", """", ex) # When running bazel test with pytest 6.x, the module name becomes # ""python.ray.tests.test_traceback"" instead of just ""test_traceback"" ex = re.sub(r""python\.ray\.tests\.test_traceback"", ""test_traceback"", ex) # Clean up object address. ex = re.sub(""object at .*>"", ""object at ADDRESS>"", ex) return ex " 42346,"def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get a role argument specification. .. note:: Version added: 2.2 :param str role: Simple role name, or fully qualified collection role name, to query. :param str collection: If specified, will be combined with the role name to form a fully qualified collection role name. If this is supplied, the ``role`` param should not be fully qualified. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. 
(default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_argspec_command(role, collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error ","def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get a role argument specification. .. note:: Version added: 2.2 :param str role: Simple role name, or fully qualified collection role name, to query. :param str collection: If specified, will be combined with the role name to form a fully qualified collection role name. If this is supplied, the ``role`` param should not be fully qualified. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. 
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an Ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. 
If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_argspec_command(role, collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error " 5360,"def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **kwargs): """""" Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed `, :py:mod:`pkg.removed `, and :py:mod:`pkg.purged ` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : None If this option is not explicitly set, and there is no epoch in the desired package version, the epoch will be implicitly ignored. Set this argument to ``True`` to explicitly ignore the epoch, and ``False`` to strictly enforce it. .. versionadded:: 2015.8.9 .. versionchanged:: 3001 In prior releases, the default behavior was to strictly enforce epochs unless this argument was set to ``True``. Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. 
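
get_role_argspec above ultimately returns a (response, error) tuple built from the runner's stdout and stderr, parsing the stdout as JSON only when something came back. A generic sketch of that capture-and-parse shape, using a throwaway child process instead of ansible-doc (run_and_parse is an invented helper, not part of ansible-runner):

import json
import subprocess
import sys

def run_and_parse(cmd):
    # Capture stdout/stderr, parse stdout as JSON only when it is non-empty.
    proc = subprocess.run(cmd, capture_output=True, text=True)
    response = json.loads(proc.stdout) if proc.stdout.strip() else {}
    return response, proc.stderr

cmd = [sys.executable, "-c", "import json; print(json.dumps({'role': {}}))"]
response, error = run_and_parse(cmd)
print(response, repr(error))
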
versionadded:: 0.16.0 """""" kwargs[""saltenv""] = __env__ try: return _uninstall( action=""remove"", name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs ) except CommandExecutionError as exc: ret = {""name"": name, ""result"": False} if exc.info: # Get information for state return from the exception. ret[""changes""] = exc.info.get(""changes"", {}) ret[""comment""] = exc.strerror_without_changes else: ret[""changes""] = {} ret[ ""comment"" ] = ""An error was encountered while removing "" ""package(s): {}"".format(exc) return ret ","def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **kwargs): """""" Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed `, :py:mod:`pkg.removed `, and :py:mod:`pkg.purged ` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : None If this option is not explicitly set, and there is no epoch in the desired package version, the epoch will be implicitly ignored. Set this argument to ``True`` to explicitly ignore the epoch, and ``False`` to strictly enforce it. .. versionadded:: 2015.8.9 .. versionchanged:: 3001 In prior releases, the default behavior was to strictly enforce epochs unless this argument was set to ``True``. Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 """""" kwargs[""saltenv""] = __env__ try: return _uninstall( action=""remove"", name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs ) except CommandExecutionError as exc: ret = {""name"": name, ""result"": False} if exc.info: # Get information for state return from the exception. 
ret[""changes""] = exc.info.get(""changes"", {}) ret[""comment""] = exc.strerror_without_changes else: ret[""changes""] = {} ret[ ""comment"" ] = ""An error was encountered while removing package(s): {}"".format(exc) return ret " 34197,"def create_http_input_channels( channel: Optional[Text], credentials_file: Optional[Text] ) -> List[""InputChannel""]: """"""Instantiate the chosen input channel."""""" if credentials_file: all_credentials = rasa.utils.io.read_config_file(credentials_file) else: all_credentials = {} if channel: if len(all_credentials) > 1: logger.warning( ""Only credentials for chosen {} connector will be loaded."".format( channel ) ) return [_create_single_channel(channel, all_credentials.get(channel))] else: return [_create_single_channel(c, k) for c, k in all_credentials.items()] ","def create_http_input_channels( channel: Optional[Text], credentials_file: Optional[Text] ) -> List[""InputChannel""]: """"""Instantiate the chosen input channel."""""" if credentials_file: all_credentials = rasa.utils.io.read_config_file(credentials_file) else: all_credentials = {} if channel: if len(all_credentials) > 1: logger.info( ""Only credentials for chosen {} connector will be loaded."".format( channel ) ) return [_create_single_channel(channel, all_credentials.get(channel))] else: return [_create_single_channel(c, k) for c, k in all_credentials.items()] " 57838,"def main(): params = demisto.params() aws_default_region = params.get('defaultRegion') aws_role_arn = params.get('roleArn') aws_role_session_name = params.get('roleSessionName') aws_role_session_duration = params.get('sessionDuration') aws_role_policy = None aws_access_key_id = params.get('access_key') aws_secret_access_key = params.get('secret_key') verify_certificate = not params.get('insecure', True) timeout = demisto.params().get('timeout') retries = demisto.params().get('retries') or 5 try: validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id, aws_secret_access_key) aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration, aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout, retries) command = demisto.command() args = demisto.args() LOG('Command being called is {command}'.format(command=demisto.command())) if command == 'test-module': client = aws_client.aws_session(service='s3') response = client.list_buckets() if response['ResponseMetadata']['HTTPStatusCode'] == 200: demisto.results('ok') elif command == 'aws-s3-create-bucket': create_bucket_command(args, aws_client) elif command == 'aws-s3-delete-bucket': delete_bucket_command(args, aws_client) elif command == 'aws-s3-list-buckets': list_buckets_command(args, aws_client) elif command == 'aws-s3-get-bucket-policy': get_bucket_policy_command(args, aws_client) elif command == 'aws-s3-put-bucket-policy': put_bucket_policy_command(args, aws_client) elif command == 'aws-s3-delete-bucket-policy': delete_bucket_policy_command(args, aws_client) elif command == 'aws-s3-download-file': download_file_command(args, aws_client) elif command == 'aws-s3-list-bucket-objects': list_objects_command(args, aws_client) elif command == 'aws-s3-upload-file': upload_file_command(args, aws_client) except Exception as e: return_error('Error has occurred in the AWS S3 Integration: {error}\n {message}'.format( error=type(e), message=e.message)) ","def main(): params = demisto.params() aws_default_region = params.get('defaultRegion') aws_role_arn = params.get('roleArn') 
aws_role_session_name = params.get('roleSessionName') aws_role_session_duration = params.get('sessionDuration') aws_role_policy = None aws_access_key_id = params.get('access_key') aws_secret_access_key = params.get('secret_key') verify_certificate = not params.get('insecure', True) timeout = params.get('timeout') retries = params.get('retries') or 5 try: validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id, aws_secret_access_key) aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration, aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout, retries) command = demisto.command() args = demisto.args() LOG('Command being called is {command}'.format(command=demisto.command())) if command == 'test-module': client = aws_client.aws_session(service='s3') response = client.list_buckets() if response['ResponseMetadata']['HTTPStatusCode'] == 200: demisto.results('ok') elif command == 'aws-s3-create-bucket': create_bucket_command(args, aws_client) elif command == 'aws-s3-delete-bucket': delete_bucket_command(args, aws_client) elif command == 'aws-s3-list-buckets': list_buckets_command(args, aws_client) elif command == 'aws-s3-get-bucket-policy': get_bucket_policy_command(args, aws_client) elif command == 'aws-s3-put-bucket-policy': put_bucket_policy_command(args, aws_client) elif command == 'aws-s3-delete-bucket-policy': delete_bucket_policy_command(args, aws_client) elif command == 'aws-s3-download-file': download_file_command(args, aws_client) elif command == 'aws-s3-list-bucket-objects': list_objects_command(args, aws_client) elif command == 'aws-s3-upload-file': upload_file_command(args, aws_client) except Exception as e: return_error('Error has occurred in the AWS S3 Integration: {error}\n {message}'.format( error=type(e), message=e.message)) " 46002,"def binary_focal_loss_with_logits( input: torch.Tensor, target: torch.Tensor, alpha: float = 0.25, gamma: float = 2.0, reduction: str = 'none', eps: Optional[float] = None, pos_weight: Optional[torch.Tensor] = None, ) -> torch.Tensor: r""""""Function that computes Binary Focal loss. .. math:: \text{FL}(p_t) = -\alpha_t (1 - p_t)^{\gamma} \, \text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: input: input data tensor of arbitrary shape. target: the target tensor with shape matching input. alpha: Weighting factor for the rare class :math:`\alpha \in [0, 1]`. gamma: Focusing parameter :math:`\gamma >= 0`. reduction: Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. eps: Deprecated: scalar for numerically stability when dividing. This is no longer used. pos_weight: a weight of positive examples. It’s possible to trade off recall and precision by adding weights to positive examples. Must be a vector with length equal to the number of classes. Returns: the computed loss. 
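
The AWS S3 pair above replaces repeated demisto.params() lookups with a single params dict that is read once and reused, before dispatching on the command name. A minimal sketch of that read-once-and-dispatch shape, with made-up handler names (the demisto API itself is not used here):

def dispatch(command, params, handlers):
    # Read configuration once, then route the command through a handler table.
    timeout = params.get('timeout')
    retries = params.get('retries') or 5
    handler = handlers.get(command)
    if handler is None:
        raise ValueError(f'Unknown command: {command}')
    return handler(timeout=timeout, retries=retries)

handlers = {'aws-s3-list-buckets': lambda timeout, retries: f'listing (timeout={timeout}, retries={retries})'}
print(dispatch('aws-s3-list-buckets', {'timeout': 30}, handlers))
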
Examples: >>> kwargs = {""alpha"": 0.25, ""gamma"": 2.0, ""reduction"": 'mean'} >>> logits = torch.tensor([[[6.325]],[[5.26]],[[87.49]]]) >>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]]) >>> binary_focal_loss_with_logits(logits, labels, **kwargs) tensor(21.8725) """""" if eps is not None and not torch.jit.is_scripting(): warnings.warn( ""`binary_focal_loss_with_logits` has been reworked for improved numerical stability "" ""and the `eps` argument is no longer necessary"", DeprecationWarning, stacklevel=2, ) if not isinstance(input, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not len(input.shape) >= 2: raise ValueError(f""Invalid input shape, we expect BxCx*. Got: {input.shape}"") if input.size(0) != target.size(0): raise ValueError(f'Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).') if pos_weight is None: pos_weight = torch.ones(input.size(-1), device=input.device) elif not isinstance(pos_weight, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") elif input.size(-1) != pos_weight.size(0): raise ValueError(f""Expected pos_weight size ({pos_weight.size(0)}) to match number of "" f""classes ({input.size(1)})"") probs_pos = torch.sigmoid(input) probs_neg = torch.sigmoid(-input) loss_tmp = -alpha * pos_weight * torch.pow(probs_neg, gamma) * target * F.logsigmoid(input) - ( 1 - alpha ) * torch.pow(probs_pos, gamma) * (1.0 - target) * F.logsigmoid(-input) if reduction == 'none': loss = loss_tmp elif reduction == 'mean': loss = torch.mean(loss_tmp) elif reduction == 'sum': loss = torch.sum(loss_tmp) else: raise NotImplementedError(f""Invalid reduction mode: {reduction}"") return loss ","def binary_focal_loss_with_logits( input: torch.Tensor, target: torch.Tensor, alpha: float = 0.25, gamma: float = 2.0, reduction: str = 'none', eps: Optional[float] = None, pos_weight: Optional[torch.Tensor] = None, ) -> torch.Tensor: r""""""Function that computes Binary Focal loss. .. math:: \text{FL}(p_t) = -\alpha_t (1 - p_t)^{\gamma} \, \text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: input: input data tensor of arbitrary shape. target: the target tensor with shape matching input. alpha: Weighting factor for the rare class :math:`\alpha \in [0, 1]`. gamma: Focusing parameter :math:`\gamma >= 0`. reduction: Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. eps: Deprecated: scalar for numerically stability when dividing. This is no longer used. pos_weight: a weight of positive examples. It’s possible to trade off recall and precision by adding weights to positive examples. Must be a vector with length equal to the number of classes. Returns: the computed loss. 
Examples: >>> kwargs = {""alpha"": 0.25, ""gamma"": 2.0, ""reduction"": 'mean'} >>> logits = torch.tensor([[[6.325]],[[5.26]],[[87.49]]]) >>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]]) >>> binary_focal_loss_with_logits(logits, labels, **kwargs) tensor(21.8725) """""" if eps is not None and not torch.jit.is_scripting(): warnings.warn( ""`binary_focal_loss_with_logits` has been reworked for improved numerical stability "" ""and the `eps` argument is no longer necessary"", DeprecationWarning, stacklevel=2, ) if not isinstance(input, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not len(input.shape) >= 2: raise ValueError(f""Invalid input shape, we expect BxCx*. Got: {input.shape}"") if input.size(0) != target.size(0): raise ValueError(f'Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).') if pos_weight is None: pos_weight = torch.ones(input.size(-1), device=input.device, dtype=input.dtype) elif not isinstance(pos_weight, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") elif input.size(-1) != pos_weight.size(0): raise ValueError(f""Expected pos_weight size ({pos_weight.size(0)}) to match number of "" f""classes ({input.size(1)})"") probs_pos = torch.sigmoid(input) probs_neg = torch.sigmoid(-input) loss_tmp = -alpha * pos_weight * torch.pow(probs_neg, gamma) * target * F.logsigmoid(input) - ( 1 - alpha ) * torch.pow(probs_pos, gamma) * (1.0 - target) * F.logsigmoid(-input) if reduction == 'none': loss = loss_tmp elif reduction == 'mean': loss = torch.mean(loss_tmp) elif reduction == 'sum': loss = torch.sum(loss_tmp) else: raise NotImplementedError(f""Invalid reduction mode: {reduction}"") return loss " 7275,"def test_1D(): x = np.arange(50) / 50 y = filters.gaussian(x, sigma=(1,)) ","def test_1D(): x = np.arange(50) / 50 y = gaussian(x, sigma=(1,)) " 42101,"def upgrade(): bind = op.get_bind() sa.Enum(TrialValueModel.TrialValueType).create(bind, checkfirst=True) # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE # ADD COLUMN ... DEFAULT ""FINITE_OR_NAN""', but seemingly Alembic # does not support such a SQL statement. So first add a column with schema-level # default value setting, then remove it by `batch_op.alter_column()`. 
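
For the focal-loss pair above, a stripped-down sketch of the same formula (no pos_weight and no reduction options) that reproduces the docstring example, assuming torch is available:

import torch
import torch.nn.functional as F

def binary_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # -alpha * (1 - p)^gamma * y * log(p) - (1 - alpha) * p^gamma * (1 - y) * log(1 - p),
    # written with logsigmoid for numerical stability.
    probs_pos = torch.sigmoid(logits)
    probs_neg = torch.sigmoid(-logits)
    loss = (-alpha * probs_neg.pow(gamma) * targets * F.logsigmoid(logits)
            - (1 - alpha) * probs_pos.pow(gamma) * (1 - targets) * F.logsigmoid(-logits))
    return loss.mean()

logits = torch.tensor([[[6.325]], [[5.26]], [[87.49]]])
labels = torch.tensor([[[1.]], [[1.]], [[0.]]])
print(binary_focal_loss(logits, labels))  # about 21.87, matching the docstring example
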
with op.batch_alter_table(""trial_values"") as batch_op: batch_op.add_column( sa.Column( ""value_type"", sa.Enum(""FINITE"", ""INF_POS"", ""INF_NEG"", name=""trialvaluetype""), nullable=False, server_default=""FINITE"", ), ) with op.batch_alter_table(""trial_values"") as batch_op: batch_op.alter_column(""value_type"", server_default=None) batch_op.alter_column( ""value"", existing_type=sa.Float(precision=FLOAT_PRECISION), existing_nullable=False, nullable=True, ) session = orm.Session(bind=bind) try: records = session.query(TrialValueModel).all() mapping = [] for r in records: value: float if np.isclose(r.value, RDB_MAX_FLOAT) or np.isposinf(r.value): value = float(""inf"") elif np.isclose(r.value, RDB_MIN_FLOAT) or np.isneginf(r.value): value = float(""-inf"") elif np.isnan(r.value): value = float(""nan"") else: value = r.value ( stored_value, float_type, ) = TrialValueModel.value_to_stored_repr(value) mapping.append( { ""trial_value_id"": r.trial_value_id, ""value_type"": float_type, ""value"": stored_value, } ) session.bulk_update_mappings(TrialValueModel, mapping) session.commit() except SQLAlchemyError as e: session.rollback() raise e finally: session.close() ","def upgrade(): bind = op.get_bind() sa.Enum(TrialValueModel.TrialValueType).create(bind, checkfirst=True) # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE # ADD COLUMN ... DEFAULT ""FINITE""', but seemingly Alembic # does not support such a SQL statement. So first add a column with schema-level # default value setting, then remove it by `batch_op.alter_column()`. with op.batch_alter_table(""trial_values"") as batch_op: batch_op.add_column( sa.Column( ""value_type"", sa.Enum(""FINITE"", ""INF_POS"", ""INF_NEG"", name=""trialvaluetype""), nullable=False, server_default=""FINITE"", ), ) with op.batch_alter_table(""trial_values"") as batch_op: batch_op.alter_column(""value_type"", server_default=None) batch_op.alter_column( ""value"", existing_type=sa.Float(precision=FLOAT_PRECISION), existing_nullable=False, nullable=True, ) session = orm.Session(bind=bind) try: records = session.query(TrialValueModel).all() mapping = [] for r in records: value: float if np.isclose(r.value, RDB_MAX_FLOAT) or np.isposinf(r.value): value = float(""inf"") elif np.isclose(r.value, RDB_MIN_FLOAT) or np.isneginf(r.value): value = float(""-inf"") elif np.isnan(r.value): value = float(""nan"") else: value = r.value ( stored_value, float_type, ) = TrialValueModel.value_to_stored_repr(value) mapping.append( { ""trial_value_id"": r.trial_value_id, ""value_type"": float_type, ""value"": stored_value, } ) session.bulk_update_mappings(TrialValueModel, mapping) session.commit() except SQLAlchemyError as e: session.rollback() raise e finally: session.close() " 57911,"def redlock_list_scans(): """""" List DevOps Scans """""" group_by = demisto.args().get('group_by', 'scanId') page_size = demisto.args().get('page_size', 25) page_number = demisto.args().get('page_number', 1) sort = demisto.args().get('sort', None) filter_type = demisto.args().get('filter_type', 'relative') filter_time_amount = demisto.args().get('filter_time_amount', 1) filter_time_unit = demisto.args().get('filter_time_unit', 'day') filter_user = demisto.args().get('filter_user', None) filter_status = demisto.args().get('filter_status', None) filter_asset_type = demisto.args().get('filter_asset_type', None) filter_asset_name = demisto.args().get('filter_asset_name', None) filter_start_time = demisto.args().get('filter_start_time', None) filter_end_time = 
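
The Alembic migration above routes each stored value through a helper that splits it into a stored value and a value-type tag. The real helper belongs to the migrated schema and is not shown here; the sketch below is only one plausible way to classify the special floats (nan handling is omitted because the pair does not show it):

import math

def classify_value(value):
    # One plausible split of a float into (stored_value, value_type);
    # the actual helper used by the migration may differ.
    if math.isinf(value):
        return None, 'INF_POS' if value > 0 else 'INF_NEG'
    return value, 'FINITE'

for v in (1.5, float('inf'), float('-inf')):
    print(v, '->', classify_value(v))
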
demisto.args().get('filter_end_time', None) list_filter = { 'groupBy': group_by, 'page[size]': page_size, 'page[number]': page_number, 'filter[timeType]': filter_type } if sort: list_filter['sort'] = sort if filter_type == 'relative': if filter_time_unit and filter_time_amount: list_filter['filter[timeUnit]'] = filter_time_unit list_filter['filter[timeAmount]'] = filter_time_amount else: return_error('You must specify a filter_time_unit and filter_time_amount with relative type filter') elif filter_type == 'to_now': if filter_start_time: list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format=""%m/%d/%Y %H:%M:%S"") else: return_error('You must specify filter_start_time with to_now type filter') elif filter_type == 'absolute': if filter_start_time and filter_end_time: list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format=""%m/%d/%Y %H:%M:%S"") list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format=""%m/%d/%Y %H:%M:%S"") else: return_error('You must specify a filter_start_time and filter_end_time with absolute type filter') if filter_user: list_filter['filter[user]'] = filter_user if filter_status: list_filter['filter[status]'] = filter_status if filter_asset_type: list_filter['filter[assetType]'] = filter_asset_type if filter_asset_name: list_filter['filter[assetName]'] = filter_asset_name response = req('GET', 'iac/v2/scans', param_data=list_filter, data={}) if ( not response or 'data' not in response or not isinstance(response['data'], list) ): demisto.results('No results found') else: items = response['data'] readable_output = [] for item in items: readable_output.append({ ""ID"": item.get('id'), ""Name"": item.get('attributes')['name'], ""Type"": item.get('attributes')['type'], ""Scan Time"": item.get('attributes')['scanTime'], ""User"": item.get('attributes')['user'] }) md = tableToMarkdown(""Scans List:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': items, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': items}, 'HumanReadable': md }) ","def redlock_list_scans(): """""" List DevOps Scans """""" group_by = demisto.args().get('group_by', 'scanId') page_size = demisto.args().get('page_size', 25) page_number = demisto.args().get('page_number', 1) sort = demisto.args().get('sort', None) filter_type = demisto.args().get('filter_type', 'relative') filter_time_amount = demisto.args().get('filter_time_amount', 1) filter_time_unit = demisto.args().get('filter_time_unit', 'day') filter_user = demisto.args().get('filter_user', None) filter_status = demisto.args().get('filter_status', None) filter_asset_type = demisto.args().get('filter_asset_type', None) filter_asset_name = demisto.args().get('filter_asset_name', None) filter_start_time = demisto.args().get('filter_start_time', None) filter_end_time = demisto.args().get('filter_end_time', None) list_filter = { 'groupBy': group_by, 'page[size]': page_size, 'page[number]': page_number, 'filter[timeType]': filter_type } if sort: list_filter['sort'] = sort if filter_type == 'relative': if filter_time_unit and filter_time_amount: list_filter['filter[timeUnit]'] = filter_time_unit list_filter['filter[timeAmount]'] = filter_time_amount else: return_error('You must specify a filter_time_unit and filter_time_amount with relative type filter') elif filter_type == 'to_now': if filter_start_time: list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format=""%m/%d/%Y 
%H:%M:%S"") else: return_error('You must specify filter_start_time with to_now type filter') elif filter_type == 'absolute': if filter_start_time and filter_end_time: list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format=""%m/%d/%Y %H:%M:%S"") list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format=""%m/%d/%Y %H:%M:%S"") else: return_error('You must specify a filter_start_time and filter_end_time with absolute type filter') if filter_user: list_filter['filter[user]'] = filter_user if filter_status: list_filter['filter[status]'] = filter_status if filter_asset_type: list_filter['filter[assetType]'] = filter_asset_type if filter_asset_name: list_filter['filter[assetName]'] = filter_asset_name response = req('GET', 'iac/v2/scans', param_data=list_filter, data={}) if ( not response or 'data' not in response or not isinstance(response['data'], list) ): demisto.results('No results found') else: items = response['data'] readable_output = [] for item in items: id = item.get('id') attributes = item.get('attributes', {}) readable_output.append({ ""ID"": id, ""Name"": attributes.get('name', []), ""Type"": attributes.get('type', []), ""Scan Time"": attributes.get('scanTime'), ""User"": attributes.get('user', []) }) md = tableToMarkdown(""Scans List:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': items, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': items}, 'HumanReadable': md }) " 39726,"def execute_notebook( input_path, output_path, parameters=None, engine_name=None, request_save_on_cell_execute=True, prepare_only=False, kernel_name=None, language=None, progress_bar=True, log_output=False, stdout_file=None, stderr_file=None, start_timeout=60, report_mode=False, cwd=None, **engine_kwargs ): """"""Executes a single notebook locally. Parameters ---------- input_path : str or Path Path to input notebook output_path : str or Path Path to save executed notebook parameters : dict, optional Arbitrary keyword arguments to pass to the notebook parameters engine_name : str, optional Name of execution engine to use request_save_on_cell_execute : bool, optional Request save notebook after each cell execution autosave_cell_every : int, optional How often in seconds to save in the middle of long cell executions prepare_only : bool, optional Flag to determine if execution should occur or not kernel_name : str, optional Name of kernel to execute the notebook against languages : str, optional Programming language of the notebook progress_bar : bool, optional Flag for whether or not to show the progress bar. log_output : bool, optional Flag for whether or not to write notebook output to the configured logger start_timeout : int, optional Duration in seconds to wait for kernel start-up report_mode : bool, optional Flag for whether or not to hide input. 
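
The redlock_list_scans pair above swaps direct ['attributes'] indexing for .get() calls so a scan record with missing fields cannot raise KeyError while the table is built. A tiny illustration with made-up scan records:

items = [
    {'id': 'scan-1', 'attributes': {'name': 'tf-plan', 'type': 'iac', 'user': 'alice'}},
    {'id': 'scan-2'},  # attributes missing entirely
]

rows = []
for item in items:
    attributes = item.get('attributes', {})
    rows.append({
        'ID': item.get('id'),
        'Name': attributes.get('name'),
        'Type': attributes.get('type'),
        'Scan Time': attributes.get('scanTime'),
        'User': attributes.get('user'),
    })
print(rows)
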
cwd : str or Path, optional Working directory to use when executing the notebook **kwargs Arbitrary keyword arguments to pass to the notebook engine Returns ------- nb : NotebookNode Executed notebook object """""" if isinstance(input_path, Path): input_path = str(input_path) if isinstance(output_path, Path): output_path = str(output_path) if isinstance(cwd, Path): cwd = str(cwd) path_parameters = add_builtin_parameters(parameters) input_path = parameterize_path(input_path, path_parameters) output_path = parameterize_path(output_path, path_parameters) logger.info(""Input Notebook: %s"" % get_pretty_path(input_path)) logger.info(""Output Notebook: %s"" % get_pretty_path(output_path)) with local_file_io_cwd(): if cwd is not None: logger.info(""Working directory: {}"".format(get_pretty_path(cwd))) nb = load_notebook_node(input_path) # Parameterize the Notebook. if parameters: nb = parameterize_notebook( nb, parameters, report_mode, kernel_name=kernel_name, language=language ) nb = prepare_notebook_metadata(nb, input_path, output_path, report_mode) # clear out any existing error markers from previous papermill runs nb = remove_error_markers(nb) if not prepare_only: # Fetch out the name from the notebook document kernel_name = nb_kernel_name(nb, kernel_name) # Execute the Notebook in `cwd` if it is set with chdir(cwd): nb = papermill_engines.execute_notebook_with_engine( engine_name, nb, input_path=input_path, output_path=output_path if request_save_on_cell_execute else None, kernel_name=kernel_name, progress_bar=progress_bar, log_output=log_output, start_timeout=start_timeout, stdout_file=stdout_file, stderr_file=stderr_file, **engine_kwargs ) # Check for errors first (it saves on error before raising) raise_for_execution_errors(nb, output_path) # Write final output in case the engine didn't write it on cell completion. write_ipynb(nb, output_path) return nb ","def execute_notebook( input_path, output_path, parameters=None, engine_name=None, request_save_on_cell_execute=True, prepare_only=False, kernel_name=None, language=None, progress_bar=True, log_output=False, stdout_file=None, stderr_file=None, start_timeout=60, report_mode=False, cwd=None, **engine_kwargs ): """"""Executes a single notebook locally. Parameters ---------- input_path : str or Path Path to input notebook output_path : str or Path Path to save executed notebook parameters : dict, optional Arbitrary keyword arguments to pass to the notebook parameters engine_name : str, optional Name of execution engine to use request_save_on_cell_execute : bool, optional Request save notebook after each cell execution autosave_cell_every : int, optional How often in seconds to save in the middle of long cell executions prepare_only : bool, optional Flag to determine if execution should occur or not kernel_name : str, optional Name of kernel to execute the notebook against language : str, optional Programming language of the notebook progress_bar : bool, optional Flag for whether or not to show the progress bar. log_output : bool, optional Flag for whether or not to write notebook output to the configured logger start_timeout : int, optional Duration in seconds to wait for kernel start-up report_mode : bool, optional Flag for whether or not to hide input. 
cwd : str or Path, optional Working directory to use when executing the notebook **kwargs Arbitrary keyword arguments to pass to the notebook engine Returns ------- nb : NotebookNode Executed notebook object """""" if isinstance(input_path, Path): input_path = str(input_path) if isinstance(output_path, Path): output_path = str(output_path) if isinstance(cwd, Path): cwd = str(cwd) path_parameters = add_builtin_parameters(parameters) input_path = parameterize_path(input_path, path_parameters) output_path = parameterize_path(output_path, path_parameters) logger.info(""Input Notebook: %s"" % get_pretty_path(input_path)) logger.info(""Output Notebook: %s"" % get_pretty_path(output_path)) with local_file_io_cwd(): if cwd is not None: logger.info(""Working directory: {}"".format(get_pretty_path(cwd))) nb = load_notebook_node(input_path) # Parameterize the Notebook. if parameters: nb = parameterize_notebook( nb, parameters, report_mode, kernel_name=kernel_name, language=language ) nb = prepare_notebook_metadata(nb, input_path, output_path, report_mode) # clear out any existing error markers from previous papermill runs nb = remove_error_markers(nb) if not prepare_only: # Fetch out the name from the notebook document kernel_name = nb_kernel_name(nb, kernel_name) # Execute the Notebook in `cwd` if it is set with chdir(cwd): nb = papermill_engines.execute_notebook_with_engine( engine_name, nb, input_path=input_path, output_path=output_path if request_save_on_cell_execute else None, kernel_name=kernel_name, progress_bar=progress_bar, log_output=log_output, start_timeout=start_timeout, stdout_file=stdout_file, stderr_file=stderr_file, **engine_kwargs ) # Check for errors first (it saves on error before raising) raise_for_execution_errors(nb, output_path) # Write final output in case the engine didn't write it on cell completion. write_ipynb(nb, output_path) return nb " 7111,"def reinstall_workflow( source: Path, named_run: str, rundir: Path, dry_run: bool = False ) -> str: """"""Reinstall workflow. Args: source: source directory named_run: name of the run e.g. my-flow/run1 rundir: run directory dry_run: if True, will not execute the file transfer but report what would be changed. Raises: WorkflowFilesError: If rsync returns non-zero. Returns: Stdout from the rsync command. """""" validate_source_dir(source, named_run) check_nested_dirs(rundir) reinstall_log = _get_logger( rundir, 'cylc-reinstall', open_file=not dry_run, # don't open the log file for --dry-run ) reinstall_log.info( f'Reinstalling ""{named_run}"", from ""{source}"" to ""{rundir}""' ) rsync_cmd = get_rsync_rund_cmd( source, rundir, reinstall=True, dry_run=dry_run, ) # Add '+++' to -out-format to mark lines passed through formatter. rsync_cmd.append('--out-format=+++%o %n%L+++') # Run rsync command: reinstall_log.info(cli_format(rsync_cmd)) LOG.debug(cli_format(rsync_cmd)) proc = Popen(rsync_cmd, stdout=PIPE, stderr=PIPE, text=True) # nosec # * command is constructed via internal interface stdout, stderr = proc.communicate() # Strip output of marker. 
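
execute_notebook above runs the engine inside a `with chdir(cwd):` block. A minimal sketch of what such a helper typically looks like; papermill ships its own version, which may differ in detail:

import os
from contextlib import contextmanager

@contextmanager
def chdir(path):
    # Temporarily switch the working directory, restoring it even on error.
    if path is None:
        yield
        return
    old = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old)

with chdir('.'):
    print(os.getcwd())
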
stdout = stdout = '\n'.join(re.findall(r'\+\+\+(.*)\+\+\+', stdout)) stderr = stderr.strip() if proc.returncode != 0: raise WorkflowFilesError( f'An error occurred reinstalling from {source} to {rundir}' f'\n{stderr}' ) check_flow_file(rundir) reinstall_log.info(f'REINSTALLED {named_run} from {source}') print( f'REINSTALL{""ED"" if not dry_run else """"} {named_run} from {source}' ) close_log(reinstall_log) return stdout ","def reinstall_workflow( source: Path, named_run: str, rundir: Path, dry_run: bool = False ) -> str: """"""Reinstall workflow. Args: source: source directory named_run: name of the run e.g. my-flow/run1 rundir: run directory dry_run: if True, will not execute the file transfer but report what would be changed. Raises: WorkflowFilesError: If rsync returns non-zero. Returns: Stdout from the rsync command. """""" validate_source_dir(source, named_run) check_nested_dirs(rundir) reinstall_log = _get_logger( rundir, 'cylc-reinstall', open_file=not dry_run, # don't open the log file for --dry-run ) reinstall_log.info( f'Reinstalling ""{named_run}"", from ""{source}"" to ""{rundir}""' ) rsync_cmd = get_rsync_rund_cmd( source, rundir, reinstall=True, dry_run=dry_run, ) # Add '+++' to -out-format to mark lines passed through formatter. rsync_cmd.append('--out-format=+++%o %n%L+++') # Run rsync command: reinstall_log.info(cli_format(rsync_cmd)) LOG.debug(cli_format(rsync_cmd)) proc = Popen(rsync_cmd, stdout=PIPE, stderr=PIPE, text=True) # nosec # * command is constructed via internal interface stdout, stderr = proc.communicate() # Strip output of marker. stdout = ('\n'.join(re.findall(r'\+\+\+(.*)\+\+\+', stdout))).strip() stderr = stderr.strip() if proc.returncode != 0: raise WorkflowFilesError( f'An error occurred reinstalling from {source} to {rundir}' f'\n{stderr}' ) check_flow_file(rundir) reinstall_log.info(f'REINSTALLED {named_run} from {source}') print( f'REINSTALL{""ED"" if not dry_run else """"} {named_run} from {source}' ) close_log(reinstall_log) return stdout " 56602,"def plot_bf(trace, var_name, prior, family = 'normal', ref_val=0, xlim=None, ax=None): # grab trace, a variable name to compute difference and prior. 
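
The reinstall_workflow fix above both joins the '+++'-marked rsync lines and strips the result in a single expression. A self-contained check of that expression against fabricated rsync output:

import re

raw = """Some rsync chatter
+++send flow.cylc+++
+++del. old.log+++
more chatter"""

# Keep only the lines the --out-format marker wrapped in '+++', as the fix does.
stdout = ('\n'.join(re.findall(r'\+\+\+(.*)\+\+\+', raw))).strip()
print(stdout)
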
# ref_val is the parameter we want to compare from scipy import stats import matplotlib.pyplot as plt import numpy as np # test some elemtns # varName should be string if not isinstance(var_name, str): print('varName is not a string') # BFs based on density estimation (using kernel smoothing instead of spline) # stack trace posterior tr = trace.posterior.stack(draws=(""chain"", ""draw"")) post = tr[var_name] if post.ndim > 1: print(""Posterior distribution has {post.ndim} dimensions"") if family=='normal': # generate vector if xlim is None: x = np.linspace(np.min(prior), np.max(prior),prior.shape[0]) else: x = np.linspace(xlim[0], xlim[1],prior.shape[0]) #x = np.linspace(np.min(post), np.max(post),prior.shape[0]) my_pdf = stats.gaussian_kde(post) prior_pdf = stats.gaussian_kde(prior) elif family!='normal': # for now and error of notImplemented print(""The method for {family} distribution is not implemented yet"") if ax is None: fig, ax = plt.subplots() ax.plot( x, my_pdf(x), ""--"", lw=2.5, alpha=0.6, label=""Posterior"" ) # distribution function ax.plot(x, prior_pdf(x), ""r-"", lw=2.5, alpha=0.6, label=""Prior"") posterior = my_pdf(ref_val) # this gives the pdf at ref_val prior = prior_pdf(ref_val) BF10 = posterior / prior BF01 = prior / posterior print(""the Bayes Factor 10 is %.3f"" % (BF10)) print(""the Bayes Factor 01 is %.3f"" % (BF01)) ax.plot(ref_val, posterior, ""ko"", lw=1.5, alpha=1) ax.plot(ref_val, prior, ""ko"", lw=1.5, alpha=1) plt.xlabel(""Delta"") plt.ylabel(""Density"") plt.legend(loc=""upper left"") plt.show() return {'BF10': BF10, 'BF01':BF01}, ax ","def plot_bf(trace, var_name, prior, family = 'normal', ref_val=0, xlim=None, ax=None): # grab trace, a variable name to compute difference and prior. # ref_val is the parameter we want to compare from scipy import stats import matplotlib.pyplot as plt import numpy as np # test some elemtns # varName should be string if not isinstance(var_name, str): print('varName is not a string') # BFs based on density estimation (using kernel smoothing instead of spline) # stack trace posterior post = extract(idata, var_names=var_name) prior = extract(idata, var_names=var_name, group=""prior"") if post.ndim > 1: print(""Posterior distribution has {post.ndim} dimensions"") if family=='normal': # generate vector if xlim is None: x = np.linspace(np.min(prior), np.max(prior),prior.shape[0]) else: x = np.linspace(xlim[0], xlim[1],prior.shape[0]) #x = np.linspace(np.min(post), np.max(post),prior.shape[0]) my_pdf = stats.gaussian_kde(post) prior_pdf = stats.gaussian_kde(prior) elif family!='normal': # for now and error of notImplemented print(""The method for {family} distribution is not implemented yet"") if ax is None: fig, ax = plt.subplots() ax.plot( x, my_pdf(x), ""--"", lw=2.5, alpha=0.6, label=""Posterior"" ) # distribution function ax.plot(x, prior_pdf(x), ""r-"", lw=2.5, alpha=0.6, label=""Prior"") posterior = my_pdf(ref_val) # this gives the pdf at ref_val prior = prior_pdf(ref_val) BF10 = posterior / prior BF01 = prior / posterior print(""the Bayes Factor 10 is %.3f"" % (BF10)) print(""the Bayes Factor 01 is %.3f"" % (BF01)) ax.plot(ref_val, posterior, ""ko"", lw=1.5, alpha=1) ax.plot(ref_val, prior, ""ko"", lw=1.5, alpha=1) plt.xlabel(""Delta"") plt.ylabel(""Density"") plt.legend(loc=""upper left"") plt.show() return {'BF10': BF10, 'BF01':BF01}, ax " 41267,"def default_repetition_ids(repetitions: IntParam) -> Optional[List[str]]: if isinstance(repetitions, INT_CLASSES) and abs(repetitions) != 1: abs_repetitions: int = 
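
The plot_bf pair above computes a Savage-Dickey style Bayes factor by evaluating kernel density estimates of the prior and posterior at a reference value. A plotting-free sketch of that ratio using synthetic samples (the distributions are invented for illustration):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
prior_samples = rng.normal(0.0, 1.0, 5_000)      # synthetic prior draws
posterior_samples = rng.normal(0.4, 0.2, 5_000)  # synthetic posterior draws
ref_val = 0.0

# Savage-Dickey ratio: compare prior and posterior densities at the reference value.
posterior_pdf = stats.gaussian_kde(posterior_samples)
prior_pdf = stats.gaussian_kde(prior_samples)
bf01 = prior_pdf(ref_val)[0] / posterior_pdf(ref_val)[0]
print({'BF10': 1.0 / bf01, 'BF01': bf01})
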
abs(repetitions) return [str(i) for i in range(abs_repetitions)] return None ","def default_repetition_ids(repetitions: IntParam) -> Optional[List[str]]: if isinstance(repetitions, INT_CLASSES) and abs(repetitions) != 1: abs_repetitions: int = abs(int(repetitions)) return [str(i) for i in range(abs_repetitions)] return None " 48284,"def main(): argument_spec = url_argument_spec() # setup aliases argument_spec['url_username']['aliases'] = ['username'] argument_spec['url_password']['aliases'] = ['password'] argument_spec.update( url=dict(type='str', required=True), dest=dict(type='path', required=True), backup=dict(type='bool'), sha256sum=dict(type='str', default=''), checksum=dict(type='str', default=''), timeout=dict(type='int', default=10), headers=dict(type='raw'), tmp_dest=dict(type='path'), ) module = AnsibleModule( # not checking because of daisy chain to file module argument_spec=argument_spec, add_file_common_args=True, supports_check_mode=True, mutually_exclusive=[['checksum', 'sha256sum']], ) url = module.params['url'] dest = module.params['dest'] backup = module.params['backup'] force = module.params['force'] sha256sum = module.params['sha256sum'] checksum = module.params['checksum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] tmp_dest = module.params['tmp_dest'] result = dict( changed=False, checksum_dest=None, checksum_src=None, dest=dest, elapsed=0, url=url, ) # Parse headers to dict if isinstance(module.params['headers'], dict): headers = module.params['headers'] elif module.params['headers']: try: headers = dict(item.split(':', 1) for item in module.params['headers'].split(',')) module.deprecate('Supplying `headers` as a string is deprecated. Please use dict/hash format for `headers`', version='2.10') except Exception: module.fail_json(msg=""The string representation for the `headers` parameter requires a key:value,key:value syntax to be properly parsed."", **result) else: headers = None dest_is_dir = os.path.isdir(dest) last_mod_time = None # workaround for usage of deprecated sha256sum parameter if sha256sum: checksum = 'sha256:%s' % (sha256sum) # checksum specified, parse for algorithm and checksum if checksum: try: algorithm, checksum = checksum.split(':', 1) except ValueError: module.fail_json(msg=""The checksum parameter has to be in format :"", **result) if checksum.startswith('http://') or checksum.startswith('https://') or checksum.startswith('ftp://'): checksum_url = checksum # download checksum file to checksum_tmpsrc checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) with open(checksum_tmpsrc) as f: lines = [line.rstrip('\n') for line in f] os.remove(checksum_tmpsrc) checksum_map = {} for line in lines: parts = line.split(None, 1) if len(parts) == 2: checksum_map[parts[0]] = parts[1] filename = url_filename(url) # Look through each line in the checksum file for a hash corresponding to # the filename in the url, returning the first hash that is found. 
for cksum in (s for (s, f) in checksum_map.items() if f.strip('./') == filename): checksum = cksum break else: checksum = None if checksum is None: module.fail_json(msg=""Unable to find a checksum for file '%s' in '%s'"" % (filename, checksum_url)) # Remove any non-alphanumeric characters, including the infamous # Unicode zero-width space checksum = re.sub(r'\W+', '', checksum).lower() # Ensure the checksum portion is a hexdigest int(checksum, 16) if not dest_is_dir and os.path.exists(dest): checksum_mismatch = False # If the download is not forced and there is a checksum, allow # checksum match to skip the download. if not force and checksum != '': destination_checksum = module.digest_from_file(dest, algorithm) if checksum != destination_checksum: checksum_mismatch = True # Not forcing redownload, unless checksum does not match if not force and not checksum_mismatch: # Not forcing redownload, unless checksum does not match # allow file attribute changes module.params['path'] = dest file_args = module.load_file_common_arguments(module.params) file_args['path'] = dest result['changed'] = module.set_fs_attributes_if_different(file_args, False) if result['changed']: module.exit_json(msg=""file already exists but file attributes changed"", **result) module.exit_json(msg=""file already exists"", **result) # If the file already exists, prepare the last modified time for the # request. mtime = os.path.getmtime(dest) last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # If the checksum does not match we have to force the download # because last_mod_time may be newer than on remote if checksum_mismatch: force = True # download to tmpsrc start = datetime.datetime.utcnow() tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) result['elapsed'] = (datetime.datetime.utcnow() - start).seconds result['src'] = tmpsrc # Now the request has completed, we can finally generate the final # destination file name from the info dict. if dest_is_dir: filename = extract_filename_from_headers(info) if not filename: # Fall back to extracting the filename from the URL. # Pluck the URL from the info, since a redirect could have changed # it. 
filename = url_filename(info['url']) dest = os.path.join(dest, filename) # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): os.remove(tmpsrc) module.fail_json(msg=""Request failed"", status_code=info['status'], response=info['msg'], **result) if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json(msg=""Source %s is not readable"" % (tmpsrc), **result) result['checksum_src'] = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): # raise an error if copy has no permission on dest if not os.access(dest, os.W_OK): os.remove(tmpsrc) module.fail_json(msg=""Destination %s is not writable"" % (dest), **result) if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json(msg=""Destination %s is not readable"" % (dest), **result) result['checksum_dest'] = module.sha1(dest) else: if not os.path.exists(os.path.dirname(dest)): os.remove(tmpsrc) module.fail_json(msg=""Destination %s does not exist"" % (os.path.dirname(dest)), **result) if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json(msg=""Destination %s is not writable"" % (os.path.dirname(dest)), **result) if module.check_mode: if os.path.exists(tmpsrc): os.remove(tmpsrc) result['changed'] = ('checksum_dest' not in result or result['checksum_src'] != result['checksum_dest']) module.exit_json(msg=info.get('msg', ''), **result) backup_file = None if result['checksum_src'] != result['checksum_dest']: try: if backup: if os.path.exists(dest): backup_file = module.backup_local(dest) module.atomic_move(tmpsrc, dest) except Exception as e: if os.path.exists(tmpsrc): os.remove(tmpsrc) module.fail_json(msg=""failed to copy %s to %s: %s"" % (tmpsrc, dest, to_native(e)), exception=traceback.format_exc(), **result) result['changed'] = True else: result['changed'] = False if os.path.exists(tmpsrc): os.remove(tmpsrc) if checksum != '': destination_checksum = module.digest_from_file(dest, algorithm) if checksum != destination_checksum: os.remove(dest) module.fail_json(msg=""The checksum for %s did not match %s; it was %s."" % (dest, checksum, destination_checksum), **result) # allow file attribute changes module.params['path'] = dest file_args = module.load_file_common_arguments(module.params) file_args['path'] = dest result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed']) # Backwards compat only. 
We'll return None on FIPS enabled systems try: result['md5sum'] = module.md5(dest) except ValueError: result['md5sum'] = None if backup_file: result['backup_file'] = backup_file # Mission complete module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result) ","def main(): argument_spec = url_argument_spec() # setup aliases argument_spec['url_username']['aliases'] = ['username'] argument_spec['url_password']['aliases'] = ['password'] argument_spec.update( url=dict(type='str', required=True), dest=dict(type='path', required=True), backup=dict(type='bool'), sha256sum=dict(type='str', default=''), checksum=dict(type='str', default=''), timeout=dict(type='int', default=10), headers=dict(type='raw'), tmp_dest=dict(type='path'), ) module = AnsibleModule( # not checking because of daisy chain to file module argument_spec=argument_spec, add_file_common_args=True, supports_check_mode=True, mutually_exclusive=[['checksum', 'sha256sum']], ) url = module.params['url'] dest = module.params['dest'] backup = module.params['backup'] force = module.params['force'] sha256sum = module.params['sha256sum'] checksum = module.params['checksum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] tmp_dest = module.params['tmp_dest'] result = dict( changed=False, checksum_dest=None, checksum_src=None, dest=dest, elapsed=0, url=url, ) # Parse headers to dict if isinstance(module.params['headers'], dict): headers = module.params['headers'] elif module.params['headers']: try: headers = dict(item.split(':', 1) for item in module.params['headers'].split(',')) module.deprecate('Supplying `headers` as a string is deprecated. Please use dict/hash format for `headers`', version='2.10') except Exception: module.fail_json(msg=""The string representation for the `headers` parameter requires a key:value,key:value syntax to be properly parsed."", **result) else: headers = None dest_is_dir = os.path.isdir(dest) last_mod_time = None # workaround for usage of deprecated sha256sum parameter if sha256sum: checksum = 'sha256:%s' % (sha256sum) # checksum specified, parse for algorithm and checksum if checksum: try: algorithm, checksum = checksum.split(':', 1) except ValueError: module.fail_json(msg=""The checksum parameter has to be in format :"", **result) if checksum.startswith('http://') or checksum.startswith('https://') or checksum.startswith('ftp://'): checksum_url = checksum # download checksum file to checksum_tmpsrc checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) with open(checksum_tmpsrc) as f: lines = [line.rstrip('\n') for line in f] os.remove(checksum_tmpsrc) checksum_map = {} for line in lines: parts = line.split(None, 1) if len(parts) == 2: checksum_map[parts[0]] = parts[1] filename = url_filename(url) # Look through each line in the checksum file for a hash corresponding to # the filename in the url, returning the first hash that is found. 
for cksum in (s for (s, f) in checksum_map.items() if f.strip('./') == filename): checksum = cksum break else: checksum = None if checksum is None: module.fail_json(msg=""Unable to find a checksum for file '%s' in '%s'"" % (filename, checksum_url)) # Remove any non-alphanumeric characters, including the infamous # Unicode zero-width space checksum = re.sub(r'\W+', '', checksum).lower() # Ensure the checksum portion is a hexdigest try: int(checksum, 16) except ValueError: module.fail_json(msg='The checksum format is invalid', **result) if not dest_is_dir and os.path.exists(dest): checksum_mismatch = False # If the download is not forced and there is a checksum, allow # checksum match to skip the download. if not force and checksum != '': destination_checksum = module.digest_from_file(dest, algorithm) if checksum != destination_checksum: checksum_mismatch = True # Not forcing redownload, unless checksum does not match if not force and not checksum_mismatch: # Not forcing redownload, unless checksum does not match # allow file attribute changes module.params['path'] = dest file_args = module.load_file_common_arguments(module.params) file_args['path'] = dest result['changed'] = module.set_fs_attributes_if_different(file_args, False) if result['changed']: module.exit_json(msg=""file already exists but file attributes changed"", **result) module.exit_json(msg=""file already exists"", **result) # If the file already exists, prepare the last modified time for the # request. mtime = os.path.getmtime(dest) last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # If the checksum does not match we have to force the download # because last_mod_time may be newer than on remote if checksum_mismatch: force = True # download to tmpsrc start = datetime.datetime.utcnow() tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) result['elapsed'] = (datetime.datetime.utcnow() - start).seconds result['src'] = tmpsrc # Now the request has completed, we can finally generate the final # destination file name from the info dict. if dest_is_dir: filename = extract_filename_from_headers(info) if not filename: # Fall back to extracting the filename from the URL. # Pluck the URL from the info, since a redirect could have changed # it. 
filename = url_filename(info['url']) dest = os.path.join(dest, filename) # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): os.remove(tmpsrc) module.fail_json(msg=""Request failed"", status_code=info['status'], response=info['msg'], **result) if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json(msg=""Source %s is not readable"" % (tmpsrc), **result) result['checksum_src'] = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): # raise an error if copy has no permission on dest if not os.access(dest, os.W_OK): os.remove(tmpsrc) module.fail_json(msg=""Destination %s is not writable"" % (dest), **result) if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json(msg=""Destination %s is not readable"" % (dest), **result) result['checksum_dest'] = module.sha1(dest) else: if not os.path.exists(os.path.dirname(dest)): os.remove(tmpsrc) module.fail_json(msg=""Destination %s does not exist"" % (os.path.dirname(dest)), **result) if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json(msg=""Destination %s is not writable"" % (os.path.dirname(dest)), **result) if module.check_mode: if os.path.exists(tmpsrc): os.remove(tmpsrc) result['changed'] = ('checksum_dest' not in result or result['checksum_src'] != result['checksum_dest']) module.exit_json(msg=info.get('msg', ''), **result) backup_file = None if result['checksum_src'] != result['checksum_dest']: try: if backup: if os.path.exists(dest): backup_file = module.backup_local(dest) module.atomic_move(tmpsrc, dest) except Exception as e: if os.path.exists(tmpsrc): os.remove(tmpsrc) module.fail_json(msg=""failed to copy %s to %s: %s"" % (tmpsrc, dest, to_native(e)), exception=traceback.format_exc(), **result) result['changed'] = True else: result['changed'] = False if os.path.exists(tmpsrc): os.remove(tmpsrc) if checksum != '': destination_checksum = module.digest_from_file(dest, algorithm) if checksum != destination_checksum: os.remove(dest) module.fail_json(msg=""The checksum for %s did not match %s; it was %s."" % (dest, checksum, destination_checksum), **result) # allow file attribute changes module.params['path'] = dest file_args = module.load_file_common_arguments(module.params) file_args['path'] = dest result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed']) # Backwards compat only. 
We'll return None on FIPS enabled systems try: result['md5sum'] = module.md5(dest) except ValueError: result['md5sum'] = None if backup_file: result['backup_file'] = backup_file # Mission complete module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result) " 58113,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" base_url = urljoin(demisto.params()['url'].removesuffix('/'), '/shnapi/rest') verify_certificate = not demisto.params().get('insecure', False) credentials = demisto.params().get('credentials', {}) handle_proxy() command = demisto.command() demisto.debug(f'Command being called is {command}') try: commands: Dict = { 'mvision-casb-incident-query': incident_query_command, 'mvision-casb-incident-status-update': status_update_command, 'mvision-casb-anomaly-activity-list': anomaly_activity_list_command, 'mvision-casb-policy-dictionary-list': policy_dictionary_list_command, 'mvision-casb-policy-dictionary-update': policy_dictionary_update_command, } client = Client( base_url=base_url, verify=verify_certificate, auth=(credentials.get('identifier'), credentials.get('password')) ) if command == 'test-module': result = test_module(client) return_results(result) if command == 'fetch-incidents': last_run, incidents = fetch_incidents(client, demisto.params()) demisto.setLastRun(last_run) demisto.incidents(incidents) elif command in commands: return_results(commands[command](client, demisto.args())) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" params = demisto.params() base_url = urljoin(params['url'].removesuffix('/'), '/shnapi/rest') verify_certificate = not params.get('insecure', False) credentials = params.get('credentials', {}) handle_proxy() command = demisto.command() demisto.debug(f'Command being called is {command}') try: commands: Dict = { 'mvision-casb-incident-query': incident_query_command, 'mvision-casb-incident-status-update': status_update_command, 'mvision-casb-anomaly-activity-list': anomaly_activity_list_command, 'mvision-casb-policy-dictionary-list': policy_dictionary_list_command, 'mvision-casb-policy-dictionary-update': policy_dictionary_update_command, } client = Client( base_url=base_url, verify=verify_certificate, auth=(credentials.get('identifier'), credentials.get('password')) ) if command == 'test-module': result = test_module(client) return_results(result) if command == 'fetch-incidents': last_run, incidents = fetch_incidents(client, demisto.params()) demisto.setLastRun(last_run) demisto.incidents(incidents) elif command in commands: return_results(commands[command](client, demisto.args())) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') " 2989,"def round_trip_pickle(obj, path: Optional[str] = None): """""" Pickle an object and then read it again. Parameters ---------- obj : pandas object The object to pickle and then re-read. path : str, default None The path where the pickled object is written and then read. Returns ------- pandas object The original object that was pickled and then re-read. 
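Examples
--------
A minimal usage sketch; the frame below is illustrative only.

>>> df = pd.DataFrame({'a': [1, 2, 3]})
>>> result = round_trip_pickle(df)
>>> df.equals(result)
True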
"""""" if path is None: path = f""__{rands(10)}__.pickle"" with ensure_clean(path) as path: pd.to_pickle(obj, path) return pd.read_pickle(path) ","def round_trip_pickle(obj: FrameOrSeries, path: Optional[str] = None) -> FrameOrSeries: """""" Pickle an object and then read it again. Parameters ---------- obj : pandas object The object to pickle and then re-read. path : str, default None The path where the pickled object is written and then read. Returns ------- pandas object The original object that was pickled and then re-read. """""" if path is None: path = f""__{rands(10)}__.pickle"" with ensure_clean(path) as path: pd.to_pickle(obj, path) return pd.read_pickle(path) " 50660,"def expand(vevent, href=''): """""" Constructs a list of start and end dates for all recurring instances of the event defined in vevent. It considers RRULE as well as RDATE and EXDATE properties. In case of unsupported recursion rules an UnsupportedRecurrence exception is thrown. If the vevent contains a RECURRENCE-ID property, no expansion is done, the function still returns a tuple of start and end (date)times. :param vevent: vevent to be expanded :type vevent: icalendar.cal.Event :param href: the href of the vevent, used for more informative logging and nothing else :type href: str :returns: list of start and end (date)times of the expanded event :rtype: list(tuple(datetime, datetime)) """""" # we do this now and than never care about the ""real"" end time again if 'DURATION' in vevent: duration = vevent['DURATION'].dt else: duration = vevent['DTEND'].dt - vevent['DTSTART'].dt # if this vevent has a RECURRENCE_ID property, no expansion will be # performed expand = not bool(vevent.get('RECURRENCE-ID')) events_tz = getattr(vevent['DTSTART'].dt, 'tzinfo', None) allday = not isinstance(vevent['DTSTART'].dt, dt.datetime) def sanitize_datetime(date): if allday and isinstance(date, dt.datetime): date = date.date() if events_tz is not None: date = events_tz.localize(date) return date rrule_param = vevent.get('RRULE') if expand and rrule_param is not None: vevent = sanitize_rrule(vevent) # dst causes problem while expanding the rrule, therefore we transform # everything to naive datetime objects and transform back after # expanding # See https://github.com/dateutil/dateutil/issues/102 dtstart = vevent['DTSTART'].dt if events_tz: dtstart = dtstart.replace(tzinfo=None) rrule = dateutil.rrule.rrulestr( rrule_param.to_ical().decode(), dtstart=dtstart, ignoretz=True, ) if rrule._until is None: # rrule really doesn't like to calculate all recurrences until # eternity, so we only do it until 2037, because a) I'm not sure # if python can deal with larger datetime values yet and b) pytz # doesn't know any larger transition times rrule._until = dt.datetime(2037, 12, 31) else: if events_tz and 'Z' in rrule_param.to_ical().decode(): rrule._until = pytz.UTC.localize( rrule._until).astimezone(events_tz).replace(tzinfo=None) # rrule._until and dtstart could be dt.date or dt.datetime. They # need to be the same for comparison testuntil = rrule._until if (type(dtstart) == dt.date and type(testuntil) == dt.datetime): testuntil = testuntil.date() teststart = dtstart if (type(testuntil) == dt.date and type(teststart) == dt.datetime): teststart = teststart.date() if testuntil < teststart: logger.warning( '{}: Unsupported recurrence. 
UNTIL is before DTSTART.\n' 'This event will not be available in khal.'.format(href)) return False if rrule.count() == 0: logger.warning( '{}: Recurrence defined but will never occur.\n' 'This event will not be available in khal.'.format(href)) return False rrule = map(sanitize_datetime, rrule) logger.debug(f'calculating recurrence dates for {href}, this might take some time.') # RRULE and RDATE may specify the same date twice, it is recommended by # the RFC to consider this as only one instance dtstartl = set(rrule) if not dtstartl: raise UnsupportedRecurrence() else: dtstartl = {vevent['DTSTART'].dt} def get_dates(vevent, key): # TODO replace with get_all_properties dates = vevent.get(key) if dates is None: return if not isinstance(dates, list): dates = [dates] dates = (leaf.dt for tree in dates for leaf in tree.dts) dates = localize_strip_tz(dates, events_tz) return map(sanitize_datetime, dates) # include explicitly specified recursion dates if expand: dtstartl.update(get_dates(vevent, 'RDATE') or ()) # remove excluded dates if expand: for date in get_dates(vevent, 'EXDATE') or (): try: dtstartl.remove(date) except KeyError: logger.warning( 'In event {}, excluded instance starting at {} not found, ' 'event might be invalid.'.format(href, date)) dtstartend = [(start, start + duration) for start in dtstartl] # not necessary, but I prefer deterministic output dtstartend.sort() return dtstartend ","def expand(vevent, href=''): """""" Constructs a list of start and end dates for all recurring instances of the event defined in vevent. It considers RRULE as well as RDATE and EXDATE properties. In case of unsupported recursion rules an UnsupportedRecurrence exception is thrown. If the vevent contains a RECURRENCE-ID property, no expansion is done, the function still returns a tuple of start and end (date)times. 
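A minimal usage sketch (illustrative; ``cal`` is assumed to be an already
parsed ``icalendar.Calendar`` containing a single recurring VEVENT with a
supported recurrence rule)::

    vevent = cal.walk('VEVENT')[0]
    occurrences = expand(vevent, href='recurring.ics')
    first_start, first_end = occurrences[0]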
:param vevent: vevent to be expanded :type vevent: icalendar.cal.Event :param href: the href of the vevent, used for more informative logging and nothing else :type href: str :returns: list of start and end (date)times of the expanded event :rtype: list(tuple(datetime, datetime)) """""" # we do this now and than never care about the ""real"" end time again if 'DURATION' in vevent: duration = vevent['DURATION'].dt else: duration = vevent['DTEND'].dt - vevent['DTSTART'].dt # if this vevent has a RECURRENCE_ID property, no expansion will be # performed expand = not bool(vevent.get('RECURRENCE-ID')) events_tz = getattr(vevent['DTSTART'].dt, 'tzinfo', None) allday = not isinstance(vevent['DTSTART'].dt, dt.datetime) def sanitize_datetime(date): if allday and isinstance(date, dt.datetime): date = date.date() if events_tz is not None: date = events_tz.localize(date) return date rrule_param = vevent.get('RRULE') if expand and rrule_param is not None: vevent = sanitize_rrule(vevent) # dst causes problem while expanding the rrule, therefore we transform # everything to naive datetime objects and transform back after # expanding # See https://github.com/dateutil/dateutil/issues/102 dtstart = vevent['DTSTART'].dt if events_tz: dtstart = dtstart.replace(tzinfo=None) rrule = dateutil.rrule.rrulestr( rrule_param.to_ical().decode(), dtstart=dtstart, ignoretz=True, ) if rrule._until is None: # rrule really doesn't like to calculate all recurrences until # eternity, so we only do it until 2037, because a) I'm not sure # if python can deal with larger datetime values yet and b) pytz # doesn't know any larger transition times rrule._until = dt.datetime(2037, 12, 31) else: if events_tz and 'Z' in rrule_param.to_ical().decode(): rrule._until = pytz.UTC.localize( rrule._until).astimezone(events_tz).replace(tzinfo=None) # rrule._until and dtstart could be dt.date or dt.datetime. They # need to be the same for comparison testuntil = rrule._until if (type(dtstart) == dt.date and type(testuntil) == dt.datetime): testuntil = testuntil.date() teststart = dtstart if (type(testuntil) == dt.date and type(teststart) == dt.datetime): teststart = teststart.date() if testuntil < teststart: logger.warning( '{}: Unsupported recurrence. 
UNTIL is before DTSTART.\n' 'This event will not be available in khal.'.format(href)) return False if rrule.count() == 0: logger.warning( f'{href}: Recurrence defined but will never occur.\n' 'This event will not be available in khal.') return False rrule = map(sanitize_datetime, rrule) logger.debug(f'calculating recurrence dates for {href}, this might take some time.') # RRULE and RDATE may specify the same date twice, it is recommended by # the RFC to consider this as only one instance dtstartl = set(rrule) if not dtstartl: raise UnsupportedRecurrence() else: dtstartl = {vevent['DTSTART'].dt} def get_dates(vevent, key): # TODO replace with get_all_properties dates = vevent.get(key) if dates is None: return if not isinstance(dates, list): dates = [dates] dates = (leaf.dt for tree in dates for leaf in tree.dts) dates = localize_strip_tz(dates, events_tz) return map(sanitize_datetime, dates) # include explicitly specified recursion dates if expand: dtstartl.update(get_dates(vevent, 'RDATE') or ()) # remove excluded dates if expand: for date in get_dates(vevent, 'EXDATE') or (): try: dtstartl.remove(date) except KeyError: logger.warning( 'In event {}, excluded instance starting at {} not found, ' 'event might be invalid.'.format(href, date)) dtstartend = [(start, start + duration) for start in dtstartl] # not necessary, but I prefer deterministic output dtstartend.sort() return dtstartend " 35039,"def batch_matmul( tensor_a, tensor_b, oshape=None, out_dtype=None, transpose_a=False, transpose_b=True, auto_scheduler_rewritten_layout="""", ): """"""Computes batch matrix multiplication of `A` and `B` when `A` and `B` are data in batch. Supports broadcasting for batch dimension. The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed and tensor_b transposed) by default. Parameters ---------- tensor_a : tvm.te.Tensor 3-D with shape [batch, M, K] or [batch, K, M] tensor_b : tvm.te.Tensor 3-D with shape [batch, K, N] or [batch, N, K] oshape : List[Optional] Explicit intended output shape of the computation. Can be useful in cases with dynamic input shapes. auto_scheduler_rewritten_layout: Optional[str] = """" The layout after auto-scheduler's layout rewrite pass. out_dtype : Optional[str] Specifies the output data type for mixed precision batch matmul transpose_a : Optional[bool] = False Whether the data tensor is in transposed format. transpose_b : Optional[bool] = True Whether the weight tensor is in transposed format. 
Returns ------- output : tvm.te.Tensor 3-D with shape [batch, M, N] """""" assert len(tensor_a.shape) == 3, ""only support 3-dim batch_matmul"" if transpose_a: XB, XK, XI = get_const_tuple(tensor_a.shape) else: XB, XI, XK = get_const_tuple(tensor_a.shape) if auto_scheduler_rewritten_layout: # Infer shape for the rewritten layout YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout( auto_scheduler_rewritten_layout, [""b"", ""k"", ""j""] ) auto_scheduler.remove_index_check(tensor_b) else: assert len(tensor_b.shape) == 3, ""only support 3-dim batch_matmul"" if transpose_b: YB, YJ, YK = get_const_tuple(tensor_b.shape) else: YB, YK, YJ = get_const_tuple(tensor_b.shape) assert XK == YK or isinstance(YK, tvm.tir.expr.Var), ""shapes of x and y is inconsistent"" k = te.reduce_axis((0, XK), name=""k"") if oshape is None: assert XB == YB or XB == 1 or YB == 1, ""batch dimension doesn't match"" batch = ( tvm.tir.Any() if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var) else te.max(XB, YB) ) oshape = (batch, XI, YJ) if out_dtype is None: out_dtype = tensor_a.dtype if (transpose_a, transpose_b) == (True, True): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype) * tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_TT"" elif (transpose_a, transpose_b) == (True, False): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype) * tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_TN"" elif (transpose_a, transpose_b) == (False, True): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype) * tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_NT"" else: # (transpose_a, transpose_b) == (False, False): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype) * tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_NN"" output = te.compute( oshape, compute_lambda, name=compute_name, tag=""batch_matmul"", attrs={""layout_free_placeholders"": [tensor_b]}, ) if auto_scheduler_rewritten_layout: output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout) return output ","def batch_matmul( tensor_a, tensor_b, oshape=None, out_dtype=None, transpose_a=False, transpose_b=True, auto_scheduler_rewritten_layout="""", ): """"""Computes batch matrix multiplication of `A` and `B` when `A` and `B` are data in batch. Supports broadcasting for batch dimension. The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed and tensor_b transposed) by default. Parameters ---------- tensor_a : tvm.te.Tensor 3-D with shape [batch, M, K] or [batch, K, M] tensor_b : tvm.te.Tensor 3-D with shape [batch, K, N] or [batch, N, K] oshape : List[Optional] Explicit intended output shape of the computation. Can be useful in cases with dynamic input shapes. auto_scheduler_rewritten_layout: Optional[str] = """" The layout after auto-scheduler's layout rewrite pass. out_dtype : Optional[str] Specifies the output data type for mixed precision batch matmul transpose_a : Optional[bool] = False Whether the data tensor is in transposed format. transpose_b : Optional[bool] = True Whether the weight tensor is in transposed format. 
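A minimal construction sketch (illustrative; ``bs``, ``M``, ``N`` and ``K``
are assumed to be positive ints, and the default NT format is used)::

    A = te.placeholder((bs, M, K), name='A')
    B = te.placeholder((bs, N, K), name='B')   # transpose_b=True, so B is [batch, N, K]
    C = batch_matmul(A, B)                     # 3-D tensor of shape (bs, M, N)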
Returns ------- output : tvm.te.Tensor 3-D with shape [batch, M, N] """""" assert len(tensor_a.shape) == 3, ""only support 3-dim tensor_a"" if transpose_a: XB, XK, XI = get_const_tuple(tensor_a.shape) else: XB, XI, XK = get_const_tuple(tensor_a.shape) if auto_scheduler_rewritten_layout: # Infer shape for the rewritten layout YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout( auto_scheduler_rewritten_layout, [""b"", ""k"", ""j""] ) auto_scheduler.remove_index_check(tensor_b) else: assert len(tensor_b.shape) == 3, ""only support 3-dim batch_matmul"" if transpose_b: YB, YJ, YK = get_const_tuple(tensor_b.shape) else: YB, YK, YJ = get_const_tuple(tensor_b.shape) assert XK == YK or isinstance(YK, tvm.tir.expr.Var), ""shapes of x and y is inconsistent"" k = te.reduce_axis((0, XK), name=""k"") if oshape is None: assert XB == YB or XB == 1 or YB == 1, ""batch dimension doesn't match"" batch = ( tvm.tir.Any() if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var) else te.max(XB, YB) ) oshape = (batch, XI, YJ) if out_dtype is None: out_dtype = tensor_a.dtype if (transpose_a, transpose_b) == (True, True): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype) * tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_TT"" elif (transpose_a, transpose_b) == (True, False): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype) * tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_TN"" elif (transpose_a, transpose_b) == (False, True): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype) * tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_NT"" else: # (transpose_a, transpose_b) == (False, False): compute_lambda = lambda b, i, j: te.sum( tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype) * tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype), axis=k, ) compute_name = ""T_batch_matmul_NN"" output = te.compute( oshape, compute_lambda, name=compute_name, tag=""batch_matmul"", attrs={""layout_free_placeholders"": [tensor_b]}, ) if auto_scheduler_rewritten_layout: output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout) return output " 58033,"def logs_search_command(client: Client, args) -> CommandResults: limit = args.get('limit') ip = args.get('ip') interface = args.get('interface') results = client.log_search(limit) if interface: if ip: sresult = [x for x in results if x['interface'] == interface if (x['dst'] == ip or x['src'] == ip)] else: sresult = [x for x in results if x['interface'] == interface] elif ip: sresult = [x for x in results if (x['dst'] == ip or x['src'] == ip)] if interface or ip: res = sresult else: res = results if res: pretty = [] for result in res: display = {'interface', 'src', 'srcport', 'dst', 'dstport', 'action', '__timestamp__', 'protoname', 'label'} pretty.append(with_keys(result, display)) return output_format(pretty, 'Logs', readable='firewall logs') else: return CommandResults(readable_output='Nothing found') ","def logs_search_command(client: Client, args) -> CommandResults: limit = args.get('limit') ip = args.get('ip') interface = args.get('interface') results = client.log_search(limit) if interface: if ip: res = [x for x in results if x['interface'] == interface if (x['dst'] == ip or x['src'] == ip)] else: res = [x for x in results if x['interface'] == interface] elif 
ip: res = [x for x in results if (x['dst'] == ip or x['src'] == ip)] else: res = results if res: pretty = [] for result in res: display = {'interface', 'src', 'srcport', 'dst', 'dstport', 'action', '__timestamp__', 'protoname', 'label'} pretty.append(with_keys(result, display)) return output_format(pretty, 'Logs', readable='firewall logs') else: return CommandResults(readable_output='Nothing found') " 35247,"def polyder(p, m=1): """"""Returns the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or sequence Polynomial to differentiate m : int, optional Order of differentiation. By default, 1 Returns ------- der : poly1d A new polynomial representing the derivative. See Also -------- numpy.polyder """""" m = int(m) if m < 0: raise ValueError(""Order of derivative must be positive."") truepoly = isinstance(p, cupy.poly1d) p = cupy.asarray(p) n = len(p) - 1 y = p[:-1] * cupy.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = cupy.poly1d(val) return val ","def polyder(p, m=1): """"""Returns the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or sequence Polynomial to differentiate m : int, optional Order of differentiation. By default, 1 Returns ------- der : cupy.ndarray or cupy.poly1d A new polynomial representing the derivative. See Also -------- numpy.polyder """""" m = int(m) if m < 0: raise ValueError(""Order of derivative must be positive."") truepoly = isinstance(p, cupy.poly1d) p = cupy.asarray(p) n = len(p) - 1 y = p[:-1] * cupy.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = cupy.poly1d(val) return val " 39430,"def convert_string_array(arr, name=None): """"""Convert a numpy array of strings to a vtkStringArray or vice versa. Parameters ---------- arr : numpy.ndarray Numpy string array to convert. name : str, optional Name to set the vtkStringArray to. Returns ------- vtkStringArray VTK string array. Notes ----- Note that this is terribly inefficient. If you have ideas on how to make this faster, please consider opening a pull request. """""" if isinstance(arr, np.ndarray): # VTK default fonts only support ASCII. See https://gitlab.kitware.com/vtk/vtk/-/issues/16904 if not str(arr).isascii(): # avoids segfault raise ValueError( 'String array contains non-ASCII characters that are not supported by VTK.' ) vtkarr = _vtk.vtkStringArray() ########### OPTIMIZE ########### for val in arr: vtkarr.InsertNextValue(val) ################################ if isinstance(name, str): vtkarr.SetName(name) return vtkarr # Otherwise it is a vtk array and needs to be converted back to numpy ############### OPTIMIZE ############### nvalues = arr.GetNumberOfValues() return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U') ######################################## ","def convert_string_array(arr, name=None): """"""Convert a numpy array of strings to a vtkStringArray or vice versa. Parameters ---------- arr : numpy.ndarray Numpy string array to convert. name : str, optional Name to set the vtkStringArray to. Returns ------- vtkStringArray VTK string array. Notes ----- Note that this is terribly inefficient. If you have ideas on how to make this faster, please consider opening a pull request. """""" if isinstance(arr, np.ndarray): # VTK default fonts only support ASCII. 
See https://gitlab.kitware.com/vtk/vtk/-/issues/16904 if not ''.join(arr).isascii(): # avoids segfault raise ValueError( 'String array contains non-ASCII characters that are not supported by VTK.' ) vtkarr = _vtk.vtkStringArray() ########### OPTIMIZE ########### for val in arr: vtkarr.InsertNextValue(val) ################################ if isinstance(name, str): vtkarr.SetName(name) return vtkarr # Otherwise it is a vtk array and needs to be converted back to numpy ############### OPTIMIZE ############### nvalues = arr.GetNumberOfValues() return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U') ######################################## " 15535,"def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the available OctoPrint binary sensors."""""" if discovery_info is None: return name = discovery_info[""name""] base_url = discovery_info[""base_url""] monitored_conditions = discovery_info[""sensors""] coordinator: DataUpdateCoordinator = hass.data[COMPONENT_DOMAIN][base_url] devices = [] if ""Printing"" in monitored_conditions: devices.append(OctoPrintPrintingBinarySensor(coordinator, name)) if ""Printing Error"" in monitored_conditions: devices.append(OctoPrintPrintingErrorBinarySensor(coordinator, name)) add_entities(devices, True) ","def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the available OctoPrint binary sensors."""""" if discovery_info is None: return name = discovery_info[""name""] base_url = discovery_info[""base_url""] monitored_conditions = discovery_info[""sensors""] coordinator: DataUpdateCoordinator = hass.data[COMPONENT_DOMAIN][base_url] entities = [] if ""Printing"" in monitored_conditions: devices.append(OctoPrintPrintingBinarySensor(coordinator, name)) if ""Printing Error"" in monitored_conditions: devices.append(OctoPrintPrintingErrorBinarySensor(coordinator, name)) add_entities(devices, True) " 21552,"def main(args, environ): mode = args[1] if len(args) > 1 else ""run"" desired_uid = int(environ.get(""UID"", ""991"")) desired_gid = int(environ.get(""GID"", ""991"")) synapse_worker = environ.get(""SYNAPSE_WORKER"", ""synapse.app.homeserver"") if (desired_uid == os.getuid()) and (desired_gid == os.getgid()): ownership = None else: ownership = ""{}:{}"".format(desired_uid, desired_gid) if ownership is None: log(""Will not perform chmod/gosu as UserID already matches request"") # In generate mode, generate a configuration and missing keys, then exit if mode == ""generate"": return run_generate_config(environ, ownership) if mode == ""migrate_config"": # generate a config based on environment vars. 
config_dir = environ.get(""SYNAPSE_CONFIG_DIR"", ""/data"") config_path = environ.get( ""SYNAPSE_CONFIG_PATH"", config_dir + ""/homeserver.yaml"" ) return generate_config_from_template( config_dir, config_path, environ, ownership ) if mode != ""run"": error(""Unknown execution mode '%s'"" % (mode,)) args = args[2:] if ""-m"" not in args: args = [""-m"", synapse_worker] + args jemallocpath = f""/usr/lib/{platform.machine()}-linux-gnu/libjemalloc.so.2"" if os.path.isfile(jemallocpath): environ[""LD_PRELOAD""] = jemallocpath else: log(f""Could not find {jemallocpath}, will not use"") # if there are no config files passed to synapse, try adding the default file if not any(p.startswith(""--config-path"") or p.startswith(""-c"") for p in args): config_dir = environ.get(""SYNAPSE_CONFIG_DIR"", ""/data"") config_path = environ.get( ""SYNAPSE_CONFIG_PATH"", config_dir + ""/homeserver.yaml"" ) if not os.path.exists(config_path): if ""SYNAPSE_SERVER_NAME"" in environ: error( """"""\ Config file '%s' does not exist. The synapse docker image no longer supports generating a config file on-the-fly based on environment variables. You can migrate to a static config file by running with 'migrate_config'. See the README for more details. """""" % (config_path,) ) error( ""Config file '%s' does not exist. You should either create a new "" ""config file by running with the `generate` argument (and then edit "" ""the resulting file before restarting) or specify the path to an "" ""existing config file with the SYNAPSE_CONFIG_PATH variable."" % (config_path,) ) args += [""--config-path"", config_path] log(""Starting synapse with args "" + "" "".join(args)) args = [""python""] + args if ownership is not None: args = [""gosu"", ownership] + args os.execve(""/usr/sbin/gosu"", args, environ) else: os.execve(""/usr/local/bin/python"", args, environ) ","def main(args, environ): mode = args[1] if len(args) > 1 else ""run"" desired_uid = int(environ.get(""UID"", ""991"")) desired_gid = int(environ.get(""GID"", ""991"")) synapse_worker = environ.get(""SYNAPSE_WORKER"", ""synapse.app.homeserver"") if (desired_uid == os.getuid()) and (desired_gid == os.getgid()): ownership = None else: ownership = ""{}:{}"".format(desired_uid, desired_gid) if ownership is None: log(""Will not perform chmod/gosu as UserID already matches request"") # In generate mode, generate a configuration and missing keys, then exit if mode == ""generate"": return run_generate_config(environ, ownership) if mode == ""migrate_config"": # generate a config based on environment vars. 
config_dir = environ.get(""SYNAPSE_CONFIG_DIR"", ""/data"") config_path = environ.get( ""SYNAPSE_CONFIG_PATH"", config_dir + ""/homeserver.yaml"" ) return generate_config_from_template( config_dir, config_path, environ, ownership ) if mode != ""run"": error(""Unknown execution mode '%s'"" % (mode,)) args = args[2:] if ""-m"" not in args: args = [""-m"", synapse_worker] + args jemallocpath = f""/usr/lib/{platform.machine()}-linux-gnu/libjemalloc.so.2"" if os.path.isfile(jemallocpath): environ[""LD_PRELOAD""] = jemallocpath else: log(""Could not find %s, will not use"" % (jemallocpath,)) # if there are no config files passed to synapse, try adding the default file if not any(p.startswith(""--config-path"") or p.startswith(""-c"") for p in args): config_dir = environ.get(""SYNAPSE_CONFIG_DIR"", ""/data"") config_path = environ.get( ""SYNAPSE_CONFIG_PATH"", config_dir + ""/homeserver.yaml"" ) if not os.path.exists(config_path): if ""SYNAPSE_SERVER_NAME"" in environ: error( """"""\ Config file '%s' does not exist. The synapse docker image no longer supports generating a config file on-the-fly based on environment variables. You can migrate to a static config file by running with 'migrate_config'. See the README for more details. """""" % (config_path,) ) error( ""Config file '%s' does not exist. You should either create a new "" ""config file by running with the `generate` argument (and then edit "" ""the resulting file before restarting) or specify the path to an "" ""existing config file with the SYNAPSE_CONFIG_PATH variable."" % (config_path,) ) args += [""--config-path"", config_path] log(""Starting synapse with args "" + "" "".join(args)) args = [""python""] + args if ownership is not None: args = [""gosu"", ownership] + args os.execve(""/usr/sbin/gosu"", args, environ) else: os.execve(""/usr/local/bin/python"", args, environ) " 42921,"def c_0(clique: list, graph: nx.Graph): """"""Generates the set :math:`C_0` of nodes that are connected to all nodes in the input clique subgraph. The set :math:`C_0` is defined in :cite:`pullan2006phased` and is used to determine nodes that can be added to the current clique to grow it into a larger one. Example usage: .. code-block:: >>> from strawberryfields.apps.graph import utils >>> import networkx as nx >>> graph = nx.complete_graph(10) >>> subgraph = [0, 1, 2, 3, 4] >>> utils.c_0(subgraph, graph) [5, 6, 7, 8, 9] Args: clique (list[int]): A subgraph specified by a list of nodes; the subgraph must be a clique. graph (nx.Graph): The input graph. Returns: list[int]: A list containing the :math:`C_0` nodes for the clique. """""" if not is_clique(graph.subgraph(clique)): raise ValueError(""Input subgraph is not a clique"") clique = set(clique) c_0_nodes = [] non_clique_nodes = set(graph.nodes) - clique for i in non_clique_nodes: if clique.issubset(graph.neighbors(i)): c_0_nodes.append(i) return c_0_nodes ","def c_0(clique: list, graph: nx.Graph): """"""Generates the set :math:`C_0` of nodes that are connected to all nodes in the input clique subgraph. The set :math:`C_0` is defined in :cite:`pullan2006phased` and is used to determine nodes that can be added to the current clique to grow it into a larger one. Example usage: .. code-block:: >>> from strawberryfields.apps.graph import utils >>> import networkx as nx >>> graph = nx.complete_graph(10) >>> subgraph = [0, 1, 2, 3, 4] >>> utils.c_0(subgraph, graph) [5, 6, 7, 8, 9] Args: clique (list[int]): a subgraph specified by a list of nodes; the subgraph must be a clique graph (nx.Graph): The input graph. 
Returns: list[int]: A list containing the :math:`C_0` nodes for the clique. """""" if not is_clique(graph.subgraph(clique)): raise ValueError(""Input subgraph is not a clique"") clique = set(clique) c_0_nodes = [] non_clique_nodes = set(graph.nodes) - clique for i in non_clique_nodes: if clique.issubset(graph.neighbors(i)): c_0_nodes.append(i) return c_0_nodes " 21967,"def authIfV2(sydent, request, requireTermsAgreed=True): if request.path.startswith('/_matrix/identity/v2'): token = tokenFromRequest(request) if token is None: raise MatrixRestError(403, ""M_UNAUTHORIZED"", ""Unauthorized"") accountStore = AccountStore(sydent) account = accountStore.getAccountByToken(token) if account is None: raise MatrixRestError(403, ""M_UNAUTHORIZED"", ""Unauthorized"") if requireTermsAgreed: terms = get_terms(sydent) if ( terms.getMasterVersion() is not None and account.consentVersion != terms.getMasterVersion() ): raise MatrixRestError(403, ""M_TERMS_NOT_SIGNED"", ""Terms not signed"") return account return None ","def authIfV2(sydent, request, requireTermsAgreed=True): if request.path.startswith('/_matrix/identity/v2'): token = tokenFromRequest(request) if token is None: raise MatrixRestError(403, ""M_UNAUTHORIZED"", ""Unauthorized"") accountStore = AccountStore(sydent) account = accountStore.getAccountByToken(token) if account is None: raise MatrixRestError(401, ""M_UNAUTHORIZED"", ""Unauthorized"") if requireTermsAgreed: terms = get_terms(sydent) if ( terms.getMasterVersion() is not None and account.consentVersion != terms.getMasterVersion() ): raise MatrixRestError(403, ""M_TERMS_NOT_SIGNED"", ""Terms not signed"") return account return None " 6929,"def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False): """"""walk and sync all doctypes and pages"""""" if not files: files = [] # load in sequence - warning for devs document_types = ['doctype', 'page', 'report', 'dashboard_chart_source', 'print_format', 'website_theme', 'web_form', 'web_template', 'notification', 'print_style', 'data_migration_mapping', 'data_migration_plan', 'workspace', 'onboarding_step', 'module_onboarding'] for doctype in document_types: doctype_path = os.path.join(start_path, doctype) if os.path.exists(doctype_path): for docname in os.listdir(doctype_path): if os.path.isdir(os.path.join(doctype_path, docname)): doc_path = os.path.join(doctype_path, docname, docname) + "".json"" if os.path.exists(doc_path): if not doc_path in files: files.append(doc_path) return files","def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False): """"""walk and sync all doctypes and pages"""""" files = files or [] # load in sequence - warning for devs document_types = ['doctype', 'page', 'report', 'dashboard_chart_source', 'print_format', 'website_theme', 'web_form', 'web_template', 'notification', 'print_style', 'data_migration_mapping', 'data_migration_plan', 'workspace', 'onboarding_step', 'module_onboarding'] for doctype in document_types: doctype_path = os.path.join(start_path, doctype) if os.path.exists(doctype_path): for docname in os.listdir(doctype_path): if os.path.isdir(os.path.join(doctype_path, docname)): doc_path = os.path.join(doctype_path, docname, docname) + "".json"" if os.path.exists(doc_path): if not doc_path in files: files.append(doc_path) return files" 30463,"def main(): # get incident fields res = demisto.executeCommand('demisto-api-get', {'uri': '/incidentfields'}) if is_error(res): return_error(res[0]['Contents']) fields = res[0]['Contents']['response'] # 'fields' 
contains non-incident fields, as well, so let's make a version containing only incident fields incident_fields = [field for field in fields if field['id'].startswith('incident_')] # get arguments args = demisto.args() incident_type = args['incident_type'] exclude_system = False if 'custom' in args and argToBoolean(args['custom']) is True: exclude_system = True name_key = 'name' if 'short_names' in args and argToBoolean(args['short_names']) is True: name_key = 'cliName' explicit_only = False if 'explicit_only' in args and argToBoolean(args['explicit_only']) is True: explicit_only = True # generate results types = [] if exclude_system is True: # only return non-system fields for field in incident_fields: # using multiple if statements for readability if field['system'] is False: # exclude system fields if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) else: # return all fields for field in incident_fields: # using multiple if statements for readability if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) # output results if 'pprint' in args and argToBoolean(args['pprint']) is True: demisto.results(pformat(types)) else: demisto.results(types) ","def main(): # get incident fields res = demisto.executeCommand('demisto-api-get', {'uri': '/incidentfields'}) if is_error(res): return_error(res[0]['Contents']) fields = res[0]['Contents']['response'] # 'fields' contains non-incident fields, as well, so let's make a version containing only incident fields incident_fields = [field for field in fields if field['id'].startswith('incident_')] # get arguments args = demisto.args() incident_type = args['incident_type'] exclude_system = False if 'custom' in args and argToBoolean(args['custom']) is True: exclude_system_fields = True name_key = 'name' if 'short_names' in args and argToBoolean(args['short_names']) is True: name_key = 'cliName' explicit_only = False if 'explicit_only' in args and argToBoolean(args['explicit_only']) is True: explicit_only = True # generate results types = [] if exclude_system is True: # only return non-system fields for field in incident_fields: # using multiple if statements for readability if field['system'] is False: # exclude system fields if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) else: # return all fields for field in incident_fields: # using multiple if statements for readability if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in 
associatedTypes types.append(field[name_key]) # output results if 'pprint' in args and argToBoolean(args['pprint']) is True: demisto.results(pformat(types)) else: demisto.results(types) " 38837,"def main(prog: Optional[str] = None) -> None: """""" The `allennlp.run` command only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag or you make your code available as a plugin (see `allennlp.common.plugins`). """""" import_plugins() parser = create_parser(prog) args = parser.parse_args() # If a subparser is triggered, it adds its work as `args.func`. # So if no such attribute has been added, no subparser was triggered, # so give the user some help. if ""func"" in dir(args): # Import any additional modules needed (to register custom classes). for package_name in args.include_package: import_module_and_submodules(package_name) args.func(args) else: parser.print_help() ","def main(prog: Optional[str] = None) -> None: """""" `allennlp.run` only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag or you make your code available as a plugin (see `allennlp.common.plugins`). """""" import_plugins() parser = create_parser(prog) args = parser.parse_args() # If a subparser is triggered, it adds its work as `args.func`. # So if no such attribute has been added, no subparser was triggered, # so give the user some help. if ""func"" in dir(args): # Import any additional modules needed (to register custom classes). for package_name in args.include_package: import_module_and_submodules(package_name) args.func(args) else: parser.print_help() " 4178,"def bootstrap_ci(arr, ci=.95, n_bootstraps=2000, stat_fun='mean', random_state=None): """"""Get confidence intervals from non-parametric bootstrap. Parameters ---------- arr : ndarray The input data on which to calculate the confidence interval. ci : float Level of the confidence interval between 0 and 1. n_bootstraps : int Number of bootstraps stat_fun : str | callable Can be ""mean"", ""median"", or a callable operating along `axis=0`. random_state : int | float | array_like | None The seed at which to initialize the bootstrap. Returns ------- cis : ndarray Containing the lower boundary of the CI at `cis[0, ...]` and the upper boundary of the CI at `cis[1, ...]`. """""" if stat_fun == ""mean"": def stat_fun(x): return x.mean(axis=0) elif stat_fun == 'median': def stat_fun(x): return np.median(x, axis=0) elif not callable(stat_fun): raise ValueError(""stat_fun must be 'mean', 'median' or callable."") n_trials = arr.shape[0] indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too rng = check_random_state(random_state) boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices))) stat = np.array([stat_fun(arr[inds]) for inds in boot_indices]) ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100) ci_low, ci_up = np.percentile(stat, ci, axis=0) return np.array([ci_low, ci_up]) ","def bootstrap_ci(arr, ci=.95, n_bootstraps=2000, stat_fun='mean', random_state=None): """"""Get confidence intervals from non-parametric bootstrap. Parameters ---------- arr : ndarray, shape (n_samples, ...) The input data on which to calculate the confidence interval. ci : float Level of the confidence interval between 0 and 1. 
n_bootstraps : int Number of bootstraps stat_fun : str | callable Can be ""mean"", ""median"", or a callable operating along `axis=0`. random_state : int | float | array_like | None The seed at which to initialize the bootstrap. Returns ------- cis : ndarray Containing the lower boundary of the CI at `cis[0, ...]` and the upper boundary of the CI at `cis[1, ...]`. """""" if stat_fun == ""mean"": def stat_fun(x): return x.mean(axis=0) elif stat_fun == 'median': def stat_fun(x): return np.median(x, axis=0) elif not callable(stat_fun): raise ValueError(""stat_fun must be 'mean', 'median' or callable."") n_trials = arr.shape[0] indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too rng = check_random_state(random_state) boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices))) stat = np.array([stat_fun(arr[inds]) for inds in boot_indices]) ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100) ci_low, ci_up = np.percentile(stat, ci, axis=0) return np.array([ci_low, ci_up]) " 45944,"def get_hanning_kernel1d(kernel_size: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor: r""""""Returns Hanning (also known as Hann) kernel, used in signal processing and KCF tracker .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 See further in numpy docs https://numpy.org/doc/stable/reference/generated/numpy.hanning.html Args: kernel_size: It should be positive. Returns: 1D tensor with Hanning filter coefficients. .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) Shape: - Output: math:`(\text{kernel_size})` Examples: >>> get_hanning_kernel1d(4) tensor([ 0., 0.75, 0.75, 0.]) """""" if not isinstance(kernel_size, int) or kernel_size <= 2: raise TypeError(f""ksize must be an positive integer > 2. Got {kernel_size}"") x: torch.Tensor = torch.arange(kernel_size, device=device, dtype=dtype) x = 0.5 - 0.5 * torch.cos(2.0 * pi * x / float(kernel_size - 1)) return x ","def get_hanning_kernel1d(kernel_size: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor: r""""""Returns Hanning (also known as Hann) kernel, used in signal processing and KCF tracker .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 See further in numpy docs https://numpy.org/doc/stable/reference/generated/numpy.hanning.html Args: kernel_size: It should be positive. Returns: 1D tensor with Hanning filter coefficients. .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) Shape: - Output: math:`(\text{kernel_size})` Examples: >>> get_hanning_kernel1d(4) tensor([ 0., 0.75, 0.75, 0.]) """""" if not isinstance(kernel_size, int) or kernel_size <= 2: raise TypeError(f""ksize must be an positive integer > 2. Got {kernel_size}"") x: torch.Tensor = torch.arange(kernel_size, device=device, dtype=dtype) x = 0.5 - 0.5 * torch.cos(2.0 * math.pi * x / float(kernel_size - 1)) return x " 32576,"def get_html_from_response(response): text = response.text open_tag = text.find('') return text[open_tag: close_tag + len('')] ","def get_html_from_response(response): text = response.text open_tag = text.find('') return text[open_tag: close_tag + len('')] " 1973,"def _beta_divergence(X, W, H, beta, square_root=False): """"""Compute the beta-divergence of X and dot(W, H). 
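A minimal usage sketch (illustrative shapes; with ``beta=2`` and
``square_root=True`` the result is the Frobenius norm of ``X - np.dot(W, H)``):

>>> X = np.random.rand(4, 3)
>>> W = np.random.rand(4, 2)
>>> H = np.random.rand(2, 3)
>>> err = _beta_divergence(X, W, H, beta=2, square_root=True)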
Parameters ---------- X : float or array-like of shape (n_samples, n_features) W : float or dense array-like of shape (n_samples, n_components) H : float or dense array-like of shape (n_components, n_features) beta : float or string in {'frobenius', 'kullback-leibler', \ 'itakura-saito'} Parameter of the beta-divergence. If beta == 2, this is half the Frobenius *squared* norm. If beta == 1, this is the generalized Kullback-Leibler divergence. If beta == 0, this is the Itakura-Saito divergence. Else, this is the general beta-divergence. square_root : bool, default=False If True, return np.sqrt(2 * res) For beta == 2, it corresponds to the Frobenius norm. Returns ------- res : float Beta divergence of X and np.dot(X, H) """""" beta = _beta_loss_to_float(beta) # The method can be called with scalars if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) # Frobenius norm if beta == 2: # Avoid the creation of the dense np.dot(W, H) if X is sparse. if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H) cross_prod = trace_dot((X * H.T), W) res = (norm_X + norm_WH - 2. * cross_prod) / 2. else: res = squared_norm(X - np.dot(W, H)) / 2. if square_root: return np.sqrt(res * 2) else: return res if sp.issparse(X): # compute np.dot(W, H) only where X is nonzero WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() # do not affect the zeros: here 0 ** (-1) = 0 and not infinity indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] # used to avoid division by zero WH_data[WH_data == 0] = EPSILON # generalized Kullback-Leibler divergence if beta == 1: # fast and memory efficient computation of np.sum(np.dot(W, H)) sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) # computes np.sum(X * log(X / WH)) only where X is nonzero div = X_data / WH_data res = np.dot(X_data, np.log(div)) # add full np.sum(np.dot(W, H)) - np.sum(X) res += sum_WH - X_data.sum() # Itakura-Saito divergence elif beta == 0: div = X_data / WH_data res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div)) # beta-divergence, beta not in (0, 1, 2) else: if sp.issparse(X): # slow loop, but memory efficient computation of : # np.sum(np.dot(W, H) ** beta) sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) else: sum_WH_beta = np.sum(WH ** beta) sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) res = (X_data ** beta).sum() - beta * sum_X_WH res += sum_WH_beta * (beta - 1) res /= beta * (beta - 1) if square_root: return np.sqrt(2 * res) else: return res ","def _beta_divergence(X, W, H, beta, square_root=False): """"""Compute the beta-divergence of X and dot(W, H). Parameters ---------- X : float or array-like of shape (n_samples, n_features) W : float or dense array-like of shape (n_samples, n_components) H : float or dense array-like of shape (n_components, n_features) beta : float or {'frobenius', 'kullback-leibler', \ 'itakura-saito'} Parameter of the beta-divergence. If beta == 2, this is half the Frobenius *squared* norm. If beta == 1, this is the generalized Kullback-Leibler divergence. If beta == 0, this is the Itakura-Saito divergence. Else, this is the general beta-divergence. square_root : bool, default=False If True, return np.sqrt(2 * res) For beta == 2, it corresponds to the Frobenius norm. 
Returns ------- res : float Beta divergence of X and np.dot(X, H) """""" beta = _beta_loss_to_float(beta) # The method can be called with scalars if not sp.issparse(X): X = np.atleast_2d(X) W = np.atleast_2d(W) H = np.atleast_2d(H) # Frobenius norm if beta == 2: # Avoid the creation of the dense np.dot(W, H) if X is sparse. if sp.issparse(X): norm_X = np.dot(X.data, X.data) norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H) cross_prod = trace_dot((X * H.T), W) res = (norm_X + norm_WH - 2. * cross_prod) / 2. else: res = squared_norm(X - np.dot(W, H)) / 2. if square_root: return np.sqrt(res * 2) else: return res if sp.issparse(X): # compute np.dot(W, H) only where X is nonzero WH_data = _special_sparse_dot(W, H, X).data X_data = X.data else: WH = np.dot(W, H) WH_data = WH.ravel() X_data = X.ravel() # do not affect the zeros: here 0 ** (-1) = 0 and not infinity indices = X_data > EPSILON WH_data = WH_data[indices] X_data = X_data[indices] # used to avoid division by zero WH_data[WH_data == 0] = EPSILON # generalized Kullback-Leibler divergence if beta == 1: # fast and memory efficient computation of np.sum(np.dot(W, H)) sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1)) # computes np.sum(X * log(X / WH)) only where X is nonzero div = X_data / WH_data res = np.dot(X_data, np.log(div)) # add full np.sum(np.dot(W, H)) - np.sum(X) res += sum_WH - X_data.sum() # Itakura-Saito divergence elif beta == 0: div = X_data / WH_data res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div)) # beta-divergence, beta not in (0, 1, 2) else: if sp.issparse(X): # slow loop, but memory efficient computation of : # np.sum(np.dot(W, H) ** beta) sum_WH_beta = 0 for i in range(X.shape[1]): sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta) else: sum_WH_beta = np.sum(WH ** beta) sum_X_WH = np.dot(X_data, WH_data ** (beta - 1)) res = (X_data ** beta).sum() - beta * sum_X_WH res += sum_WH_beta * (beta - 1) res /= beta * (beta - 1) if square_root: return np.sqrt(2 * res) else: return res " 56589,"def plot_ppc( ax, length_plotters, rows, cols, figsize, animated, obs_plotters, pp_plotters, predictive_dataset, pp_sample_ix, kind, alpha, textsize, mean, jitter, total_pp_samples, legend, # pylint: disable=unused-argument group, # pylint: disable=unused-argument animation_kwargs, # pylint: disable=unused-argument num_pp_samples, backend_kwargs, show, ): """"""Bokeh ppc plot."""""" if backend_kwargs is None: backend_kwargs = {} backend_kwargs = { **backend_kwarg_defaults((""dpi"", ""plot.bokeh.figure.dpi""),), **backend_kwargs, } (figsize, *_, linewidth, markersize) = _scale_fig_size(figsize, textsize, rows, cols) if ax is None: _, axes = _create_axes_grid( length_plotters, rows, cols, figsize=figsize, backend=""bokeh"", backend_kwargs=backend_kwargs, ) else: axes = np.atleast_2d(ax) if len([item for item in axes.ravel() if not None]) != length_plotters: raise ValueError( ""Found {} variables to plot but {} axes instances. 
They must be equal."".format( length_plotters, len(axes) ) ) if alpha is None: if animated: alpha = 1 else: if kind.lower() == ""scatter"": alpha = 0.7 else: alpha = 0.2 if jitter is None: jitter = 0.0 assert jitter >= 0.0 for i, ax_i in enumerate((item for item in axes.flatten() if item is not None)): var_name, _, obs_vals = obs_plotters[i] pp_var_name, _, pp_vals = pp_plotters[i] dtype = predictive_dataset[pp_var_name].dtype.kind # flatten non-specified dimensions obs_vals = obs_vals.flatten() pp_vals = pp_vals.reshape(total_pp_samples, -1) pp_sampled_vals = pp_vals[pp_sample_ix] if kind == ""kde"": plot_kwargs = {""line_color"": ""red"", ""line_alpha"": alpha, ""line_width"": 0.5 * linewidth} pp_densities = [] pp_xs = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() if dtype == ""f"": pp_density, lower, upper = _fast_kde(vals) pp_x = np.linspace(lower, upper, len(pp_density)) pp_densities.append(pp_density) pp_xs.append(pp_x) else: bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) pp_densities.append(hist) pp_xs.append(bin_edges) if dtype == ""f"": ax_i.multi_line(pp_xs, pp_densities, **plot_kwargs) else: for x_s, y_s in zip(pp_xs, pp_densities): ax_i.step(x_s, y_s, **plot_kwargs) if dtype == ""f"": plot_kde( obs_vals, plot_kwargs={""line_color"": ""black"", ""line_width"": linewidth}, fill_kwargs={""alpha"": 0}, ax=ax_i, backend=""bokeh"", backend_kwargs={}, show=False, ) else: bins = get_bins(obs_vals) _, hist, bin_edges = histogram(obs_vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.step( bin_edges, hist, line_color=""black"", line_width=linewidth, mode=""center"", ) if mean: if dtype == ""f"": rep = len(pp_densities) len_density = len(pp_densities[0]) new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density) new_d = np.zeros((rep, len_density)) bins = np.digitize(pp_xs, new_x, right=True) new_x -= (new_x[1] - new_x[0]) / 2 for irep in range(rep): new_d[irep][bins[irep]] = pp_densities[irep] ax_i.line( new_x, new_d.mean(0), color=""blue"", line_dash=""dashed"", line_width=linewidth, ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.step( bin_edges, hist, line_color=""blue"", line_width=linewidth, line_dash=""dashed"", mode=""center"", ) ax_i.yaxis.major_tick_line_color = None ax_i.yaxis.minor_tick_line_color = None ax_i.yaxis.major_label_text_font_size = ""0pt"" elif kind == ""cumulative"": if dtype == ""f"": ax_i.line( *_empirical_cdf(obs_vals), line_color=""black"", line_width=linewidth, ) else: ax_i.step( *_empirical_cdf(obs_vals), line_color=""black"", line_width=linewidth, mode=""center"", ) pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size)) for idx, vals in enumerate(pp_sampled_vals): vals = np.array([vals]).flatten() pp_x, pp_density = _empirical_cdf(vals) pp_densities[2 * idx] = pp_x pp_densities[2 * idx + 1] = pp_density ax_i.multi_line( list(pp_densities[::2]), list(pp_densities[1::2]), line_alpha=alpha, line_color=""pink"", line_width=linewidth, ) if mean: ax_i.line( *_empirical_cdf(pp_vals.flatten()), color=""blue"", line_dash=""dashed"", line_width=linewidth, ) elif kind == ""scatter"": if mean: if dtype == ""f"": plot_kde( pp_vals.flatten(), plot_kwargs={ ""line_color"": ""blue"", ""line_dash"": ""dashed"", ""line_width"": linewidth, }, ax=ax_i, backend=""bokeh"", backend_kwargs={}, show=False, ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, 
hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.step( bin_edges, hist, color=""blue"", line_width=linewidth, line_dash=""dashed"", mode=""center"", ) jitter_scale = 0.1 y_rows = np.linspace(0, 0.1, num_pp_samples + 1) scale_low = 0 scale_high = jitter_scale * jitter obs_yvals = np.zeros_like(obs_vals, dtype=np.float64) if jitter: obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals)) ax_i.circle( obs_vals, obs_yvals, fill_color=""black"", size=markersize, line_alpha=alpha, ) for vals, y in zip(pp_sampled_vals, y_rows[1:]): vals = np.ravel(vals) yvals = np.full_like(vals, y, dtype=np.float64) if jitter: yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals)) ax_i.circle(vals, yvals, fill_color=""red"", size=markersize, fill_alpha=alpha) ax_i.yaxis.major_tick_line_color = None ax_i.yaxis.minor_tick_line_color = None ax_i.yaxis.major_label_text_font_size = ""0pt"" if var_name != pp_var_name: xlabel = ""{} / {}"".format(var_name, pp_var_name) else: xlabel = var_name ax_i.xaxis.axis_label = xlabel show_layout(axes, show) return axes ","def plot_ppc( ax, length_plotters, rows, cols, figsize, animated, obs_plotters, pp_plotters, predictive_dataset, pp_sample_ix, kind, alpha, textsize, mean, jitter, total_pp_samples, legend, # pylint: disable=unused-argument group, # pylint: disable=unused-argument animation_kwargs, # pylint: disable=unused-argument num_pp_samples, backend_kwargs, show, ): """"""Bokeh ppc plot."""""" if backend_kwargs is None: backend_kwargs = {} backend_kwargs = { **backend_kwarg_defaults((""dpi"", ""plot.bokeh.figure.dpi""),), **backend_kwargs, } (figsize, *_, linewidth, markersize) = _scale_fig_size(figsize, textsize, rows, cols) if ax is None: _, axes = _create_axes_grid( length_plotters, rows, cols, figsize=figsize, backend=""bokeh"", backend_kwargs=backend_kwargs, ) else: axes = np.atleast_2d(ax) if len([item for item in axes.ravel() if not None]) != length_plotters: raise ValueError( ""Found {} variables to plot but {} axes instances. 
They must be equal."".format( length_plotters, len(axes) ) ) if alpha is None: if animated: alpha = 1 else: if kind.lower() == ""scatter"": alpha = 0.7 else: alpha = 0.2 if jitter is None: jitter = 0.0 if jitter < 0.0: raise ValueError(""jitter must be >=0."") for i, ax_i in enumerate((item for item in axes.flatten() if item is not None)): var_name, _, obs_vals = obs_plotters[i] pp_var_name, _, pp_vals = pp_plotters[i] dtype = predictive_dataset[pp_var_name].dtype.kind # flatten non-specified dimensions obs_vals = obs_vals.flatten() pp_vals = pp_vals.reshape(total_pp_samples, -1) pp_sampled_vals = pp_vals[pp_sample_ix] if kind == ""kde"": plot_kwargs = {""line_color"": ""red"", ""line_alpha"": alpha, ""line_width"": 0.5 * linewidth} pp_densities = [] pp_xs = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() if dtype == ""f"": pp_density, lower, upper = _fast_kde(vals) pp_x = np.linspace(lower, upper, len(pp_density)) pp_densities.append(pp_density) pp_xs.append(pp_x) else: bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) pp_densities.append(hist) pp_xs.append(bin_edges) if dtype == ""f"": ax_i.multi_line(pp_xs, pp_densities, **plot_kwargs) else: for x_s, y_s in zip(pp_xs, pp_densities): ax_i.step(x_s, y_s, **plot_kwargs) if dtype == ""f"": plot_kde( obs_vals, plot_kwargs={""line_color"": ""black"", ""line_width"": linewidth}, fill_kwargs={""alpha"": 0}, ax=ax_i, backend=""bokeh"", backend_kwargs={}, show=False, ) else: bins = get_bins(obs_vals) _, hist, bin_edges = histogram(obs_vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.step( bin_edges, hist, line_color=""black"", line_width=linewidth, mode=""center"", ) if mean: if dtype == ""f"": rep = len(pp_densities) len_density = len(pp_densities[0]) new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density) new_d = np.zeros((rep, len_density)) bins = np.digitize(pp_xs, new_x, right=True) new_x -= (new_x[1] - new_x[0]) / 2 for irep in range(rep): new_d[irep][bins[irep]] = pp_densities[irep] ax_i.line( new_x, new_d.mean(0), color=""blue"", line_dash=""dashed"", line_width=linewidth, ) else: vals = pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.step( bin_edges, hist, line_color=""blue"", line_width=linewidth, line_dash=""dashed"", mode=""center"", ) ax_i.yaxis.major_tick_line_color = None ax_i.yaxis.minor_tick_line_color = None ax_i.yaxis.major_label_text_font_size = ""0pt"" elif kind == ""cumulative"": if dtype == ""f"": ax_i.line( *_empirical_cdf(obs_vals), line_color=""black"", line_width=linewidth, ) else: ax_i.step( *_empirical_cdf(obs_vals), line_color=""black"", line_width=linewidth, mode=""center"", ) pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size)) for idx, vals in enumerate(pp_sampled_vals): vals = np.array([vals]).flatten() pp_x, pp_density = _empirical_cdf(vals) pp_densities[2 * idx] = pp_x pp_densities[2 * idx + 1] = pp_density ax_i.multi_line( list(pp_densities[::2]), list(pp_densities[1::2]), line_alpha=alpha, line_color=""pink"", line_width=linewidth, ) if mean: ax_i.line( *_empirical_cdf(pp_vals.flatten()), color=""blue"", line_dash=""dashed"", line_width=linewidth, ) elif kind == ""scatter"": if mean: if dtype == ""f"": plot_kde( pp_vals.flatten(), plot_kwargs={ ""line_color"": ""blue"", ""line_dash"": ""dashed"", ""line_width"": linewidth, }, ax=ax_i, backend=""bokeh"", backend_kwargs={}, show=False, ) else: vals = 
pp_vals.flatten() bins = get_bins(vals) _, hist, bin_edges = histogram(vals, bins=bins) hist = np.concatenate((hist[:1], hist)) ax_i.step( bin_edges, hist, color=""blue"", line_width=linewidth, line_dash=""dashed"", mode=""center"", ) jitter_scale = 0.1 y_rows = np.linspace(0, 0.1, num_pp_samples + 1) scale_low = 0 scale_high = jitter_scale * jitter obs_yvals = np.zeros_like(obs_vals, dtype=np.float64) if jitter: obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals)) ax_i.circle( obs_vals, obs_yvals, fill_color=""black"", size=markersize, line_alpha=alpha, ) for vals, y in zip(pp_sampled_vals, y_rows[1:]): vals = np.ravel(vals) yvals = np.full_like(vals, y, dtype=np.float64) if jitter: yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals)) ax_i.circle(vals, yvals, fill_color=""red"", size=markersize, fill_alpha=alpha) ax_i.yaxis.major_tick_line_color = None ax_i.yaxis.minor_tick_line_color = None ax_i.yaxis.major_label_text_font_size = ""0pt"" if var_name != pp_var_name: xlabel = ""{} / {}"".format(var_name, pp_var_name) else: xlabel = var_name ax_i.xaxis.axis_label = xlabel show_layout(axes, show) return axes " 34498,"def convert_nlu(training_data_path: Path, output_path: Path, source_path: Path): reader = MarkdownReader() writer = RasaYAMLWriter() training_data = reader.read(training_data_path) writer.dump(output_path, training_data) print_success(f""Converted NLU file: '{source_path}' >> '{output_path}'"") ","def convert_nlu(training_data_path: Path, output_path: Path, source_path: Path) -> None: reader = MarkdownReader() writer = RasaYAMLWriter() training_data = reader.read(training_data_path) writer.dump(output_path, training_data) print_success(f""Converted NLU file: '{source_path}' >> '{output_path}'"") " 23419,"def get_image_path(name): """"""Return image absolute path"""""" return IMAGE_PATH_MANAGER.get_image_path(name) ","def get_image_path(name): """"""Return absolute image path."""""" return IMAGE_PATH_MANAGER.get_image_path(name) " 44176,"def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs): """"""Computes the ExpvalCost and catches the initial deprecation warning."""""" with pytest.warns(UserWarning, match=""will be deprecated,""): res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs) return res ","def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs): """"""Computes the ExpvalCost and catches the initial deprecation warning."""""" with pytest.warns(UserWarning, match=""is deprecated,""): res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs) return res " 22484,"def _expand_macros(elements, macros, tokens, visited=None): if not macros and not tokens: return for element in elements: while True: expand_el = element.find('.//expand') if expand_el is None: break if visited is None: v = list() else: v = visited _expand_macro(expand_el, macros, tokens, v) ","def _expand_macros(elements, macros, tokens, visited=None): if not macros and not tokens: return for element in elements: while True: expand_el = element.find('.//expand') if expand_el is None: break if visited is None: v = [] else: v = visited _expand_macro(expand_el, macros, tokens, v) " 1511,"def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True, max_iter=100, tol=1e-4, verbose=0, solver='lbfgs', coef=None, class_weight=None, dual=False, penalty='l2', intercept_scaling=1., multi_class='auto', random_state=None, check_input=True, max_squared_sum=None, sample_weight=None, l1_ratio=None, precondition=True): """"""Compute a 
Logistic Regression model for a list of regularization parameters. This is an implementation that uses the result of the previous model to speed up computations along the set of solutions, making it faster than sequentially calling LogisticRegression for the different parameters. Note that there will be no speedup with liblinear solver, since it does not handle warm-starting. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Input data, target values. pos_class : int, None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. Cs : int | array-like, shape (n_cs,) List of values for the regularization parameter or integer specifying the number of regularization parameters that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4. fit_intercept : bool Whether to fit an intercept for the model. In this case the shape of the returned array is (n_cs, n_features + 1). max_iter : int Maximum number of iterations for the solver. tol : float Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1, ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient. verbose : int For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'} Numerical solver to use. coef : array-like, shape (n_features,), default None Initialization value for coefficients of logistic regression. Useless for liblinear solver. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The ""balanced"" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. penalty : str, 'l1', 'l2', or 'elasticnet' Used to specify the norm used in the penalization. The 'newton-cg', 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is only supported by the 'saga' solver. intercept_scaling : float, default 1. Useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a ""synthetic"" feature with constant value equal to intercept_scaling is appended to the instance vector. The intercept becomes ``intercept_scaling * synthetic_feature_weight``. Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : {'ovr', 'multinomial', 'auto'}, default='auto' If the option chosen is 'ovr', then a binary problem is fit for each label. For 'multinomial' the loss minimised is the multinomial loss fit across the entire probability distribution, *even when the data is binary*. 'multinomial' is unavailable when solver='liblinear'. 
'auto' selects 'ovr' if the data is binary, or if solver='liblinear', and otherwise selects 'multinomial'. .. versionadded:: 0.18 Stochastic Average Gradient descent solver for 'multinomial' case. .. versionchanged:: 0.22 Default changed from 'ovr' to 'auto' in 0.22. random_state : int, RandomState instance or None, optional, default None The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'sag' or 'liblinear'. check_input : bool, default True If False, the input arrays X and y will not be checked. max_squared_sum : float, default None Maximum squared sum of X over samples. Used only in SAG solver. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. sample_weight : array-like, shape(n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. l1_ratio : float or None, optional (default=None) The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. Returns ------- coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. For ``multiclass='multinomial'``, the shape is (n_classes, n_cs, n_features) or (n_classes, n_cs, n_features + 1). Cs : ndarray Grid of Cs used for cross-validation. n_iter : array, shape (n_cs,) Actual number of iteration for each Cs. Notes ----- You might get slightly different results with the solver liblinear than with the others since this uses LIBLINEAR which penalizes the intercept. .. versionchanged:: 0.19 The ""copy"" parameter was removed. """""" if isinstance(Cs, numbers.Integral): Cs = np.logspace(-4, 4, Cs) solver = _check_solver(solver, penalty, dual) # Preprocessing. if check_input: X = check_array(X, accept_sparse='csr', dtype=np.float64, accept_large_sparse=solver != 'liblinear') y = check_array(y, ensure_2d=False, dtype=None) check_consistent_length(X, y) _, n_features = X.shape classes = np.unique(y) random_state = check_random_state(random_state) multi_class = _check_multi_class(multi_class, solver, len(classes)) if pos_class is None and multi_class != 'multinomial': if (classes.size > 2): raise ValueError('To fit OvR, use the pos_class argument') # np.unique(y) gives labels in sorted order. pos_class = classes[1] # If sample weights exist, convert them to array (support for lists) # and check length # Otherwise set them to 1 for all examples sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) # If class_weights is a dict (provided by the user), the weights # are assigned to the original labels. If it is ""balanced"", then # the class_weights are assigned after masking the labels with a OvR. 
le = LabelEncoder() if isinstance(class_weight, dict) or multi_class == 'multinomial': class_weight_ = compute_class_weight(class_weight, classes, y) sample_weight *= class_weight_[le.fit_transform(y)] # For doing a ovr, we need to mask the labels first. for the # multinomial case this is not necessary. if multi_class == 'ovr': w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype) mask_classes = np.array([-1, 1]) mask = (y == pos_class) y_bin = np.ones(y.shape, dtype=X.dtype) y_bin[~mask] = -1. # for compute_class_weight if class_weight == ""balanced"": class_weight_ = compute_class_weight(class_weight, mask_classes, y_bin) sample_weight *= class_weight_[le.fit_transform(y_bin)] else: if solver not in ['sag', 'saga']: lbin = LabelBinarizer() Y_multi = lbin.fit_transform(y) if Y_multi.shape[1] == 1: Y_multi = np.hstack([1 - Y_multi, Y_multi]) else: # SAG multinomial solver needs LabelEncoder, not LabelBinarizer le = LabelEncoder() Y_multi = le.fit_transform(y).astype(X.dtype, copy=False) w0 = np.zeros((classes.size, n_features + int(fit_intercept)), order='F', dtype=X.dtype) # preconditioning X_pre = X X_scale = None X_offset = None if precondition and solver == 'lbfgs': # FIXME this duplicates come code from _preprocess_data # and should be refactored if sparse.issparse(X): X_mean, X_var = mean_variance_axis(X, axis=0) X_scale = np.sqrt(X_var, X_var) X_scale[X_scale == 0] = 1 del X_var if fit_intercept: X_offset = -X_mean # can we actually do inplace here? inplace_column_scale(X_pre, 1 / X_scale) else: X_mean = X.mean(axis=0) if fit_intercept: X_pre = X - X_mean X_scale = X.std(axis=0) X_scale[X_scale == 0] = 1 X_pre = X_pre / X_scale # warm starting if coef is not None: # it must work both giving the bias term and not if multi_class == 'ovr': if coef.size not in (n_features, w0.size): raise ValueError( 'Initialization coef is of shape %d, expected shape ' '%d or %d' % (coef.size, n_features, w0.size)) w0[:coef.size] = coef if solver == 'lbfgs' and precondition: if fit_intercept: w0[-1] += np.inner(w0[:n_features], X_mean) w0[:n_features] *= X_scale else: # For binary problems coef.shape[0] should be 1, otherwise it # should be classes.size. n_classes = classes.size if n_classes == 2: n_classes = 1 if (coef.shape[0] != n_classes or coef.shape[1] not in (n_features, n_features + 1)): raise ValueError( 'Initialization coef is of shape (%d, %d), expected ' 'shape (%d, %d) or (%d, %d)' % ( coef.shape[0], coef.shape[1], classes.size, n_features, classes.size, n_features + 1)) if n_classes == 1: w0[0, :coef.shape[1]] = -coef w0[1, :coef.shape[1]] = coef else: w0[:, :coef.shape[1]] = coef if solver == 'lbfgs' and precondition: if fit_intercept: w0[:, -1] += np.dot(w0[:, :n_features], X_mean) w0[:, :n_features] *= X_scale if multi_class == 'multinomial': # scipy.optimize.minimize and newton-cg accepts only # ravelled parameters. 
if solver in ['lbfgs', 'newton-cg']: w0 = w0.ravel() target = Y_multi if solver == 'lbfgs': func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2] elif solver == 'newton-cg': func = lambda x, *args: _multinomial_loss(x, *args)[0] grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1] hess = _multinomial_grad_hess warm_start_sag = {'coef': w0.T} else: # binary logistic regression target = y_bin if solver == 'lbfgs': func = _logistic_loss_and_grad elif solver == 'newton-cg': func = _logistic_loss grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1] hess = _logistic_grad_hess warm_start_sag = {'coef': np.expand_dims(w0, axis=1)} coefs = list() n_iter = np.zeros(len(Cs), dtype=np.int32) for i, C in enumerate(Cs): if solver == 'lbfgs': iprint = [-1, 50, 1, 100, 101][ np.searchsorted(np.array([0, 1, 2, 3]), verbose)] opt_res = optimize.minimize( func, w0, method=""L-BFGS-B"", jac=True, args=(X_pre, target, 1. / C, sample_weight, X_scale, X_offset), options={""iprint"": iprint, ""gtol"": tol, ""maxiter"": max_iter} ) n_iter_i = _check_optimize_result(solver, opt_res, max_iter) w0, loss = opt_res.x, opt_res.fun if precondition and multi_class != 'multinomial': # adjust weight scale for rescaling w0[:n_features] = w0[:n_features] / X_scale # adjust intercept for mean subtraction if fit_intercept: w0[-1] = w0[-1] - np.inner(w0[:-1], X_mean) elif solver == 'newton-cg': args = (X, target, 1. / C, sample_weight) w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol) elif solver == 'liblinear': coef_, intercept_, n_iter_i, = _fit_liblinear( X, target, C, fit_intercept, intercept_scaling, None, penalty, dual, verbose, max_iter, tol, random_state, sample_weight=sample_weight) if fit_intercept: w0 = np.concatenate([coef_.ravel(), intercept_]) else: w0 = coef_.ravel() elif solver in ['sag', 'saga']: if multi_class == 'multinomial': target = target.astype(X.dtype, copy=False) loss = 'multinomial' else: loss = 'log' # alpha is for L2-norm, beta is for L1-norm if penalty == 'l1': alpha = 0. beta = 1. / C elif penalty == 'l2': alpha = 1. / C beta = 0. else: # Elastic-Net penalty alpha = (1. / C) * (1 - l1_ratio) beta = (1. / C) * l1_ratio w0, n_iter_i, warm_start_sag = sag_solver( X, target, sample_weight, loss, alpha, beta, max_iter, tol, verbose, random_state, False, max_squared_sum, warm_start_sag, is_saga=(solver == 'saga')) else: raise ValueError(""solver must be one of {'liblinear', 'lbfgs', "" ""'newton-cg', 'sag'}, got '%s' instead"" % solver) if multi_class == 'multinomial': n_classes = max(2, classes.size) multi_w0 = np.reshape(w0, (n_classes, -1)) if solver == 'lbfgs' and precondition: if fit_intercept: multi_w0[:, :-1] = multi_w0[:, :-1] / X_scale # adjust intercept for preconditioning multi_w0[:, -1] = (multi_w0[:, -1] - np.dot(multi_w0[:, :-1], X_mean)) else: multi_w0 = multi_w0 / X_scale if n_classes == 2: multi_w0 = multi_w0[1][np.newaxis, :] coefs.append(multi_w0.copy()) else: coefs.append(w0.copy()) n_iter[i] = n_iter_i return np.array(coefs), np.array(Cs), n_iter ","def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True, max_iter=100, tol=1e-4, verbose=0, solver='lbfgs', coef=None, class_weight=None, dual=False, penalty='l2', intercept_scaling=1., multi_class='auto', random_state=None, check_input=True, max_squared_sum=None, sample_weight=None, l1_ratio=None, precondition=True): """"""Compute a Logistic Regression model for a list of regularization parameters. 
This is an implementation that uses the result of the previous model to speed up computations along the set of solutions, making it faster than sequentially calling LogisticRegression for the different parameters. Note that there will be no speedup with liblinear solver, since it does not handle warm-starting. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Input data, target values. pos_class : int, None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. Cs : int | array-like, shape (n_cs,) List of values for the regularization parameter or integer specifying the number of regularization parameters that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4. fit_intercept : bool Whether to fit an intercept for the model. In this case the shape of the returned array is (n_cs, n_features + 1). max_iter : int Maximum number of iterations for the solver. tol : float Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1, ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient. verbose : int For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'} Numerical solver to use. coef : array-like, shape (n_features,), default None Initialization value for coefficients of logistic regression. Useless for liblinear solver. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The ""balanced"" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. penalty : str, 'l1', 'l2', or 'elasticnet' Used to specify the norm used in the penalization. The 'newton-cg', 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is only supported by the 'saga' solver. intercept_scaling : float, default 1. Useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a ""synthetic"" feature with constant value equal to intercept_scaling is appended to the instance vector. The intercept becomes ``intercept_scaling * synthetic_feature_weight``. Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : {'ovr', 'multinomial', 'auto'}, default='auto' If the option chosen is 'ovr', then a binary problem is fit for each label. For 'multinomial' the loss minimised is the multinomial loss fit across the entire probability distribution, *even when the data is binary*. 'multinomial' is unavailable when solver='liblinear'. 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', and otherwise selects 'multinomial'. .. 
versionadded:: 0.18 Stochastic Average Gradient descent solver for 'multinomial' case. .. versionchanged:: 0.22 Default changed from 'ovr' to 'auto' in 0.22. random_state : int, RandomState instance or None, optional, default None The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'sag' or 'liblinear'. check_input : bool, default True If False, the input arrays X and y will not be checked. max_squared_sum : float, default None Maximum squared sum of X over samples. Used only in SAG solver. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. sample_weight : array-like, shape(n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. l1_ratio : float or None, optional (default=None) The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. Returns ------- coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. For ``multiclass='multinomial'``, the shape is (n_classes, n_cs, n_features) or (n_classes, n_cs, n_features + 1). Cs : ndarray Grid of Cs used for cross-validation. n_iter : array, shape (n_cs,) Actual number of iteration for each Cs. Notes ----- You might get slightly different results with the solver liblinear than with the others since this uses LIBLINEAR which penalizes the intercept. .. versionchanged:: 0.19 The ""copy"" parameter was removed. """""" if isinstance(Cs, numbers.Integral): Cs = np.logspace(-4, 4, Cs) solver = _check_solver(solver, penalty, dual) # Preprocessing. if check_input: X = check_array(X, accept_sparse='csr', dtype=np.float64, accept_large_sparse=solver != 'liblinear') y = check_array(y, ensure_2d=False, dtype=None) check_consistent_length(X, y) _, n_features = X.shape classes = np.unique(y) random_state = check_random_state(random_state) multi_class = _check_multi_class(multi_class, solver, len(classes)) if pos_class is None and multi_class != 'multinomial': if (classes.size > 2): raise ValueError('To fit OvR, use the pos_class argument') # np.unique(y) gives labels in sorted order. pos_class = classes[1] # If sample weights exist, convert them to array (support for lists) # and check length # Otherwise set them to 1 for all examples sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) # If class_weights is a dict (provided by the user), the weights # are assigned to the original labels. If it is ""balanced"", then # the class_weights are assigned after masking the labels with a OvR. le = LabelEncoder() if isinstance(class_weight, dict) or multi_class == 'multinomial': class_weight_ = compute_class_weight(class_weight, classes, y) sample_weight *= class_weight_[le.fit_transform(y)] # For doing a ovr, we need to mask the labels first. for the # multinomial case this is not necessary. 
if multi_class == 'ovr': w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype) mask_classes = np.array([-1, 1]) mask = (y == pos_class) y_bin = np.ones(y.shape, dtype=X.dtype) y_bin[~mask] = -1. # for compute_class_weight if class_weight == ""balanced"": class_weight_ = compute_class_weight(class_weight, mask_classes, y_bin) sample_weight *= class_weight_[le.fit_transform(y_bin)] else: if solver not in ['sag', 'saga']: lbin = LabelBinarizer() Y_multi = lbin.fit_transform(y) if Y_multi.shape[1] == 1: Y_multi = np.hstack([1 - Y_multi, Y_multi]) else: # SAG multinomial solver needs LabelEncoder, not LabelBinarizer le = LabelEncoder() Y_multi = le.fit_transform(y).astype(X.dtype, copy=False) w0 = np.zeros((classes.size, n_features + int(fit_intercept)), order='F', dtype=X.dtype) # preconditioning X_pre = X X_scale = None X_offset = None if precondition and solver == 'lbfgs': # FIXME this duplicates some code from _preprocess_data # and should be refactored if sparse.issparse(X): X_mean, X_var = mean_variance_axis(X, axis=0) X_scale = np.sqrt(X_var, X_var) X_scale[X_scale == 0] = 1 del X_var if fit_intercept: X_offset = -X_mean # can we actually do inplace here? inplace_column_scale(X_pre, 1 / X_scale) else: X_mean = X.mean(axis=0) if fit_intercept: X_pre = X - X_mean X_scale = X.std(axis=0) X_scale[X_scale == 0] = 1 X_pre = X_pre / X_scale # warm starting if coef is not None: # it must work both giving the bias term and not if multi_class == 'ovr': if coef.size not in (n_features, w0.size): raise ValueError( 'Initialization coef is of shape %d, expected shape ' '%d or %d' % (coef.size, n_features, w0.size)) w0[:coef.size] = coef if solver == 'lbfgs' and precondition: if fit_intercept: w0[-1] += np.inner(w0[:n_features], X_mean) w0[:n_features] *= X_scale else: # For binary problems coef.shape[0] should be 1, otherwise it # should be classes.size. n_classes = classes.size if n_classes == 2: n_classes = 1 if (coef.shape[0] != n_classes or coef.shape[1] not in (n_features, n_features + 1)): raise ValueError( 'Initialization coef is of shape (%d, %d), expected ' 'shape (%d, %d) or (%d, %d)' % ( coef.shape[0], coef.shape[1], classes.size, n_features, classes.size, n_features + 1)) if n_classes == 1: w0[0, :coef.shape[1]] = -coef w0[1, :coef.shape[1]] = coef else: w0[:, :coef.shape[1]] = coef if solver == 'lbfgs' and precondition: if fit_intercept: w0[:, -1] += np.dot(w0[:, :n_features], X_mean) w0[:, :n_features] *= X_scale if multi_class == 'multinomial': # scipy.optimize.minimize and newton-cg accepts only # ravelled parameters. if solver in ['lbfgs', 'newton-cg']: w0 = w0.ravel() target = Y_multi if solver == 'lbfgs': func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2] elif solver == 'newton-cg': func = lambda x, *args: _multinomial_loss(x, *args)[0] grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1] hess = _multinomial_grad_hess warm_start_sag = {'coef': w0.T} else: # binary logistic regression target = y_bin if solver == 'lbfgs': func = _logistic_loss_and_grad elif solver == 'newton-cg': func = _logistic_loss grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1] hess = _logistic_grad_hess warm_start_sag = {'coef': np.expand_dims(w0, axis=1)} coefs = list() n_iter = np.zeros(len(Cs), dtype=np.int32) for i, C in enumerate(Cs): if solver == 'lbfgs': iprint = [-1, 50, 1, 100, 101][ np.searchsorted(np.array([0, 1, 2, 3]), verbose)] opt_res = optimize.minimize( func, w0, method=""L-BFGS-B"", jac=True, args=(X_pre, target, 1. 
/ C, sample_weight, X_scale, X_offset), options={""iprint"": iprint, ""gtol"": tol, ""maxiter"": max_iter} ) n_iter_i = _check_optimize_result(solver, opt_res, max_iter) w0, loss = opt_res.x, opt_res.fun if precondition and multi_class != 'multinomial': # adjust weight scale for rescaling w0[:n_features] = w0[:n_features] / X_scale # adjust intercept for mean subtraction if fit_intercept: w0[-1] = w0[-1] - np.inner(w0[:-1], X_mean) elif solver == 'newton-cg': args = (X, target, 1. / C, sample_weight) w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol) elif solver == 'liblinear': coef_, intercept_, n_iter_i, = _fit_liblinear( X, target, C, fit_intercept, intercept_scaling, None, penalty, dual, verbose, max_iter, tol, random_state, sample_weight=sample_weight) if fit_intercept: w0 = np.concatenate([coef_.ravel(), intercept_]) else: w0 = coef_.ravel() elif solver in ['sag', 'saga']: if multi_class == 'multinomial': target = target.astype(X.dtype, copy=False) loss = 'multinomial' else: loss = 'log' # alpha is for L2-norm, beta is for L1-norm if penalty == 'l1': alpha = 0. beta = 1. / C elif penalty == 'l2': alpha = 1. / C beta = 0. else: # Elastic-Net penalty alpha = (1. / C) * (1 - l1_ratio) beta = (1. / C) * l1_ratio w0, n_iter_i, warm_start_sag = sag_solver( X, target, sample_weight, loss, alpha, beta, max_iter, tol, verbose, random_state, False, max_squared_sum, warm_start_sag, is_saga=(solver == 'saga')) else: raise ValueError(""solver must be one of {'liblinear', 'lbfgs', "" ""'newton-cg', 'sag'}, got '%s' instead"" % solver) if multi_class == 'multinomial': n_classes = max(2, classes.size) multi_w0 = np.reshape(w0, (n_classes, -1)) if solver == 'lbfgs' and precondition: if fit_intercept: multi_w0[:, :-1] = multi_w0[:, :-1] / X_scale # adjust intercept for preconditioning multi_w0[:, -1] = (multi_w0[:, -1] - np.dot(multi_w0[:, :-1], X_mean)) else: multi_w0 = multi_w0 / X_scale if n_classes == 2: multi_w0 = multi_w0[1][np.newaxis, :] coefs.append(multi_w0.copy()) else: coefs.append(w0.copy()) n_iter[i] = n_iter_i return np.array(coefs), np.array(Cs), n_iter " 6787,"def make_boilerplate(template, doc, opts=None): target_path = get_doc_path(doc.module, doc.doctype, doc.name) template_name = template.replace(""controller"", scrub(doc.name)) if template_name.endswith('._py'): template_name = template_name[:-4] + '.py' target_file_path = os.path.join(target_path, template_name) if not doc: doc = {} app_publisher = get_app_publisher(doc.module) if not os.path.exists(target_file_path): if not opts: opts = {} base_class = 'Document' base_class_import = 'from frappe.model.document import Document' if hasattr(doc, ""is_tree""): base_class = 'NestedSet' base_class_import = 'from frappe.utils.nestedset import NestedSet' with open(target_file_path, 'w') as target: with open(os.path.join(get_module_path(""core""), ""doctype"", scrub(doc.doctype), ""boilerplate"", template), 'r') as source: target.write(frappe.as_unicode( frappe.utils.cstr(source.read()).format( app_publisher=app_publisher, year=frappe.utils.nowdate()[:4], classname=doc.name.replace("" "", """"), base_class_import=base_class_import, base_class=base_class, doctype=doc.name, **opts) )) ","def make_boilerplate(template, doc, opts=None): target_path = get_doc_path(doc.module, doc.doctype, doc.name) template_name = template.replace(""controller"", scrub(doc.name)) if template_name.endswith('._py'): template_name = template_name[:-4] + '.py' target_file_path = os.path.join(target_path, template_name) if not 
doc: doc = {} app_publisher = get_app_publisher(doc.module) if not os.path.exists(target_file_path): if not opts: opts = {} base_class = 'Document' base_class_import = 'from frappe.model.document import Document' if doc.get('is_tree'): base_class = 'NestedSet' base_class_import = 'from frappe.utils.nestedset import NestedSet' with open(target_file_path, 'w') as target: with open(os.path.join(get_module_path(""core""), ""doctype"", scrub(doc.doctype), ""boilerplate"", template), 'r') as source: target.write(frappe.as_unicode( frappe.utils.cstr(source.read()).format( app_publisher=app_publisher, year=frappe.utils.nowdate()[:4], classname=doc.name.replace("" "", """"), base_class_import=base_class_import, base_class=base_class, doctype=doc.name, **opts) )) " 34420,"def _add_confused_labels_to_report( report: Dict[Text, Dict[Text, Any]], confusion_matrix: np.ndarray, labels: List[Text], exclude_labels: List[Text] = None, ) -> Dict[Text, Dict[Text, Union[Dict, Any]]]: """"""Adds a field ""confused_with"" to the evaluation report. The value is a dict of {""false_positive_label"": false_positive_count} pairs. If there are no false positives in the confusion matrix, the dict will be empty. Typically we include the two most commonly false positive labels, three in the rare case that the diagonal element in the confusion matrix is not one of the three highest values in the row. Args: report: the evaluation report confusion_matrix: confusion matrix labels: list of labels Returns: updated evaluation report """""" if exclude_labels is None: exclude_labels = [] # sort confusion matrix by false positives indices = np.argsort(confusion_matrix, axis=1) n_candidates = min(3, len(labels)) for label in labels: if label in exclude_labels: continue # it is possible to predict intent 'None' if report.get(label): report[label][""confused_with""] = {} for i, label in enumerate(labels): if label in exclude_labels: continue for j in range(n_candidates): label_idx = indices[i, -(1 + j)] false_pos_label = labels[label_idx] false_positives = int(confusion_matrix[i, label_idx]) if ( false_pos_label != label and false_pos_label not in exclude_labels and false_positives > 0 ): report[label][""confused_with""][false_pos_label] = false_positives return report ","def _add_confused_labels_to_report( report: Dict[Text, Dict[Text, Any]], confusion_matrix: np.ndarray, labels: List[Text], exclude_labels: Optional[List[Text]] = None, ) -> Dict[Text, Dict[Text, Union[Dict, Any]]]: """"""Adds a field ""confused_with"" to the evaluation report. The value is a dict of {""false_positive_label"": false_positive_count} pairs. If there are no false positives in the confusion matrix, the dict will be empty. Typically we include the two most commonly false positive labels, three in the rare case that the diagonal element in the confusion matrix is not one of the three highest values in the row. 
Args: report: the evaluation report confusion_matrix: confusion matrix labels: list of labels Returns: updated evaluation report """""" if exclude_labels is None: exclude_labels = [] # sort confusion matrix by false positives indices = np.argsort(confusion_matrix, axis=1) n_candidates = min(3, len(labels)) for label in labels: if label in exclude_labels: continue # it is possible to predict intent 'None' if report.get(label): report[label][""confused_with""] = {} for i, label in enumerate(labels): if label in exclude_labels: continue for j in range(n_candidates): label_idx = indices[i, -(1 + j)] false_pos_label = labels[label_idx] false_positives = int(confusion_matrix[i, label_idx]) if ( false_pos_label != label and false_pos_label not in exclude_labels and false_positives > 0 ): report[label][""confused_with""][false_pos_label] = false_positives return report " 54268,"def get_private_registry_docs_image_pull_secrets(): repository = ""bob-the-registry"" secret_name = ""bob-the-registry-secret"" kubernetes_objects = { ""Deployment"": ""spec.template.spec.imagePullSecrets"", ""StatefulSet"": ""spec.template.spec.imagePullSecrets"", ""Job"": ""spec.template.spec.imagePullSecrets"", ""DaemonSet"": ""spec.template.spec.imagePullSecrets"", ""Pod"": ""spec.template.spec.imagePullSecrets"", ""CronJob"": ""spec.jobTemplate.spec.template.spec.imagePullSecrets"", } docs = render_chart( values={ ""global"": { ""privateRegistry"": { ""enabled"": True, ""repository"": repository, ""secretName"": secret_name, } } }, ) searched_docs = [] for key, val in kubernetes_objects.items(): searched_docs += jmespath.search( ""[?kind == `"" + key + ""`].{name: metadata.name, kind: kind, image_pull_secrets: "" + val + ""}"", docs, ) formatted_docs = {} for searched_doc in searched_docs: name = searched_doc[""name""] + ""_"" + searched_doc[""kind""] formatted_docs[name] = searched_doc[""image_pull_secrets""] return formatted_docs ","def get_private_registry_docs_image_pull_secrets(): repository = ""bob-the-registry"" secret_name = ""bob-the-registry-secret"" kubernetes_objects = { ""Deployment"": ""spec.template.spec.imagePullSecrets"", ""StatefulSet"": ""spec.template.spec.imagePullSecrets"", ""Job"": ""spec.template.spec.imagePullSecrets"", ""DaemonSet"": ""spec.template.spec.imagePullSecrets"", ""Pod"": ""spec.template.spec.imagePullSecrets"", ""CronJob"": ""spec.jobTemplate.spec.template.spec.imagePullSecrets"", } docs = render_chart( values={ ""global"": { ""privateRegistry"": { ""enabled"": True, ""repository"": repository, ""secretName"": secret_name, } } }, ) searched_docs = [] for key, val in kubernetes_objects.items(): searched_docs += jmespath.search( ""[?kind == `%s`].{name: metadata.name, kind: kind, image_pull_secrets: %s}"" % (key, val), docs, ) formatted_docs = {} for searched_doc in searched_docs: name = searched_doc[""name""] + ""_"" + searched_doc[""kind""] formatted_docs[name] = searched_doc[""image_pull_secrets""] return formatted_docs " 8060,"def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None): """""" Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> >>> # set up a new hdf5 table to demonstrate with >>> class FooBar(tables.IsDescription): # doctest: +SKIP ... foo = tables.Int32Col(pos=0) # doctest: +SKIP ... bar = tables.StringCol(6, pos=2) # doctest: +SKIP >>> >>> def setup_hdfs5_index(): ... import tables ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') ... 
h5file.create_group('/', 'testgroup', 'Test Group') ... h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') ... # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) ... for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... h5table.cols.foo.create_csindex() # CS index is required ... h5file.flush() ... h5file.close() >>> >>> setup_hdfs5_index() # doctest: +SKIP >>> ... # access the data, sorted by the indexed column ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', sortby='foo') # doctest: +SKIP >>> table2 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+ """""" assert sortby is not None, 'no column specified to sort by' return HDF5SortedView(source, where=where, name=name, sortby=sortby, checkCSI=checkCSI, start=start, stop=stop, step=step) ","def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None): """""" Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> >>> # set up a new hdf5 table to demonstrate with >>> class FooBar(tables.IsDescription): # doctest: +SKIP ... foo = tables.Int32Col(pos=0) # doctest: +SKIP ... bar = tables.StringCol(6, pos=2) # doctest: +SKIP >>> >>> def setup_hdfs5_index(): ... import tables ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') ... h5file.create_group('/', 'testgroup', 'Test Group') ... h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') ... # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) ... for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... h5table.cols.foo.create_csindex() # CS index is required ... h5file.flush() ... h5file.close() >>> >>> setup_hdf5_index() # doctest: +SKIP >>> ... # access the data, sorted by the indexed column ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', sortby='foo') # doctest: +SKIP >>> table2 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+ """""" assert sortby is not None, 'no column specified to sort by' return HDF5SortedView(source, where=where, name=name, sortby=sortby, checkCSI=checkCSI, start=start, stop=stop, step=step) " 27912,"def observe_lr(optimizer_name='main', observation_key='lr'): """"""Returns a trainer extension to record the learning rate. Args: optimizer_name (str): Name of optimizer whose learning rate is recorded. observation_key (str): Key of observation to record. Returns: The extension function. This extension is triggered every 1 epoch by default. To change this, specify ``trigger`` argument to :meth:`Trainer.extend() ` method. """""" return observe_value( observation_key, lambda trainer: trainer.updater.get_optimizer(optimizer_name).lr) ","def observe_lr(optimizer_name='main', observation_key='lr'): """"""Returns a trainer extension to record the learning rate. Args: optimizer_name (str): Name of optimizer whose learning rate is recorded. observation_key (str): Key of observation to record. Returns: The extension function. 
This extension is triggered each epoch by default. To change this, specify ``trigger`` argument to :meth:`Trainer.extend() ` method. """""" return observe_value( observation_key, lambda trainer: trainer.updater.get_optimizer(optimizer_name).lr) " 41551,"def get_new_subject_split_new(df, data_testing, random_seed, train_frac, test_frac, log_directory, balance, subject_selection=None): """"""Randomly split dataset between training / validation / testing. Randomly split dataset between training / validation / testing\ and save it in log_directory + ""/split_datasets.joblib"". Args: df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata. data_testing (dict): Used to specify the data_type and data_value to include in the testing set. random_seed (int): Random seed. train_frac (float): Training dataset proportion, between 0 and 1. test_frac (float): Testing dataset proportionm between 0 and 1. log_directory (string): Output folder. balance (string): Metadata contained in ""participants.tsv"" file with categorical values. Each category will be evenly distributed in the training, validation and testing datasets. subject_selection (dict): Used to specify a custom subject selection from a dataset. Returns: list, list list: Training, validation and testing subjects lists. """""" if subject_selection is not None: # Verify subject_selection format if not (len(subject_selection[""metadata""]) == len(subject_selection[""n""]) == len(subject_selection[""value""])): raise ValueError(""All lists in subject_selection parameter should have the same length."") sampled_dfs = [] for m, n, v in zip(subject_selection[""metadata""], subject_selection[""n""], subject_selection[""value""]): sampled_dfs.append(df[df[m] == v].sample(n=n, random_state=random_seed)) if len(sampled_dfs) != 0: df = pd.concat(sampled_dfs) # If balance, then split the dataframe for each categorical value of the ""balance"" column if balance: if balance in df.keys(): df_list = [df[df[balance] == k] for k in df[balance].unique().tolist()] else: logger.warning(""No column named '{}' was found in 'participants.tsv' file. Not taken into account to split "" ""the dataset."".format(balance)) df_list = [df] else: df_list = [df] train_lst, valid_lst, test_lst = [], [], [] for df_tmp in df_list: # Split dataset on each section of subjects train_tmp, valid_tmp, test_tmp = split_dataset_new(df=df_tmp, data_testing=data_testing, random_seed=random_seed, train_frac=train_frac, test_frac=test_frac) # Update the dataset lists train_lst += train_tmp valid_lst += valid_tmp test_lst += test_tmp # save the subject distribution split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst} split_path = os.path.join(log_directory, ""split_datasets.joblib"") joblib.dump(split_dct, split_path) return train_lst, valid_lst, test_lst ","def get_new_subject_split_new(df, data_testing, random_seed, train_frac, test_frac, log_directory, balance, subject_selection=None): """"""Randomly split dataset between training / validation / testing. Randomly split dataset between training / validation / testing\ and save it in log_directory + ""/split_datasets.joblib"". Args: df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata. data_testing (dict): Used to specify the data_type and data_value to include in the testing set. random_seed (int): Random seed. train_frac (float): Training dataset proportion, between 0 and 1. test_frac (float): Testing dataset proportionm between 0 and 1. 
log_directory (string): Output folder. balance (str): Metadata contained in ""participants.tsv"" file with categorical values. Each category will be evenly distributed in the training, validation and testing datasets. subject_selection (dict): Used to specify a custom subject selection from a dataset. Returns: list, list list: Training, validation and testing subjects lists. """""" if subject_selection is not None: # Verify subject_selection format if not (len(subject_selection[""metadata""]) == len(subject_selection[""n""]) == len(subject_selection[""value""])): raise ValueError(""All lists in subject_selection parameter should have the same length."") sampled_dfs = [] for m, n, v in zip(subject_selection[""metadata""], subject_selection[""n""], subject_selection[""value""]): sampled_dfs.append(df[df[m] == v].sample(n=n, random_state=random_seed)) if len(sampled_dfs) != 0: df = pd.concat(sampled_dfs) # If balance, then split the dataframe for each categorical value of the ""balance"" column if balance: if balance in df.keys(): df_list = [df[df[balance] == k] for k in df[balance].unique().tolist()] else: logger.warning(""No column named '{}' was found in 'participants.tsv' file. Not taken into account to split "" ""the dataset."".format(balance)) df_list = [df] else: df_list = [df] train_lst, valid_lst, test_lst = [], [], [] for df_tmp in df_list: # Split dataset on each section of subjects train_tmp, valid_tmp, test_tmp = split_dataset_new(df=df_tmp, data_testing=data_testing, random_seed=random_seed, train_frac=train_frac, test_frac=test_frac) # Update the dataset lists train_lst += train_tmp valid_lst += valid_tmp test_lst += test_tmp # save the subject distribution split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst} split_path = os.path.join(log_directory, ""split_datasets.joblib"") joblib.dump(split_dct, split_path) return train_lst, valid_lst, test_lst " 33023,"def save_graph_osm(G, node_tags=settings.osm_xml_node_tags, node_attrs=settings.osm_xml_node_attrs, edge_tags=settings.osm_xml_way_tags, edge_attrs=settings.osm_xml_way_attrs, oneway=False, filename='graph.osm', folder=None): """""" Save a graph as an OSM XML formatted file. NOTE: for very large networks this method can take upwards of 30+ minutes to finish. Parameters __________ G : networkx multidigraph or multigraph filename : string the name of the osm file (including file extension) folder : string the folder to contain the file, if None, use default data folder Returns ------- None """""" start_time = time.time() if folder is None: folder = settings.data_folder # get undirected graph so we don't generate duplicate nodes H = get_undirected(G) # create a copy to convert all the node/edge attribute values to string H_save = H.copy() gdf_nodes, gdf_edges = graph_to_gdfs( H_save, node_geometry=False, fill_edge_geometry=False) # rename columns per osm specification gdf_nodes.rename( columns={'osmid': 'id', 'x': 'lon', 'y': 'lat'}, inplace=True) if 'uniqueid' in gdf_edges.columns: gdf_edges = gdf_edges.rename(columns={'uniqueid': 'id'}) else: gdf_edges = gdf_edges.reset_index().rename(columns={'index': 'id'}) # add default values for required attributes for table in [gdf_nodes, gdf_edges]: table['uid'] = '1' table['user'] = 'osmnx' table['version'] = '1' table['changeset'] = '1' table['timestamp'] = '2017-01-01T00:00:00Z' # convert all datatypes to str nodes = gdf_nodes.applymap(str) edges = gdf_edges.applymap(str) # misc. 
string replacements to meet OSM XML spec if 'oneway' in edges.columns: # fill blank oneway tags with default (False) edges.loc[pd.isnull(edges['oneway']), 'oneway'] = oneway edges.loc[:, 'oneway'] = edges['oneway'].astype(str) edges.loc[:, 'oneway'] = edges['oneway'].str.replace( 'False', 'no').replace('True', 'yes') # initialize XML tree with an OSM root element root = etree.Element('osm', attrib={'version': '1', 'generator': 'OSMnx'}) # append nodes to the XML tree for i, row in nodes.iterrows(): node = etree.SubElement( root, 'node', attrib=row[node_attrs].dropna().to_dict()) for tag in node_tags: etree.SubElement( node, 'tag', attrib={'k': tag, 'v': row[tag]}) # append edges to the XML tree for e in edges.id.unique(): all_way_edges = edges[edges['id'] == e] first = all_way_edges.iloc[0] edge = etree.SubElement( root, 'way', attrib=first[edge_attrs].dropna().to_dict()) if len(all_way_edges) == 1: etree.SubElement(edge, 'nd', attrib={'ref': first['u']}) etree.SubElement(edge, 'nd', attrib={'ref': first['v']}) else: ordered_nodes = get_unique_nodes_ordered_from_way(all_way_edges) for node in ordered_nodes: etree.SubElement(edge, 'nd', attrib={'ref': node}) for tag in edge_tags: etree.SubElement( edge, 'tag', attrib={'k': tag, 'v': first[tag]}) et = etree.ElementTree(root) if not os.path.exists(folder): os.makedirs(folder) et.write(os.path.join(folder, filename)) log('Saved graph to disk as OSM at ""{}"" in {:,.2f} seconds'.format( os.path.join(folder, filename), time.time() - start_time)) ","def save_graph_osm(G, node_tags=settings.osm_xml_node_tags, node_attrs=settings.osm_xml_node_attrs, edge_tags=settings.osm_xml_way_tags, edge_attrs=settings.osm_xml_way_attrs, oneway=False, filename='graph.osm', folder=None): """""" Save a graph as an OSM XML formatted file. NOTE: for very large networks this method can take upwards of 30+ minutes to finish. Parameters __________ G : networkx multidigraph or multigraph filename : string the name of the osm file (including file extension) folder : string the folder to contain the file, if None, use default data folder Returns ------- None """""" start_time = time.time() if folder is None: folder = settings.data_folder # get undirected graph so we don't generate duplicate nodes H = get_undirected(G) # create a copy to convert all the node/edge attribute values to string H_save = get_undirected(G).copy() gdf_nodes, gdf_edges = graph_to_gdfs( H_save, node_geometry=False, fill_edge_geometry=False) # rename columns per osm specification gdf_nodes.rename( columns={'osmid': 'id', 'x': 'lon', 'y': 'lat'}, inplace=True) if 'uniqueid' in gdf_edges.columns: gdf_edges = gdf_edges.rename(columns={'uniqueid': 'id'}) else: gdf_edges = gdf_edges.reset_index().rename(columns={'index': 'id'}) # add default values for required attributes for table in [gdf_nodes, gdf_edges]: table['uid'] = '1' table['user'] = 'osmnx' table['version'] = '1' table['changeset'] = '1' table['timestamp'] = '2017-01-01T00:00:00Z' # convert all datatypes to str nodes = gdf_nodes.applymap(str) edges = gdf_edges.applymap(str) # misc. 
string replacements to meet OSM XML spec if 'oneway' in edges.columns: # fill blank oneway tags with default (False) edges.loc[pd.isnull(edges['oneway']), 'oneway'] = oneway edges.loc[:, 'oneway'] = edges['oneway'].astype(str) edges.loc[:, 'oneway'] = edges['oneway'].str.replace( 'False', 'no').replace('True', 'yes') # initialize XML tree with an OSM root element root = etree.Element('osm', attrib={'version': '1', 'generator': 'OSMnx'}) # append nodes to the XML tree for i, row in nodes.iterrows(): node = etree.SubElement( root, 'node', attrib=row[node_attrs].dropna().to_dict()) for tag in node_tags: etree.SubElement( node, 'tag', attrib={'k': tag, 'v': row[tag]}) # append edges to the XML tree for e in edges.id.unique(): all_way_edges = edges[edges['id'] == e] first = all_way_edges.iloc[0] edge = etree.SubElement( root, 'way', attrib=first[edge_attrs].dropna().to_dict()) if len(all_way_edges) == 1: etree.SubElement(edge, 'nd', attrib={'ref': first['u']}) etree.SubElement(edge, 'nd', attrib={'ref': first['v']}) else: ordered_nodes = get_unique_nodes_ordered_from_way(all_way_edges) for node in ordered_nodes: etree.SubElement(edge, 'nd', attrib={'ref': node}) for tag in edge_tags: etree.SubElement( edge, 'tag', attrib={'k': tag, 'v': first[tag]}) et = etree.ElementTree(root) if not os.path.exists(folder): os.makedirs(folder) et.write(os.path.join(folder, filename)) log('Saved graph to disk as OSM at ""{}"" in {:,.2f} seconds'.format( os.path.join(folder, filename), time.time() - start_time)) " 22832,"def _get_leap_year(year, future): """""" Iterate through previous or next years until it gets a valid leap year This is performed to avoid missing or including centurial leap years """""" difference = 1 if future else -1 leap_year = year + difference while not calendar.isleap(leap_year): leap_year = leap_year + difference return leap_year ","def _get_leap_year(year, future): """""" Iterate through previous or next years until it gets a valid leap year This is performed to avoid missing or including centurial leap years """""" difference = 1 if future else -1 leap_year = year + difference while not calendar.isleap(leap_year): leap_year += difference return leap_year " 31610,"def main(): params = demisto.params() use_ssl = not params.get(""insecure"", False) use_proxy = params.get(""proxy"", False) client = Client(params[""server_url""], params[""api_secret""], use_ssl, use_proxy) commands: Dict[str, Callable] = {""trendmicro-list-computers"": list_computers_command, ""trendmicro-create-computer"": create_computer_command, ""trendmicro-search-computers"": search_computers_command, ""trendmicro-get-computer"": get_computer_command, ""trendmicro-modify-computer"": modify_computer_command, ""trendmicro-delete-computer"": delete_computer_command, ""trendmicro-get-computer-setting"": get_computer_setting_command, ""trendmicro-modify-computer-setting"": modify_computer_setting_command, ""trendmicro-reset-computer-setting"": reset_computer_setting_command, ""trendmicro-list-firewall-rule-ids-of-computer"": list_firewall_rule_ids_of_computer_command, ""trendmicro-add-firewall-rule-ids-to-computer"": add_firewall_rule_ids_to_computer_command, ""trendmicro-set-firewall-rule-ids-to-computer"": set_firewall_rule_ids_to_computer_command, ""trendmicro-remove-firewall-rule-id-from-computer"": remove_firewall_rule_id_from_computer_command, # noqa: E501 ""trendmicro-list-computer-groups"": list_computer_groups_command, ""trendmicro-create-computer-group"": create_computer_group_command, 
""trendmicro-search-computer-groups"": search_computer_groups_command, ""trendmicro-get-computer-group"": get_computer_group_command, ""trendmicro-modify-computer-group"": modify_computer_group_command, ""trendmicro-delete-computer-group"": delete_computer_group_command, ""trendmicro-search-firewall-rules"": search_firewall_rules_command, ""trendmicro-list-firewall-rules"": list_firewall_rules_command, ""trendmicro-create-firewall-rule"": create_firewall_rule_command, ""trendmicro-get-firewall-rule"": get_firewall_rule_command, ""trendmicro-modify-firewall-rule"": modify_firewall_rule_command, ""trendmicro-delete-firewall-rule"": delete_firewall_rule_command, ""trendmicro-search-policies"": search_policies_command, ""trendmicro-get-policy"": get_policy_command, ""trendmicro-modify-policy"": modify_policy_command, ""trendmicro-delete-policy"": delete_policy_command, ""trendmicro-get-default-policy-setting"": get_default_policy_setting_command, ""trendmicro-modify-default-policy-setting"": modify_default_policy_setting_command, ""trendmicro-reset-default-policy-setting"": reset_default_policy_setting_command, ""trendmicro-list-default-policy-settings"": list_default_policy_settings_command, ""trendmicro-get-policy-setting"": get_policy_setting_command, ""trendmicro-modify-policy-setting"": modify_policy_setting_command, ""trendmicro-reset-policy-setting"": reset_policy_setting_command, ""trendmicro-list-policies"": list_policies_command, ""trendmicro-create-policy"": create_policy_command, ""test-module"": test_module} error_message = """" try: command = demisto.command() if command in commands: command_function = commands[command] return_results(command_function(client, **convert_args(command_function, demisto.args()))) else: raise NotImplementedError(f""The command {command} does not exist on TrendMicro!"") except (ConnectionError, InvalidURL, InvalidSchema) as e: demisto.error(traceback.format_exc()) error_message = f""{INVALID_URL_ERROR}\nError:\n{e}"" except HTTPError as e: demisto.error(traceback.format_exc()) error_message = f""Error in API call [{e.response.status_code}]\n{e.response.json()['message']}"" except Exception as e: demisto.error(traceback.format_exc()) error_message = f""Failed to execute {demisto.command()} command.\nError:\n{e}"" return_error(error_message) ","def main(): params = demisto.params() use_ssl = not params.get(""insecure"", False) use_proxy = params.get(""proxy"", False) client = Client(params[""server_url""], params[""api_secret""], use_ssl, use_proxy) commands: Dict[str, Callable] = {""trendmicro-list-computers"": list_computers_command, ""trendmicro-create-computer"": create_computer_command, ""trendmicro-search-computers"": search_computers_command, ""trendmicro-get-computer"": get_computer_command, ""trendmicro-modify-computer"": modify_computer_command, ""trendmicro-delete-computer"": delete_computer_command, ""trendmicro-get-computer-setting"": get_computer_setting_command, ""trendmicro-modify-computer-setting"": modify_computer_setting_command, ""trendmicro-reset-computer-setting"": reset_computer_setting_command, ""trendmicro-list-firewall-rule-ids-of-computer"": list_firewall_rule_ids_of_computer_command, ""trendmicro-add-firewall-rule-ids-to-computer"": add_firewall_rule_ids_to_computer_command, ""trendmicro-set-firewall-rule-ids-to-computer"": set_firewall_rule_ids_to_computer_command, ""trendmicro-remove-firewall-rule-id-from-computer"": remove_firewall_rule_id_from_computer_command, # noqa: E501 ""trendmicro-list-computer-groups"": 
list_computer_groups_command, ""trendmicro-create-computer-group"": create_computer_group_command, ""trendmicro-search-computer-groups"": search_computer_groups_command, ""trendmicro-get-computer-group"": get_computer_group_command, ""trendmicro-modify-computer-group"": modify_computer_group_command, ""trendmicro-delete-computer-group"": delete_computer_group_command, ""trendmicro-search-firewall-rules"": search_firewall_rules_command, ""trendmicro-list-firewall-rules"": list_firewall_rules_command, ""trendmicro-create-firewall-rule"": create_firewall_rule_command, ""trendmicro-get-firewall-rule"": get_firewall_rule_command, ""trendmicro-modify-firewall-rule"": modify_firewall_rule_command, ""trendmicro-delete-firewall-rule"": delete_firewall_rule_command, ""trendmicro-search-policies"": search_policies_command, ""trendmicro-get-policy"": get_policy_command, ""trendmicro-modify-policy"": modify_policy_command, ""trendmicro-delete-policy"": delete_policy_command, ""trendmicro-get-default-policy-setting"": get_default_policy_setting_command, ""trendmicro-modify-default-policy-setting"": modify_default_policy_setting_command, ""trendmicro-reset-default-policy-setting"": reset_default_policy_setting_command, ""trendmicro-list-default-policy-settings"": list_default_policy_settings_command, ""trendmicro-get-policy-setting"": get_policy_setting_command, ""trendmicro-modify-policy-setting"": modify_policy_setting_command, ""trendmicro-reset-policy-setting"": reset_policy_setting_command, ""trendmicro-list-policies"": list_policies_command, ""trendmicro-create-policy"": create_policy_command, ""test-module"": test_module} error_message = """" try: command = demisto.command() if command in commands: command_function = commands.get(command) return_results(command_function(client, **convert_args(command_function, demisto.args()))) else: raise NotImplementedError(f""The command {command} does not exist on TrendMicro!"") except (ConnectionError, InvalidURL, InvalidSchema) as e: demisto.error(traceback.format_exc()) error_message = f""{INVALID_URL_ERROR}\nError:\n{e}"" except HTTPError as e: demisto.error(traceback.format_exc()) error_message = f""Error in API call [{e.response.status_code}]\n{e.response.json()['message']}"" except Exception as e: demisto.error(traceback.format_exc()) error_message = f""Failed to execute {demisto.command()} command.\nError:\n{e}"" return_error(error_message) " 41695,"def filter_info(info: dict[str, Any], browser: str) -> dict[str, Any]: info = dict(info) # keep only flags related to the current browser flags_to_remove = [""firefox"", ""chrome"", ""node""] flags_to_remove.remove(browser) for browser in flags_to_remove: for key in list(info.keys()): if key.endswith(browser): del info[key] return info ","def filter_info(info: dict[str, Any], browser: str) -> dict[str, Any]: info = dict(info) # keep only flags related to the current browser flags_to_remove = [""firefox"", ""chrome"", ""node""] flags_to_remove.remove(browser) for browser in flags_to_remove: for key in info: if key.endswith(browser): del info[key] return info " 23181,"def _analyze_paths(file_list, fs, root=False): """"""Consolidate list of file-paths into parquet relative paths Note: This function was mostly copied from dask/fastparquet to use in both `FastParquetEngine` and `ArrowEngine`."""""" def _join_path(*path): def _scrub(i, p): # Convert path to standard form # this means windows path separators are converted to linux p = p.replace(fs.sep, ""/"") if p == """": # empty path is assumed to be a relative 
path return ""."" if p[-1] == ""/"": # trailing slashes are not allowed p = p[:-1] if i > 0 and p[0] == ""/"": # only the first path can start with / p = p[1:] return p abs_prefix = """" if path and path[0]: if path[0][0] == ""/"": abs_prefix = ""/"" path = list(path) path[0] = path[0][1:] elif fs.sep == ""\\"" and path[0][1:].startswith("":/""): # If windows, then look for the ""c:/"" prefix abs_prefix = path[0][0:3] path = list(path) path[0] = path[0][3:] _scrubbed = [] for i, p in enumerate(path): _scrubbed.extend(_scrub(i, p).split(""/"")) simpler = [] for s in _scrubbed: if s == ""."": pass elif s == "".."": if simpler: if simpler[-1] == "".."": simpler.append(s) else: simpler.pop() elif abs_prefix: raise Exception(""can not get parent of root"") else: simpler.append(s) else: simpler.append(s) if not simpler: if abs_prefix: joined = abs_prefix else: joined = ""."" else: joined = abs_prefix + (""/"".join(simpler)) return joined path_parts_list = [_join_path(fn).split(""/"") for fn in file_list] if root is False: basepath = path_parts_list[0][:-1] for _, path_parts in enumerate(path_parts_list): j = len(path_parts) - 1 for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)): if base_part != path_part: j = k break basepath = basepath[:j] l = len(basepath) else: basepath = _join_path(root).split(""/"") l = len(basepath) assert all( p[:l] == basepath for p in path_parts_list ), ""All paths must begin with the given root"" out_list = [] for path_parts in path_parts_list: out_list.append( ""/"".join(path_parts[l:]) ) # use '/'.join() instead of _join_path to be consistent with split('/') return ( ""/"".join(basepath), out_list, ) # use '/'.join() instead of _join_path to be consistent with split('/') ","def _analyze_paths(file_list, fs, root=False): """"""Consolidate list of file-paths into parquet relative paths Note: This function was mostly copied from dask/fastparquet to use in both `FastParquetEngine` and `ArrowEngine`."""""" def _join_path(*path): def _scrub(i, p): # Convert path to standard form # this means windows path separators are converted to linux p = p.replace(fs.sep, ""/"") if p == """": # empty path is assumed to be a relative path return ""."" if p[-1] == ""/"": # trailing slashes are not allowed p = p[:-1] if i > 0 and p[0] == ""/"": # only the first path can start with / p = p[1:] return p abs_prefix = """" if path and path[0]: if path[0][0] == ""/"": abs_prefix = ""/"" path = list(path) path[0] = path[0][1:] elif fs.sep == ""\\"" and path[0][1:].startswith("":/""): # If windows, then look for the ""c:/"" prefix abs_prefix = path[0][0:3] path = list(path) path[0] = path[0][3:] _scrubbed = [] for i, p in enumerate(path): _scrubbed.extend(_scrub(i, p).split(""/"")) simpler = [] for s in _scrubbed: if s == ""."": pass elif s == "".."": if simpler: if simpler[-1] == "".."": simpler.append(s) else: simpler.pop() elif abs_prefix: raise Exception(""can not get parent of root"") else: simpler.append(s) else: simpler.append(s) if not simpler: if abs_prefix: joined = abs_prefix else: joined = ""."" else: joined = abs_prefix + (""/"".join(simpler)) return joined path_parts_list = [_join_path(fn).split(""/"") for fn in file_list] if root is False: basepath = path_parts_list[0][:-1] for path_parts in path_parts_list: j = len(path_parts) - 1 for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)): if base_part != path_part: j = k break basepath = basepath[:j] l = len(basepath) else: basepath = _join_path(root).split(""/"") l = len(basepath) assert all( p[:l] 
== basepath for p in path_parts_list ), ""All paths must begin with the given root"" out_list = [] for path_parts in path_parts_list: out_list.append( ""/"".join(path_parts[l:]) ) # use '/'.join() instead of _join_path to be consistent with split('/') return ( ""/"".join(basepath), out_list, ) # use '/'.join() instead of _join_path to be consistent with split('/') " 32282,"def panorama_query_logs_command(args: dict): """""" Query logs """""" log_type = args.get('log-type') number_of_logs = args.get('number_of_logs') query = args.get('query') address_src = args.get('addr-src') address_dst = args.get('addr-dst') ip_ = args.get('ip') zone_src = args.get('zone-src') zone_dst = args.get('zone-dst') time_generated = args.get('time-generated') action = args.get('action') port_dst = args.get('port-dst') rule = args.get('rule') filedigest = args.get('filedigest') url = args.get('url') use_polling = args.get('polling', 'false') == 'true' job_id = args.get('job_id') cmd = demisto.command() interval_in_seconds = int(args.get('interval_in_seconds', 60)) timeout = int(args.get('timeout', 600)) script_results = [] if query and (address_src or address_dst or zone_src or zone_dst or time_generated or action or port_dst or rule or url or filedigest): raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.') if use_polling: ScheduledCommand.raise_error_if_not_supported() if not job_id: # create new search result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') job_id = result['response']['result']['job'] polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) readable_output = f""Panorama log query search created successfully (Job ID: {job_id})"" script_results.append(CommandResults( readable_output=readable_output, scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if result['response']['result']['job']['status'] != ""FIN"": polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) script_results.append(CommandResults( scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. 
Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_output = { 'JobID': job_id, 'Status': 'Complete' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][ 'result'] \ or 'logs' not in result['response']['result']['log']: raise Exception('Missing logs in response.') logs = result['response']['result']['log']['logs'] if logs['@count'] == '0': human_readable = f'No {log_type} logs matched the query.' else: pretty_logs = prettify_logs(logs['entry']) query_logs_output['Logs'] = pretty_logs human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'], ['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application', 'Action', 'Rule', 'URLOrFilename'], removeNull=True) script_results.append(CommandResults( outputs_prefix='Panorama.Monitor', outputs_key_field='JobID', outputs=result, readable_output=human_readable, ignore_auto_extract=True)) else: result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: raise Exception(f""Query logs failed. Reason is: {result['response']['msg']['line']}"") else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') query_logs_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending', 'LogType': log_type, 'Message': result['response']['result']['msg']['line'] } script_results.append({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {""Panorama.Monitor(val.JobID == obj.JobID)"": query_logs_output} }) return_results(script_results) ","def panorama_query_logs_command(args: dict): """""" Query logs """""" log_type = args.get('log-type') number_of_logs = args.get('number_of_logs') query = args.get('query') address_src = args.get('addr-src') address_dst = args.get('addr-dst') ip_ = args.get('ip') zone_src = args.get('zone-src') zone_dst = args.get('zone-dst') time_generated = args.get('time-generated') action = args.get('action') port_dst = args.get('port-dst') rule = args.get('rule') filedigest = args.get('filedigest') url = args.get('url') use_polling = args.get('polling', 'false') == 'true' job_id = args.get('job_id') cmd = demisto.command() interval_in_seconds = int(args.get('interval_in_seconds', 60)) timeout = int(args.get('timeout', 600)) script_results = [] if query and (address_src or address_dst or zone_src or zone_dst or time_generated or action or port_dst or rule or url or filedigest): raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.') if use_polling: ScheduledCommand.raise_error_if_not_supported() if not job_id: # create new search result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, 
zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception(f'Missing JobID in response {result[""response""]}.') job_id = result['response']['result']['job'] polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) readable_output = f""Panorama log query search created successfully (Job ID: {job_id})"" script_results.append(CommandResults( readable_output=readable_output, scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if result['response']['result']['job']['status'] != ""FIN"": polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) script_results.append(CommandResults( scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_output = { 'JobID': job_id, 'Status': 'Complete' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][ 'result'] \ or 'logs' not in result['response']['result']['log']: raise Exception('Missing logs in response.') logs = result['response']['result']['log']['logs'] if logs['@count'] == '0': human_readable = f'No {log_type} logs matched the query.' else: pretty_logs = prettify_logs(logs['entry']) query_logs_output['Logs'] = pretty_logs human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'], ['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application', 'Action', 'Rule', 'URLOrFilename'], removeNull=True) script_results.append(CommandResults( outputs_prefix='Panorama.Monitor', outputs_key_field='JobID', outputs=result, readable_output=human_readable, ignore_auto_extract=True)) else: result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: raise Exception(f""Query logs failed. 
Reason is: {result['response']['msg']['line']}"") else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') query_logs_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending', 'LogType': log_type, 'Message': result['response']['result']['msg']['line'] } script_results.append({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {""Panorama.Monitor(val.JobID == obj.JobID)"": query_logs_output} }) return_results(script_results) " 34367,"def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): export_parser_args = { ""parents"": parents, ""conflict_handler"": ""resolve"", ""formatter_class"": argparse.ArgumentDefaultsHelpFormatter, ""help"": ""Export Rasa trackers using an event broker."", } shell_parser = subparsers.add_parser(""export"", **export_parser_args) shell_parser.set_defaults(func=export_trackers) arguments.set_export_arguments(shell_parser) ","def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ) -> None: export_parser_args = { ""parents"": parents, ""conflict_handler"": ""resolve"", ""formatter_class"": argparse.ArgumentDefaultsHelpFormatter, ""help"": ""Export Rasa trackers using an event broker."", } shell_parser = subparsers.add_parser(""export"", **export_parser_args) shell_parser.set_defaults(func=export_trackers) arguments.set_export_arguments(shell_parser) " 43226,"def get_reexecution_strategy( run: PipelineRun, instance: DagsterInstance ) -> Optional[ReexecutionStrategy]: raw_strategy_tag = run.tags.get(RETRY_STRATEGY_TAG) if raw_strategy_tag is None: return None try: return ReexecutionStrategy[raw_strategy_tag] except KeyError: instance.report_engine_event( f""Error parsing retry strategy from tag {RETRY_STRATEGY_TAG}: {raw_strategy_tag}"", run ) return None ","def get_reexecution_strategy( run: PipelineRun, instance: DagsterInstance ) -> Optional[ReexecutionStrategy]: raw_strategy_tag = run.tags.get(RETRY_STRATEGY_TAG) if raw_strategy_tag is None: return None try: return ReexecutionStrategy[raw_strategy_tag] except KeyError: instance.report_engine_event( f""Error parsing retry strategy from tag '{RETRY_STRATEGY_TAG}: {raw_strategy_tag}'"", run ) return None " 15260,"def mock_mode(mode_id, name): """"""Mock Canary Mode class."""""" mode = MagicMock() type(mode).mode_id = PropertyMock(return_value=mode_id) type(mode).name = PropertyMock(return_value=mode_name) type(mode).resource_url = PropertyMock(return_value=f""/v1/modes/{mode_id}"") return mode ","def mock_mode(mode_id, name): """"""Mock Canary Mode class."""""" mode = MagicMock() type(mode).mode_id = PropertyMock(return_value=mode_id) type(mode).name = PropertyMock(return_value=name) type(mode).resource_url = PropertyMock(return_value=f""/v1/modes/{mode_id}"") return mode " 12158,"def unpack(blob, **kwargs): if config['blob.encode_bypass'] is True: return blob if blob is None: return None return BlobReader(blob, **kwargs).unpack() ","def unpack(blob, **kwargs): if config['blob.encode_bypass']: return blob if blob is None: return None return BlobReader(blob, **kwargs).unpack() " 6799,"def get_slide_settings(): slides = [] slide_settings = frappe.get_single('Setup Wizard 
Settings') for entry in slide_settings.slide_order: slide_doc = frappe.get_doc('Setup Wizard Slide', entry.slide) if frappe.scrub(slide_doc.app) in frappe.get_installed_apps(): slides.append(frappe._dict( slide_type = slide_doc.slide_type, title = slide_doc.slide_title, help = slide_doc.slide_desc, domains = get_domains(slide_doc), fields = slide_doc.slide_fields, help_links = get_help_links(slide_doc), add_more = slide_doc.add_more_button, max_count = slide_doc.max_count, submit_method = get_submit_method(slide_doc), image_src = get_slide_image(slide_doc) )) return slides ","def get_onboarding_slides_as_list(): slides = [] slide_settings = frappe.get_single('Setup Wizard Settings') for entry in slide_settings.slide_order: slide_doc = frappe.get_doc('Setup Wizard Slide', entry.slide) if frappe.scrub(slide_doc.app) in frappe.get_installed_apps(): slides.append(frappe._dict( slide_type = slide_doc.slide_type, title = slide_doc.slide_title, help = slide_doc.slide_desc, domains = get_domains(slide_doc), fields = slide_doc.slide_fields, help_links = get_help_links(slide_doc), add_more = slide_doc.add_more_button, max_count = slide_doc.max_count, submit_method = get_submit_method(slide_doc), image_src = get_slide_image(slide_doc) )) return slides " 18479,"def clean_environment(): # Stuff in here sanitizes the build environment to eliminate # anything the user has set that may interfere. We apply it immediately # unlike the other functions so it doesn't overwrite what the modules load. env = EnvironmentModifications() # Remove these vars from the environment during build because they # can affect how some packages find libraries. We want to make # sure that builds never pull in unintended external dependencies. env.unset('LD_LIBRARY_PATH') env.unset('LIBRARY_PATH') env.unset('CPATH') env.unset('LD_RUN_PATH') env.unset('DYLD_LIBRARY_PATH') env.unset('DYLD_FALLBACK_LIBRARY_PATH') # On Cray ""cluster"" systems, unset CRAY_LD_LIBRARY_PATH to avoid # interference with Spack dependencies. # CNL requires these variables to be set (or at least some of them, # depending on the CNL version). hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target') on_cray = str(hostarch.platform) == 'cray' using_cnl = re.match(r'cnl\d+', str(hostarch.os)) if on_cray and not using_cnl: env.unset('CRAY_LD_LIBRARY_PATH') for varname in os.environ.keys(): if 'PKGCONF' in varname: env.unset(varname) # Unset the following variables because they can affect installation of # Autotools and CMake packages. build_system_vars = [ 'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables 'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables 'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables 'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables 'LDFLAGS', 'LIBS' # linker variables ] for v in build_system_vars: env.unset(v) # Unset mpi environment vars. These flags should only be set by # mpi providers for packages with mpi dependencies mpi_vars = [ 'MPICC', 'MPICXX', 'MPIF70', 'MPIF90' ] for v in mpi_vars: env.unset(v) build_lang = spack.config.get('config:build_language') if build_lang: # Override language-related variables. This can be used to force # English compiler messages etc., which allows parse_log_events to # show useful matches. env.set('LC_ALL', build_lang) # Remove any macports installs from the PATH. The macports ld can # cause conflicts with the built-in linker on el capitan. 
Solves # assembler issues, e.g.: # suffix or operands invalid for `movq'"" path = get_path('PATH') for p in path: if '/macports/' in p: env.remove_path('PATH', p) env.apply_modifications() ","def clean_environment(): # Stuff in here sanitizes the build environment to eliminate # anything the user has set that may interfere. We apply it immediately # unlike the other functions so it doesn't overwrite what the modules load. env = EnvironmentModifications() # Remove these vars from the environment during build because they # can affect how some packages find libraries. We want to make # sure that builds never pull in unintended external dependencies. env.unset('LD_LIBRARY_PATH') env.unset('LIBRARY_PATH') env.unset('CPATH') env.unset('LD_RUN_PATH') env.unset('DYLD_LIBRARY_PATH') env.unset('DYLD_FALLBACK_LIBRARY_PATH') # On Cray ""cluster"" systems, unset CRAY_LD_LIBRARY_PATH to avoid # interference with Spack dependencies. # CNL requires these variables to be set (or at least some of them, # depending on the CNL version). hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target') on_cray = str(hostarch.platform) == 'cray' using_cnl = re.match(r'cnl\d+', str(hostarch.os)) if on_cray and not using_cnl: env.unset('CRAY_LD_LIBRARY_PATH') for varname in os.environ.keys(): if 'PKGCONF' in varname: env.unset(varname) # Unset the following variables because they can affect installation of # Autotools and CMake packages. build_system_vars = [ 'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables 'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables 'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables 'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables 'LDFLAGS', 'LIBS' # linker variables ] for v in build_system_vars: env.unset(v) # Unset mpi environment vars. These flags should only be set by # mpi providers for packages with mpi dependencies mpi_vars = [ 'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90' ] for v in mpi_vars: env.unset(v) build_lang = spack.config.get('config:build_language') if build_lang: # Override language-related variables. This can be used to force # English compiler messages etc., which allows parse_log_events to # show useful matches. env.set('LC_ALL', build_lang) # Remove any macports installs from the PATH. The macports ld can # cause conflicts with the built-in linker on el capitan. Solves # assembler issues, e.g.: # suffix or operands invalid for `movq'"" path = get_path('PATH') for p in path: if '/macports/' in p: env.remove_path('PATH', p) env.apply_modifications() " 44046,"def generate_symmetries(qubit_op, num_qubits): """"""Get generators of symmetries, taus, for a given Hamiltonian. Args: qubit_op (Hamiltonian): Hamiltonian for which symmetries are to be generated to perform tapering. num_qubits (int): number of wires required to define the Hamiltonian. Returns: generators (list): list of generators of symmetries, taus, for the Hamiltonian. .. 
code-block:: >>> symbols, coordinates = (['H', 'H'], np.array([0., 0., -0.66140414, 0., 0., 0.66140414])) >>> H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates) >>> generators = generate_symmetries(H, qubits) [(1.0) [Z0 Z1], (1.0) [Z0 Z2], (1.0) [Z0 Z3]] """""" # Generate binary matrix for qubit_op E = _binary_matrix(qubit_op.ops, num_qubits) # Get reduced row echelon form of binary matrix E E_rref = _reduced_row_echelon(E) E_reduced = E_rref[~np.all(E_rref == 0, axis=1)] # remove all-zero rows # Get kernel (i.e., nullspace) for trimmed binary matrix using gaussian elimination nullspace = _kernel(E_reduced) # Get generators tau from the calculated nullspace generators = generate_taus(nullspace, num_qubits) # Get unitaries from the calculated nullspace pauli_x = generate_paulis(generators, num_qubits) return generators, pauli_x ","def generate_symmetries(qubit_op, num_qubits): """"""Get the tau symmetry generators for a given Hamiltonian. Args: qubit_op (Hamiltonian): Hamiltonian for which symmetries are to be generated to perform tapering. num_qubits (int): number of wires required to define the Hamiltonian. Returns: generators (list): list of generators of symmetries, taus, for the Hamiltonian. .. code-block:: >>> symbols, coordinates = (['H', 'H'], np.array([0., 0., -0.66140414, 0., 0., 0.66140414])) >>> H, qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates) >>> generators = generate_symmetries(H, qubits) [(1.0) [Z0 Z1], (1.0) [Z0 Z2], (1.0) [Z0 Z3]] """""" # Generate binary matrix for qubit_op E = _binary_matrix(qubit_op.ops, num_qubits) # Get reduced row echelon form of binary matrix E E_rref = _reduced_row_echelon(E) E_reduced = E_rref[~np.all(E_rref == 0, axis=1)] # remove all-zero rows # Get kernel (i.e., nullspace) for trimmed binary matrix using gaussian elimination nullspace = _kernel(E_reduced) # Get generators tau from the calculated nullspace generators = generate_taus(nullspace, num_qubits) # Get unitaries from the calculated nullspace pauli_x = generate_paulis(generators, num_qubits) return generators, pauli_x " 1620,"def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None, increasing=True): """"""Solve the isotonic regression model:: min sum w[i] (y[i] - y_[i]) ** 2 subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max where: - y[i] are inputs (real numbers) - y_[i] are fitted - w[i] are optional strictly positive weights (default to 1.0) Read more in the :ref:`User Guide `. Parameters ---------- y : array-like of shape=(n_samples,) The data. sample_weight : array-like of shape=(n_samples,), default=None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : optional, default: None If not None, set the lowest value of the fit to y_min. y_max : optional, default: None If not None, set the highest value of the fit to y_max. increasing : boolean, optional, default: True Whether to compute ``y_`` is increasing (if set to True) or decreasing (if set to False) Returns ------- y_ : list of floats Isotonic fit of y. References ---------- ""Active set algorithms for isotonic regression; A unifying framework"" by Michael J. Best and Nilotpal Chakravarti, section 3. 
"""""" order = np.s_[:] if increasing else np.s_[::-1] y = check_array(y, ensure_2d=False, dtype=[np.float64, np.float32]) y = np.array(y[order], dtype=y.dtype) if sample_weight is None: sample_weight = np.ones(len(y), dtype=y.dtype) else: sample_weight = np.array(sample_weight[order], dtype=y.dtype) _inplace_contiguous_isotonic_regression(y, sample_weight) if y_min is not None or y_max is not None: # Older versions of np.clip don't accept None as a bound, so use np.inf if y_min is None: y_min = -np.inf if y_max is None: y_max = np.inf np.clip(y, y_min, y_max, y) return y[order] ","def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None, increasing=True): """"""Solve the isotonic regression model:: min sum w[i] (y[i] - y_[i]) ** 2 subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max where: - y[i] are inputs (real numbers) - y_[i] are fitted - w[i] are optional strictly positive weights (default to 1.0) Read more in the :ref:`User Guide `. Parameters ---------- y : array-like of shape=(n_samples,) The data. sample_weight : array-like of shape (n_samples,), default=None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : optional, default: None If not None, set the lowest value of the fit to y_min. y_max : optional, default: None If not None, set the highest value of the fit to y_max. increasing : boolean, optional, default: True Whether to compute ``y_`` is increasing (if set to True) or decreasing (if set to False) Returns ------- y_ : list of floats Isotonic fit of y. References ---------- ""Active set algorithms for isotonic regression; A unifying framework"" by Michael J. Best and Nilotpal Chakravarti, section 3. """""" order = np.s_[:] if increasing else np.s_[::-1] y = check_array(y, ensure_2d=False, dtype=[np.float64, np.float32]) y = np.array(y[order], dtype=y.dtype) if sample_weight is None: sample_weight = np.ones(len(y), dtype=y.dtype) else: sample_weight = np.array(sample_weight[order], dtype=y.dtype) _inplace_contiguous_isotonic_regression(y, sample_weight) if y_min is not None or y_max is not None: # Older versions of np.clip don't accept None as a bound, so use np.inf if y_min is None: y_min = -np.inf if y_max is None: y_max = np.inf np.clip(y, y_min, y_max, y) return y[order] " 5327,"def install_pyenv(name, user=None): ''' Install pyenv if not installed. Allows you to require pyenv be installed prior to installing the plugins. Useful if you want to install pyenv plugins via the git or file modules and need them installed before installing any rubies. Use the pyenv.root configuration option to set the path for pyenv if you want a system wide install that is not in a user home dir. user: None The user to run pyenv as. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} if __opts__['test']: ret['comment'] = 'pyenv is set to be installed' return ret ret = _check_pyenv(ret, user) if not ret['result'] is True: if __salt__['pyenv.install'](user): ret['result'] = True ret['comment'] = 'Successfully installed pyenv' return ret ","def install_pyenv(name, user=None): ''' Install pyenv if not installed. Allows you to require pyenv be installed prior to installing the plugins. Useful if you want to install pyenv plugins via the git or file modules and need them installed before installing any rubies. Use the pyenv.root configuration option to set the path for pyenv if you want a system wide install that is not in a user home dir. user: None The user to run pyenv as. 
''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} if __opts__['test']: ret['comment'] = 'pyenv is set to be installed' return ret ret = _check_pyenv(ret, user) if not ret['result']: if __salt__['pyenv.install'](user): ret['result'] = True ret['comment'] = 'Successfully installed pyenv' return ret " 42996,"def create_config_object(authentication_token="""", **kwargs): """"""Create a configuration object that stores configuration related data organized into sections. Currently API related configuration options are defined. This function takes into consideration only pre-defined options. If called without passing any keyword arguments, then a default configuration object is created. Keyword arguments: authentication_token (str): the token to be used for user authentication hostname (str): the name of the host to connect to use_ssl (bool): specifies if requests should be sent using SSL port (int): the port to be used when connecting to the remote service debug (bool): determines if the debugging mode is requested Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration object """""" hostname = kwargs.get(""hostname"", ""localhost"") use_ssl = kwargs.get(""use_ssl"", True) port = kwargs.get(""port"", 443) debug = kwargs.get(""debug"", False) config = { ""api"": { ""authentication_token"": authentication_token, ""hostname"": hostname, ""use_ssl"": use_ssl, ""port"": port, ""debug"": debug } } return config ","def create_config_object(authentication_token="""", **kwargs): """"""Create a configuration object that stores configuration related data organized into sections. Currently API-related configuration options are defined. This function takes into consideration only pre-defined options. If called without passing any keyword arguments, then a default configuration object is created. 
Keyword arguments: authentication_token (str): the token to be used for user authentication hostname (str): the name of the host to connect to use_ssl (bool): specifies if requests should be sent using SSL port (int): the port to be used when connecting to the remote service debug (bool): determines if the debugging mode is requested Returns: dict[str, dict[str, Union[str, bool, int]]]: the configuration object """""" hostname = kwargs.get(""hostname"", ""localhost"") use_ssl = kwargs.get(""use_ssl"", True) port = kwargs.get(""port"", 443) debug = kwargs.get(""debug"", False) config = { ""api"": { ""authentication_token"": authentication_token, ""hostname"": hostname, ""use_ssl"": use_ssl, ""port"": port, ""debug"": debug } } return config " 52814,"def targets_to_mulled_name(targets, hash_func, namespace, resolution_cache=None): unresolved_cache_key = ""galaxy.tool_util.deps.container_resolvers.mulled:unresolved"" if resolution_cache is not None: if unresolved_cache_key not in resolution_cache: resolution_cache[unresolved_cache_key] = set() unresolved_cache = resolution_cache.get(unresolved_cache_key) else: unresolved_cache = set() mulled_resolution_cache = None if resolution_cache and hasattr(resolution_cache, 'mulled_resolution_cache'): mulled_resolution_cache = resolution_cache.mulled_resolution_cache name = None def cached_name(cache_key): if mulled_resolution_cache: if cache_key in mulled_resolution_cache: return resolution_cache.get(cache_key) return None if len(targets) == 1: target = targets[0] target_version = target.version cache_key = ""ns[%s]__single__%s__@__%s"" % (namespace, target.package_name, target_version) if cache_key in unresolved_cache: return None name = cached_name(cache_key) if name: return name tags = mulled_tags_for(namespace, target.package_name, resolution_cache=resolution_cache) if tags: for tag in tags: if '--' in tag: version, build = split_tag(tag) else: version = tag build = None if target_version and version == target_version: name = ""%s:%s"" % (target.package_name, version) if build: name = ""%s--%s"" % (name, build) break else: def first_tag_if_available(image_name): if "":"" in image_name: repo_name, tag_prefix = image_name.split("":"", 2) else: repo_name = image_name tag_prefix = None tags = mulled_tags_for(namespace, repo_name, tag_prefix=tag_prefix, resolution_cache=resolution_cache) return tags[0] if tags else None if hash_func == ""v2"": base_image_name = v2_image_name(targets) elif hash_func == ""v1"": base_image_name = v1_image_name(targets) else: raise Exception(""Unimplemented mulled hash_func [%s]"" % hash_func) cache_key = ""ns[%s]__%s__%s"" % (namespace, hash_func, base_image_name) if cache_key in unresolved_cache: return None name = cached_name(cache_key) if name: return name tag = first_tag_if_available(base_image_name) if tag: if "":"" in base_image_name: assert hash_func != ""v1"" # base_image_name of form :, expand tag # to include build number in tag. name = ""%s:%s"" % (base_image_name.split("":"")[0], tag) else: # base_image_name of form , simply add build number # as tag to fully qualify image. 
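# Illustrative only (assumed names, not from this repo): a base image such as
# 'mulled-v2-deadbeef' with first available tag '0' would become
# 'mulled-v2-deadbeef:0' in the assignment below.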
name = ""%s:%s"" % (base_image_name, tag) if name and mulled_resolution_cache: mulled_resolution_cache.put(cache_key, name) if name is None: unresolved_cache.add(name) return name ","def targets_to_mulled_name(targets, hash_func, namespace, resolution_cache=None): unresolved_cache_key = ""galaxy.tool_util.deps.container_resolvers.mulled:unresolved"" if resolution_cache is not None: if unresolved_cache_key not in resolution_cache: resolution_cache[unresolved_cache_key] = set() unresolved_cache = resolution_cache.get(unresolved_cache_key) else: unresolved_cache = set() mulled_resolution_cache = None if resolution_cache and hasattr(resolution_cache, 'mulled_resolution_cache'): mulled_resolution_cache = resolution_cache.mulled_resolution_cache name = None def cached_name(cache_key): if mulled_resolution_cache: if cache_key in mulled_resolution_cache: return resolution_cache.get(cache_key) return None if len(targets) == 1: target = targets[0] target_version = target.version cache_key = ""ns[%s]__single__%s__@__%s"" % (namespace, target.package_name, target_version) if cache_key in unresolved_cache: return None name = cached_name(cache_key) if name: return name tags = mulled_tags_for(namespace, target.package_name, resolution_cache=resolution_cache) if tags: for tag in tags: if '--' in tag: version, _ = split_tag(tag) else: version = tag if target_version and version == target_version: name = ""%s:%s"" % (target.package_name, tag) break else: def first_tag_if_available(image_name): if "":"" in image_name: repo_name, tag_prefix = image_name.split("":"", 2) else: repo_name = image_name tag_prefix = None tags = mulled_tags_for(namespace, repo_name, tag_prefix=tag_prefix, resolution_cache=resolution_cache) return tags[0] if tags else None if hash_func == ""v2"": base_image_name = v2_image_name(targets) elif hash_func == ""v1"": base_image_name = v1_image_name(targets) else: raise Exception(""Unimplemented mulled hash_func [%s]"" % hash_func) cache_key = ""ns[%s]__%s__%s"" % (namespace, hash_func, base_image_name) if cache_key in unresolved_cache: return None name = cached_name(cache_key) if name: return name tag = first_tag_if_available(base_image_name) if tag: if "":"" in base_image_name: assert hash_func != ""v1"" # base_image_name of form :, expand tag # to include build number in tag. name = ""%s:%s"" % (base_image_name.split("":"")[0], tag) else: # base_image_name of form , simply add build number # as tag to fully qualify image. name = ""%s:%s"" % (base_image_name, tag) if name and mulled_resolution_cache: mulled_resolution_cache.put(cache_key, name) if name is None: unresolved_cache.add(name) return name " 49890,"def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60, attributes=ATTRIBUTES, leap_day=False, full_name=PVLIB_PYTHON, affiliation=PVLIB_PYTHON, map_variables=None, timeout=30): """""" Retrieve NSRDB PSM3 timeseries weather data from the PSM3 API. The NSRDB is described in [1]_ and the PSM3 API is described in [2]_, [3]_, and [4]_. .. versionchanged:: 0.9.0 The function now returns a tuple where the first element is a dataframe and the second element is a dictionary containing metadata. Previous versions of this function had the return values switched. 
Parameters ---------- latitude : float or int in decimal degrees, between -90 and 90, north is positive longitude : float or int in decimal degrees, between -180 and 180, east is positive api_key : str NREL Developer Network API key email : str NREL API uses this to automatically communicate messages back to the user only if necessary names : str, default 'tmy' PSM3 API parameter specifing year or TMY variant to download, see notes below for options interval : int, {60, 5, 15, 30} interval size in minutes, must be 5, 15, 30 or 60. Only used for single-year requests (i.e., it is ignored for tmy/tgy/tdy requests). attributes : list of str, optional meteorological fields to fetch. If not specified, defaults to ``pvlib.iotools.psm3.ATTRIBUTES``. See references [2]_, [3]_, and [4]_ for lists of available fields. leap_day : boolean, default False include leap day in the results. Only used for single-year requests (i.e., it is ignored for tmy/tgy/tdy requests). full_name : str, default 'pvlib python' optional affiliation : str, default 'pvlib python' optional map_variables: bool When true, renames columns of the Dataframe to pvlib variable names where applicable. See variable PSM3_VARIABLE_MAP. timeout : int, default 30 time in seconds to wait for server response before timeout Returns ------- data : pandas.DataFrame timeseries data from NREL PSM3 metadata : dict metadata from NREL PSM3 about the record, see :func:`pvlib.iotools.parse_psm3` for fields Raises ------ requests.HTTPError if the request response status is not ok, then the ``'errors'`` field from the JSON response or any error message in the content will be raised as an exception, for example if the `api_key` was rejected or if the coordinates were not found in the NSRDB Notes ----- The required NREL developer key, `api_key`, is available for free by registering at the `NREL Developer Network `_. .. warning:: The ""DEMO_KEY"" `api_key` is severely rate limited and may result in rejected requests. The PSM3 API `names` parameter must be a single value from one of these lists: +-----------+-------------------------------------------------------------+ | Category | Allowed values | +===========+=============================================================+ | Year | 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, | | | 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, | | | 2018, 2019 | +-----------+-------------------------------------------------------------+ | TMY | tmy, tmy-2016, tmy-2017, tdy-2017, tgy-2017, | | | tmy-2018, tdy-2018, tgy-2018, tmy-2019, tdy-2019, tgy-2019 | +-----------+-------------------------------------------------------------+ .. warning:: PSM3 is limited to data found in the NSRDB, please consult the references below for locations with available data. Additionally, querying data with < 30-minute resolution uses a different API endpoint with fewer available fields (see [4]_). See Also -------- pvlib.iotools.read_psm3, pvlib.iotools.parse_psm3 References ---------- .. [1] `NREL National Solar Radiation Database (NSRDB) `_ .. [2] `Physical Solar Model (PSM) v3 `_ .. [3] `Physical Solar Model (PSM) v3 TMY `_ .. [4] `Physical Solar Model (PSM) v3 - Five Minute Temporal Resolution `_ """""" # The well know text (WKT) representation of geometry notation is strict. # A POINT object is a string with longitude first, then the latitude, with # four decimals each, and exactly one space between them. 
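# Illustrative sketch only (assumed sample coordinates): latitude=39.74 and
# longitude=-105.18 would be rendered below as 'POINT(-105.1800 39.7400)',
# i.e. longitude first, four decimals each, one space between the two values.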
longitude = ('%9.4f' % longitude).strip() latitude = ('%8.4f' % latitude).strip() # TODO: make format_WKT(object_type, *args) in tools.py # convert to string to accomodate integer years being passed in names = str(names) # convert pvlib names in attributes to psm3 convention (reverse mapping) # unlike psm3 columns, attributes are lower case and with underscores amap = {value: key.lower().replace(' ', '_') for (key, value) in PSM3_VARIABLE_MAP.items()} attributes = [a if a not in amap.keys() else amap[a] for a in attributes] attributes = list(set(attributes)) # remove duplicate values # required query-string parameters for request to PSM3 API params = { 'api_key': api_key, 'full_name': full_name, 'email': email, 'affiliation': affiliation, 'reason': PVLIB_PYTHON, 'mailing_list': 'false', 'wkt': 'POINT(%s %s)' % (longitude, latitude), 'names': names, 'attributes': ','.join(attributes), 'leap_day': str(leap_day).lower(), 'utc': 'false', 'interval': interval } # request CSV download from NREL PSM3 if any(prefix in names for prefix in ('tmy', 'tgy', 'tdy')): URL = TMY_URL elif interval in (5, 15): URL = PSM5MIN_URL else: URL = PSM_URL response = requests.get(URL, params=params, timeout=timeout) if not response.ok: # if the API key is rejected, then the response status will be 403 # Forbidden, and then the error is in the content and there is no JSON try: errors = response.json()['errors'] except JSONDecodeError: errors = response.content.decode('utf-8') raise requests.HTTPError(errors, response=response) # the CSV is in the response content as a UTF-8 bytestring # to use pandas we need to create a file buffer from the response fbuf = io.StringIO(response.content.decode('utf-8')) return parse_psm3(fbuf, map_variables) ","def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60, attributes=ATTRIBUTES, leap_day=False, full_name=PVLIB_PYTHON, affiliation=PVLIB_PYTHON, map_variables=None, timeout=30): """""" Retrieve NSRDB PSM3 timeseries weather data from the PSM3 API. The NSRDB is described in [1]_ and the PSM3 API is described in [2]_, [3]_, and [4]_. .. versionchanged:: 0.9.0 The function now returns a tuple where the first element is a dataframe and the second element is a dictionary containing metadata. Previous versions of this function had the return values switched. Parameters ---------- latitude : float or int in decimal degrees, between -90 and 90, north is positive longitude : float or int in decimal degrees, between -180 and 180, east is positive api_key : str NREL Developer Network API key email : str NREL API uses this to automatically communicate messages back to the user only if necessary names : str, default 'tmy' PSM3 API parameter specifing year or TMY variant to download, see notes below for options interval : int, {60, 5, 15, 30} interval size in minutes, must be 5, 15, 30 or 60. Only used for single-year requests (i.e., it is ignored for tmy/tgy/tdy requests). attributes : list of str, optional meteorological fields to fetch. If not specified, defaults to ``pvlib.iotools.psm3.ATTRIBUTES``. See references [2]_, [3]_, and [4]_ for lists of available fields. leap_day : boolean, default False include leap day in the results. Only used for single-year requests (i.e., it is ignored for tmy/tgy/tdy requests). full_name : str, default 'pvlib python' optional affiliation : str, default 'pvlib python' optional map_variables: boolean, optional When true, renames columns of the Dataframe to pvlib variable names where applicable. See variable :const:`PSM3_VARIABLE_MAP`. 
timeout : int, default 30 time in seconds to wait for server response before timeout Returns ------- data : pandas.DataFrame timeseries data from NREL PSM3 metadata : dict metadata from NREL PSM3 about the record, see :func:`pvlib.iotools.parse_psm3` for fields Raises ------ requests.HTTPError if the request response status is not ok, then the ``'errors'`` field from the JSON response or any error message in the content will be raised as an exception, for example if the `api_key` was rejected or if the coordinates were not found in the NSRDB Notes ----- The required NREL developer key, `api_key`, is available for free by registering at the `NREL Developer Network `_. .. warning:: The ""DEMO_KEY"" `api_key` is severely rate limited and may result in rejected requests. The PSM3 API `names` parameter must be a single value from one of these lists: +-----------+-------------------------------------------------------------+ | Category | Allowed values | +===========+=============================================================+ | Year | 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, | | | 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, | | | 2018, 2019 | +-----------+-------------------------------------------------------------+ | TMY | tmy, tmy-2016, tmy-2017, tdy-2017, tgy-2017, | | | tmy-2018, tdy-2018, tgy-2018, tmy-2019, tdy-2019, tgy-2019 | +-----------+-------------------------------------------------------------+ .. warning:: PSM3 is limited to data found in the NSRDB, please consult the references below for locations with available data. Additionally, querying data with < 30-minute resolution uses a different API endpoint with fewer available fields (see [4]_). See Also -------- pvlib.iotools.read_psm3, pvlib.iotools.parse_psm3 References ---------- .. [1] `NREL National Solar Radiation Database (NSRDB) `_ .. [2] `Physical Solar Model (PSM) v3 `_ .. [3] `Physical Solar Model (PSM) v3 TMY `_ .. [4] `Physical Solar Model (PSM) v3 - Five Minute Temporal Resolution `_ """""" # The well know text (WKT) representation of geometry notation is strict. # A POINT object is a string with longitude first, then the latitude, with # four decimals each, and exactly one space between them. 
longitude = ('%9.4f' % longitude).strip() latitude = ('%8.4f' % latitude).strip() # TODO: make format_WKT(object_type, *args) in tools.py # convert to string to accomodate integer years being passed in names = str(names) # convert pvlib names in attributes to psm3 convention (reverse mapping) # unlike psm3 columns, attributes are lower case and with underscores amap = {value: key.lower().replace(' ', '_') for (key, value) in PSM3_VARIABLE_MAP.items()} attributes = [a if a not in amap.keys() else amap[a] for a in attributes] attributes = list(set(attributes)) # remove duplicate values # required query-string parameters for request to PSM3 API params = { 'api_key': api_key, 'full_name': full_name, 'email': email, 'affiliation': affiliation, 'reason': PVLIB_PYTHON, 'mailing_list': 'false', 'wkt': 'POINT(%s %s)' % (longitude, latitude), 'names': names, 'attributes': ','.join(attributes), 'leap_day': str(leap_day).lower(), 'utc': 'false', 'interval': interval } # request CSV download from NREL PSM3 if any(prefix in names for prefix in ('tmy', 'tgy', 'tdy')): URL = TMY_URL elif interval in (5, 15): URL = PSM5MIN_URL else: URL = PSM_URL response = requests.get(URL, params=params, timeout=timeout) if not response.ok: # if the API key is rejected, then the response status will be 403 # Forbidden, and then the error is in the content and there is no JSON try: errors = response.json()['errors'] except JSONDecodeError: errors = response.content.decode('utf-8') raise requests.HTTPError(errors, response=response) # the CSV is in the response content as a UTF-8 bytestring # to use pandas we need to create a file buffer from the response fbuf = io.StringIO(response.content.decode('utf-8')) return parse_psm3(fbuf, map_variables) " 14480,"def test_task_hook_import_playbook(default_rules_collection): """"""Assures import_playbook includes are recognized."""""" playbook_path = 'test/playbook-import/playbook_parent.yml' runner = Runner( playbook_path, rules=default_rules_collection) results = runner.run() results_text = str(results) assert len(runner.lintables) == 2 assert len(results) == 2 # Assures we detected the issues from imported playbook assert 'Commands should not change things' in results_text assert 'unamed-task' in results_text assert 'All tasks should be named' in results_text ","def test_task_hook_import_playbook(default_rules_collection): """"""Assures import_playbook includes are recognized."""""" playbook_path = 'test/playbook-import/playbook_parent.yml' runner = Runner( playbook_path, rules=default_rules_collection) results = runner.run() results_text = str(results) assert len(runner.lintables) == 2 assert len(results) == 2 # Assures we detected the issues from imported playbook assert 'Commands should not change things' in results_text assert 'unnamed-task' in results_text assert 'All tasks should be named' in results_text " 7212,"def interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut): """"""Find the new grayscale level for a region using bilinear interpolation. Parameters ---------- image : ndarray Full image. xslice, yslice : array-like Indices of the region. map* : ndarray Mappings of greylevels from histograms. lut : ndarray Maps grayscale levels in image to histogram levels. Returns ------- out : ndarray Original image with the subregion replaced. Notes ----- This function calculates the new greylevel assignments of pixels within a submatrix of the image. This is done by a bilinear interpolation between four different mappings in order to eliminate boundary artifacts. 
"""""" warnings.warn(""interpolate is deprecated and will be removed in version "" ""0.19. Please use the rivate function _interpolate "" ""instead."", category=FutureWarning, stacklevel=2) xslice = slice(xslice[0], xslice[-1] + 1) yslice = slice(yslice[0], yslice[-1] + 1) return _interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut) ","def interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut): """"""Find the new grayscale level for a region using bilinear interpolation. Parameters ---------- image : ndarray Full image. xslice, yslice : array-like Indices of the region. map* : ndarray Mappings of graylevels from histograms. lut : ndarray Maps grayscale levels in image to histogram levels. Returns ------- out : ndarray Original image with the subregion replaced. Notes ----- This function calculates the new greylevel assignments of pixels within a submatrix of the image. This is done by a bilinear interpolation between four different mappings in order to eliminate boundary artifacts. """""" warnings.warn(""interpolate is deprecated and will be removed in version "" ""0.19. Please use the rivate function _interpolate "" ""instead."", category=FutureWarning, stacklevel=2) xslice = slice(xslice[0], xslice[-1] + 1) yslice = slice(yslice[0], yslice[-1] + 1) return _interpolate(image, xslice, yslice, mapLU, mapRU, mapLB, mapRB, lut) " 30978,"def get_user_command(client, args): scim = verify_and_load_scim_data(args.get('scim')) scim_flat_data = map_scim(scim) user_id = scim_flat_data.get('id') username = scim_flat_data.get('userName') email = scim_flat_data.get('email') if not (user_id or username or email): raise Exception('You must provide either the id, email or username of the user') if user_id: res = client.get_user_profile(user_id) else: if username: user_term = username user_param = ""userName"" else: user_term = email user_param = ""emails"" res = client.search_user_profile(user_param, user_term) if res.status_code == 200: res_json = res.json() resources = res_json.get('Resources') if len(resources) > 0: resource = resources[0] user_id = resource.get('id') res = client.get_user_profile(user_id) else: res.status_code = 404 res_json['detail'] = ""User Not Found"" if res.status_code == 200: res_json = res.json() emails = res_json.get('emails') for email_dict in emails: if email_dict.get(""primary"") is True: email = email_dict.get(""value"") break generic_iam_context = OutputContext(success=True, iden=res_json.get('id'), email=email, username=res_json.get('userName'), details=res_json, active=res_json.get('active')) elif res.status_code == 404: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=user_id, email=email, username=username, errorCode=404, errorMessage=""User Not Found"", details=res_json.get('detail')) else: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=user_id, email=email, username=username, errorCode=res.status_code, errorMessage=res_json.get('detail'), details=res_json) generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)' outputs = { generic_iam_context_dt: generic_iam_context.data } readable_output = tableToMarkdown(name='Get Twic User:', t=generic_iam_context.data, headers=[""brand"", ""instanceName"", ""success"", ""active"", ""id"", ""username"", ""email"", ""errorCode"", ""errorMessage"", ""details""], removeNull=True ) return ( readable_output, outputs, generic_iam_context.data ) ","def get_user_command(client, args): scim = 
verify_and_load_scim_data(args.get('scim')) scim_flat_data = map_scim(scim) user_id = scim_flat_data.get('id') username = scim_flat_data.get('userName') email = scim_flat_data.get('email') if not (user_id or username or email): raise DemistoException('You must provide either the id, email or username of the user') if user_id: res = client.get_user_profile(user_id) else: if username: user_term = username user_param = ""userName"" else: user_term = email user_param = ""emails"" res = client.search_user_profile(user_param, user_term) if res.status_code == 200: res_json = res.json() resources = res_json.get('Resources') if len(resources) > 0: resource = resources[0] user_id = resource.get('id') res = client.get_user_profile(user_id) else: res.status_code = 404 res_json['detail'] = ""User Not Found"" if res.status_code == 200: res_json = res.json() emails = res_json.get('emails') for email_dict in emails: if email_dict.get(""primary"") is True: email = email_dict.get(""value"") break generic_iam_context = OutputContext(success=True, iden=res_json.get('id'), email=email, username=res_json.get('userName'), details=res_json, active=res_json.get('active')) elif res.status_code == 404: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=user_id, email=email, username=username, errorCode=404, errorMessage=""User Not Found"", details=res_json.get('detail')) else: res_json = res.json() generic_iam_context = OutputContext(success=False, iden=user_id, email=email, username=username, errorCode=res.status_code, errorMessage=res_json.get('detail'), details=res_json) generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)' outputs = { generic_iam_context_dt: generic_iam_context.data } readable_output = tableToMarkdown(name='Get Twic User:', t=generic_iam_context.data, headers=[""brand"", ""instanceName"", ""success"", ""active"", ""id"", ""username"", ""email"", ""errorCode"", ""errorMessage"", ""details""], removeNull=True ) return ( readable_output, outputs, generic_iam_context.data ) " 42991,"def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate=""BSgate""): """"""Applies a two-mode gate to a state Applies the two-mode gate to the state using custom tensor contractions and the numba compiler for faster application. 
Args: mat (ndarray): The BS operator to be applied to the state state (ndarray): The state that the BS is applied to pure (bool): If the state is pure or mixed modes (list[int]): A list of modes to which the BS is applied n (int): The total number of modes trunc (int): The Hilbert space truncation/cutoff gate (str): the gate which should be called (BSgate, S2gate) Returns: ndarray: State where the two-mode operation has been applied """""" if pure: t1 = modes[0] t2 = modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(n) switch_list_2 = np.arange(n) switch_list_1[[0, t1]] = switch_list_1[[t1, 0]] switch_list_2[[1, t2]] = switch_list_2[[t2, 1]] state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(switch_list_1) else: t1 = 2 * modes[0] t2 = 2 * modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(2 * n) switch_list_2 = np.arange(2 * n) switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]] switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]] # put bra-values to the left, and ket-values to the right (ignoring values not operated on) transpose_list = np.arange(2 * n) transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] state = state.transpose(transpose_list) state = state.transpose(switch_list_1) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_BS(mat.conj(), state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_S2(mat.conj(), state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(transpose_list) return ret ","def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate=""BSgate""): """"""Applies a two-mode gate to a state Applies the two-mode gate to the state using custom tensor contractions and the numba compiler for faster application. 
Args: mat (ndarray): The BS operator to be applied to the state state (ndarray): The state that the BS is applied to pure (bool): If the state is pure or mixed modes (list[int]): A list of modes to which the BS is applied n (int): The total number of modes trunc (int): The Hilbert space truncation/cutoff gate (str): the gate which should be called (BSgate, S2gate) Returns: array[complex]: state after application of the two-mode operation """""" if pure: t1 = modes[0] t2 = modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(n) switch_list_2 = np.arange(n) switch_list_1[[0, t1]] = switch_list_1[[t1, 0]] switch_list_2[[1, t2]] = switch_list_2[[t2, 1]] state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(switch_list_1) else: t1 = 2 * modes[0] t2 = 2 * modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(2 * n) switch_list_2 = np.arange(2 * n) switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]] switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]] # put bra-values to the left, and ket-values to the right (ignoring values not operated on) transpose_list = np.arange(2 * n) transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] state = state.transpose(transpose_list) state = state.transpose(switch_list_1) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_BS(mat.conj(), state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_S2(mat.conj(), state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(transpose_list) return ret " 5071,"def _auto_adjust_subplotpars( fig, renderer, shape, span_pairs, subplot_list, ax_bbox_list=None, pad=1.08, h_pad=None, w_pad=None, rect=None): """""" Return a dict of subplot parameters to adjust spacing between subplots or ``None`` if resulting axes would have zero height or width. Note that this function ignores geometry information of subplot itself, but uses what is given by the *nrows_ncols* and *num1num2_list* parameters. Also, the results could be incorrect if some subplots have ``adjustable=datalim``. Parameters ---------- shape : tuple[int, int] Number of rows and of columns of the grid. span_pairs : list[tuple[slice, slice]] List of rowspans and colspans occupied by each subplot. subplot_list : list of subplots List of subplots that will be used to calculate optimal subplot_params. pad : float Padding between the figure edge and the edges of subplots, as a fraction of the font size. h_pad, w_pad : float Padding (height/width) between edges of adjacent subplots, as a fraction of the font size. Defaults to *pad*. rect : tuple[float, float, float, float] [left, bottom, right, top] in normalized (0, 1) figure coordinates. 
"""""" rows, cols = shape font_size_inches = ( FontProperties(size=rcParams[""font.size""]).get_size_in_points() / 72) pad_inches = pad * font_size_inches vpad_inches = h_pad * font_size_inches if h_pad is not None else pad_inches hpad_inches = w_pad * font_size_inches if w_pad is not None else pad_inches if len(span_pairs) != len(subplot_list) or len(subplot_list) == 0: raise ValueError if rect is None: margin_left = margin_bottom = margin_right = margin_top = None else: margin_left, margin_bottom, _right, _top = rect margin_right = 1 - _right if _right else None margin_top = 1 - _top if _top else None vspaces = np.zeros((rows + 1, cols)) hspaces = np.zeros((rows, cols + 1)) if ax_bbox_list is None: ax_bbox_list = [ Bbox.union([ax.get_position(original=True) for ax in subplots]) for subplots in subplot_list] for subplots, ax_bbox, (rowspan, colspan) in zip( subplot_list, ax_bbox_list, span_pairs): if all(not ax.get_visible() for ax in subplots): continue bb = [] for ax in subplots: if ax.get_visible(): try: bb += [ax.get_tightbbox(renderer, for_layout_only=True)] except TypeError: bb += [ax.get_tightbbox(renderer)] tight_bbox_raw = Bbox.union(bb) tight_bbox = TransformedBbox(tight_bbox_raw, fig.transFigure.inverted()) hspaces[rowspan, colspan.start] += ax_bbox.xmin - tight_bbox.xmin # l hspaces[rowspan, colspan.stop] += tight_bbox.xmax - ax_bbox.xmax # r vspaces[rowspan.start, colspan] += tight_bbox.ymax - ax_bbox.ymax # t vspaces[rowspan.stop, colspan] += ax_bbox.ymin - tight_bbox.ymin # b fig_width_inch, fig_height_inch = fig.get_size_inches() # margins can be negative for axes with aspect applied, so use max(, 0) to # make them nonnegative. if not margin_left: margin_left = (max(hspaces[:, 0].max(), 0) + pad_inches / fig_width_inch) suplabel = fig._supylabel if suplabel and suplabel.get_in_layout(): rel_width = fig.transFigure.inverted().transform_bbox( suplabel.get_window_extent(renderer)).width margin_left += rel_width + pad_inches / fig_width_inch if not margin_right: margin_right = (max(hspaces[:, -1].max(), 0) + pad_inches / fig_width_inch) if not margin_top: margin_top = (max(vspaces[0, :].max(), 0) + pad_inches / fig_height_inch) if fig._suptitle and fig._suptitle.get_in_layout(): rel_height = fig.transFigure.inverted().transform_bbox( fig._suptitle.get_window_extent(renderer)).height margin_top += rel_height + pad_inches / fig_height_inch if not margin_bottom: margin_bottom = (max(vspaces[-1, :].max(), 0) + pad_inches / fig_height_inch) suplabel = fig._supxlabel if suplabel and suplabel.get_in_layout(): rel_height = fig.transFigure.inverted().transform_bbox( suplabel.get_window_extent(renderer)).height margin_bottom += rel_height + pad_inches / fig_height_inch if margin_left + margin_right >= 1: _api.warn_external('Tight layout not applied. The left and right ' 'margins cannot be made large enough to ' 'accommodate all axes decorations. ') return None if margin_bottom + margin_top >= 1: _api.warn_external('Tight layout not applied. The bottom and top ' 'margins cannot be made large enough to ' 'accommodate all axes decorations. ') return None kwargs = dict(left=margin_left, right=1 - margin_right, bottom=margin_bottom, top=1 - margin_top) if cols > 1: hspace = hspaces[:, 1:-1].max() + hpad_inches / fig_width_inch # axes widths: h_axes = (1 - margin_right - margin_left - hspace * (cols - 1)) / cols if h_axes < 0: _api.warn_external('Tight layout not applied. 
tight_layout ' 'cannot make axes width small enough to ' 'accommodate all axes decorations') return None else: kwargs[""wspace""] = hspace / h_axes if rows > 1: vspace = vspaces[1:-1, :].max() + vpad_inches / fig_height_inch v_axes = (1 - margin_top - margin_bottom - vspace * (rows - 1)) / rows if v_axes < 0: _api.warn_external('Tight layout not applied. tight_layout ' 'cannot make axes height small enough to ' 'accommodate all axes decorations') return None else: kwargs[""hspace""] = vspace / v_axes return kwargs ","def _auto_adjust_subplotpars( fig, renderer, shape, span_pairs, subplot_list, ax_bbox_list=None, pad=1.08, h_pad=None, w_pad=None, rect=None): """""" Return a dict of subplot parameters to adjust spacing between subplots or ``None`` if resulting axes would have zero height or width. Note that this function ignores geometry information of subplot itself, but uses what is given by the *nrows_ncols* and *num1num2_list* parameters. Also, the results could be incorrect if some subplots have ``adjustable=datalim``. Parameters ---------- shape : tuple[int, int] Number of rows and columns of the grid. span_pairs : list[tuple[slice, slice]] List of rowspans and colspans occupied by each subplot. subplot_list : list of subplots List of subplots that will be used to calculate optimal subplot_params. pad : float Padding between the figure edge and the edges of subplots, as a fraction of the font size. h_pad, w_pad : float Padding (height/width) between edges of adjacent subplots, as a fraction of the font size. Defaults to *pad*. rect : tuple[float, float, float, float] [left, bottom, right, top] in normalized (0, 1) figure coordinates. """""" rows, cols = shape font_size_inches = ( FontProperties(size=rcParams[""font.size""]).get_size_in_points() / 72) pad_inches = pad * font_size_inches vpad_inches = h_pad * font_size_inches if h_pad is not None else pad_inches hpad_inches = w_pad * font_size_inches if w_pad is not None else pad_inches if len(span_pairs) != len(subplot_list) or len(subplot_list) == 0: raise ValueError if rect is None: margin_left = margin_bottom = margin_right = margin_top = None else: margin_left, margin_bottom, _right, _top = rect margin_right = 1 - _right if _right else None margin_top = 1 - _top if _top else None vspaces = np.zeros((rows + 1, cols)) hspaces = np.zeros((rows, cols + 1)) if ax_bbox_list is None: ax_bbox_list = [ Bbox.union([ax.get_position(original=True) for ax in subplots]) for subplots in subplot_list] for subplots, ax_bbox, (rowspan, colspan) in zip( subplot_list, ax_bbox_list, span_pairs): if all(not ax.get_visible() for ax in subplots): continue bb = [] for ax in subplots: if ax.get_visible(): try: bb += [ax.get_tightbbox(renderer, for_layout_only=True)] except TypeError: bb += [ax.get_tightbbox(renderer)] tight_bbox_raw = Bbox.union(bb) tight_bbox = TransformedBbox(tight_bbox_raw, fig.transFigure.inverted()) hspaces[rowspan, colspan.start] += ax_bbox.xmin - tight_bbox.xmin # l hspaces[rowspan, colspan.stop] += tight_bbox.xmax - ax_bbox.xmax # r vspaces[rowspan.start, colspan] += tight_bbox.ymax - ax_bbox.ymax # t vspaces[rowspan.stop, colspan] += ax_bbox.ymin - tight_bbox.ymin # b fig_width_inch, fig_height_inch = fig.get_size_inches() # margins can be negative for axes with aspect applied, so use max(, 0) to # make them nonnegative. 
if not margin_left: margin_left = (max(hspaces[:, 0].max(), 0) + pad_inches / fig_width_inch) suplabel = fig._supylabel if suplabel and suplabel.get_in_layout(): rel_width = fig.transFigure.inverted().transform_bbox( suplabel.get_window_extent(renderer)).width margin_left += rel_width + pad_inches / fig_width_inch if not margin_right: margin_right = (max(hspaces[:, -1].max(), 0) + pad_inches / fig_width_inch) if not margin_top: margin_top = (max(vspaces[0, :].max(), 0) + pad_inches / fig_height_inch) if fig._suptitle and fig._suptitle.get_in_layout(): rel_height = fig.transFigure.inverted().transform_bbox( fig._suptitle.get_window_extent(renderer)).height margin_top += rel_height + pad_inches / fig_height_inch if not margin_bottom: margin_bottom = (max(vspaces[-1, :].max(), 0) + pad_inches / fig_height_inch) suplabel = fig._supxlabel if suplabel and suplabel.get_in_layout(): rel_height = fig.transFigure.inverted().transform_bbox( suplabel.get_window_extent(renderer)).height margin_bottom += rel_height + pad_inches / fig_height_inch if margin_left + margin_right >= 1: _api.warn_external('Tight layout not applied. The left and right ' 'margins cannot be made large enough to ' 'accommodate all axes decorations. ') return None if margin_bottom + margin_top >= 1: _api.warn_external('Tight layout not applied. The bottom and top ' 'margins cannot be made large enough to ' 'accommodate all axes decorations. ') return None kwargs = dict(left=margin_left, right=1 - margin_right, bottom=margin_bottom, top=1 - margin_top) if cols > 1: hspace = hspaces[:, 1:-1].max() + hpad_inches / fig_width_inch # axes widths: h_axes = (1 - margin_right - margin_left - hspace * (cols - 1)) / cols if h_axes < 0: _api.warn_external('Tight layout not applied. tight_layout ' 'cannot make axes width small enough to ' 'accommodate all axes decorations') return None else: kwargs[""wspace""] = hspace / h_axes if rows > 1: vspace = vspaces[1:-1, :].max() + vpad_inches / fig_height_inch v_axes = (1 - margin_top - margin_bottom - vspace * (rows - 1)) / rows if v_axes < 0: _api.warn_external('Tight layout not applied. tight_layout ' 'cannot make axes height small enough to ' 'accommodate all axes decorations') return None else: kwargs[""hspace""] = vspace / v_axes return kwargs " 8795,"def test_bot_mixed_mode_removal(mockbot): """"""Ensure mixed mode types like ``-h+a`` are handled. Sopel 6.6.6 and older did not handle this correctly. .. seealso:: Github Issue #1575. """""" irc = IRCFactory(mockbot) irc.channel_joined('#test', ['Uvoice', 'Uop']) irc.mode_set('#test', '+qao', ['Uvoice', 'Uvoice', 'Uvoice']) assert mockbot.channels[""#test""].privileges[Identifier(""Uop"")] == 0 assert mockbot.channels[""#test""].privileges[Identifier(""Uvoice"")] == ( ADMIN + OWNER + OP), 'Uvoice got +q, +a, and +o modes' irc.mode_set('#test', '-o+o-qa+v', [ 'Uvoice', 'Uop', 'Uvoice', 'Uvoice', 'Uvoice']) assert mockbot.channels[""#test""].privileges[Identifier(""Uop"")] == OP, ( 'OP got +o only') assert mockbot.channels[""#test""].privileges[Identifier(""Uvoice"")] == VOICE, ( 'Uvoice got -o, -q, -a, then +v') ","def test_bot_mixed_mode_removal(mockbot): """"""Ensure mixed mode types like ``-h+a`` are handled. Sopel 6.6.6 and older did not handle this correctly. .. seealso:: GitHub issue #1575. 
"""""" irc = IRCFactory(mockbot) irc.channel_joined('#test', ['Uvoice', 'Uop']) irc.mode_set('#test', '+qao', ['Uvoice', 'Uvoice', 'Uvoice']) assert mockbot.channels[""#test""].privileges[Identifier(""Uop"")] == 0 assert mockbot.channels[""#test""].privileges[Identifier(""Uvoice"")] == ( ADMIN + OWNER + OP), 'Uvoice got +q, +a, and +o modes' irc.mode_set('#test', '-o+o-qa+v', [ 'Uvoice', 'Uop', 'Uvoice', 'Uvoice', 'Uvoice']) assert mockbot.channels[""#test""].privileges[Identifier(""Uop"")] == OP, ( 'OP got +o only') assert mockbot.channels[""#test""].privileges[Identifier(""Uvoice"")] == VOICE, ( 'Uvoice got -o, -q, -a, then +v') " 35585,"def _densenet(arch: str, growth_rate: int, block_config: Tuple[int], num_init_features: int, pretrained: bool, progress: bool, **kwargs) -> DenseNet: model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) if pretrained: _load_state_dict(model, model_urls[arch], progress) return model ","def _densenet(arch: str, growth_rate: int, block_config: Tuple[int], num_init_features: int, pretrained: bool, progress: bool, **kwargs: Any) -> DenseNet: model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) if pretrained: _load_state_dict(model, model_urls[arch], progress) return model " 37453,"def _assemble_circuit( circuit: QuantumCircuit, run_config: RunConfig ) -> Tuple[QasmQobjExperiment, Optional[PulseLibrary]]: """"""Assemble one circuit. Args: circuit: circuit to assemble run_config: configuration of the runtime environment Returns: One experiment for the QasmQobj, and pulse library for pulse gates (which could be None) Raises: QiskitError: when the circuit has unit other than 'dt'. """""" if circuit.unit != 'dt': raise QiskitError(""Unable to assemble circuit with unit '{}', which must be 'dt'."" .format(circuit.unit)) # header data num_qubits = 0 memory_slots = 0 qubit_labels = [] clbit_labels = [] qreg_sizes = [] creg_sizes = [] clbit_list = [] for qreg in circuit.qregs: qreg_sizes.append([qreg.name, qreg.size]) for j in range(qreg.size): qubit_labels.append([qreg.name, j]) num_qubits += qreg.size for creg in circuit.cregs: creg_sizes.append([creg.name, creg.size]) for j in range(creg.size): clbit_labels.append([creg.name, j]) memory_slots += creg.size for creg in circuit.cregs: for bit in creg: clbit_list.append(bit) qubit_indices = {qubit: idx for idx, qubit in enumerate(circuit.qubits)} clbit_indices = {clbit: idx for idx, clbit in enumerate(circuit.clbits)} # TODO: why do we need creq_sizes and qreg_sizes in header # TODO: we need to rethink memory_slots as they are tied to classical bit metadata = circuit.metadata if metadata is None: metadata = {} header = QobjExperimentHeader(qubit_labels=qubit_labels, n_qubits=num_qubits, qreg_sizes=qreg_sizes, clbit_labels=clbit_labels, memory_slots=memory_slots, creg_sizes=creg_sizes, name=circuit.name, global_phase=float(circuit.global_phase), metadata=metadata) # TODO: why do we need n_qubits and memory_slots in both the header and the config config = QasmQobjExperimentConfig(n_qubits=num_qubits, memory_slots=memory_slots) calibrations, pulse_library = _assemble_pulse_gates(circuit, run_config) if calibrations: config.calibrations = calibrations # Convert conditionals from QASM-style (creg ?= int) to qobj-style # (register_bit ?= 1), by assuming device has unlimited register slots # (supported only for simulators). 
Map all measures to a register matching # their clbit_index, create a new register slot for every conditional gate # and add a bfunc to map the creg=val mask onto the gating register bit. is_conditional_experiment = any(op.condition for (op, qargs, cargs) in circuit.data) max_conditional_idx = 0 instructions = [] for op_context in circuit.data: instruction = op_context[0].assemble() # Add register attributes to the instruction qargs = op_context[1] cargs = op_context[2] if qargs: instruction.qubits = [qubit_indices[qubit] for qubit in qargs] if cargs: instruction.memory = [clbit_indices[clbit] for clbit in cargs] # If the experiment has conditional instructions, assume every # measurement result may be needed for a conditional gate. if instruction.name == ""measure"" and is_conditional_experiment: instruction.register = [clbit_indices[clbit] for clbit in cargs] # To convert to a qobj-style conditional, insert a bfunc prior # to the conditional instruction to map the creg ?= val condition # onto a gating register bit. if hasattr(instruction, '_condition'): ctrl_reg, ctrl_val = instruction._condition mask = 0 val = 0 if isinstance(ctrl_reg, Clbit): mask = (1 << clbit_list.index(ctrl_reg)) val = (ctrl_val & 1) << clbit_list.index(ctrl_reg) else: for clbit in clbit_list: if clbit in ctrl_reg: mask |= (1 << clbit_list.index(clbit)) val |= (((ctrl_val >> list(ctrl_reg).index(clbit)) & 1) << clbit_list.index(clbit)) conditional_reg_idx = memory_slots + max_conditional_idx conversion_bfunc = QasmQobjInstruction(name='bfunc', mask=""0x%X"" % mask, relation='==', val=""0x%X"" % val, register=conditional_reg_idx) instructions.append(conversion_bfunc) instruction.conditional = conditional_reg_idx max_conditional_idx += 1 # Delete condition attribute now that we have replaced it with # the conditional and bfunc del instruction._condition instructions.append(instruction) return (QasmQobjExperiment(instructions=instructions, header=header, config=config), pulse_library) ","def _assemble_circuit( circuit: QuantumCircuit, run_config: RunConfig ) -> Tuple[QasmQobjExperiment, Optional[PulseLibrary]]: """"""Assemble one circuit. Args: circuit: circuit to assemble run_config: configuration of the runtime environment Returns: One experiment for the QasmQobj, and pulse library for pulse gates (which could be None) Raises: QiskitError: when the circuit has unit other than 'dt'. 
"""""" if circuit.unit != 'dt': raise QiskitError(""Unable to assemble circuit with unit '{}', which must be 'dt'."" .format(circuit.unit)) # header data num_qubits = 0 memory_slots = 0 qubit_labels = [] clbit_labels = [] qreg_sizes = [] creg_sizes = [] clbit_list = circuit.qubits for qreg in circuit.qregs: qreg_sizes.append([qreg.name, qreg.size]) for j in range(qreg.size): qubit_labels.append([qreg.name, j]) num_qubits += qreg.size for creg in circuit.cregs: creg_sizes.append([creg.name, creg.size]) for j in range(creg.size): clbit_labels.append([creg.name, j]) memory_slots += creg.size for creg in circuit.cregs: for bit in creg: clbit_list.append(bit) qubit_indices = {qubit: idx for idx, qubit in enumerate(circuit.qubits)} clbit_indices = {clbit: idx for idx, clbit in enumerate(circuit.clbits)} # TODO: why do we need creq_sizes and qreg_sizes in header # TODO: we need to rethink memory_slots as they are tied to classical bit metadata = circuit.metadata if metadata is None: metadata = {} header = QobjExperimentHeader(qubit_labels=qubit_labels, n_qubits=num_qubits, qreg_sizes=qreg_sizes, clbit_labels=clbit_labels, memory_slots=memory_slots, creg_sizes=creg_sizes, name=circuit.name, global_phase=float(circuit.global_phase), metadata=metadata) # TODO: why do we need n_qubits and memory_slots in both the header and the config config = QasmQobjExperimentConfig(n_qubits=num_qubits, memory_slots=memory_slots) calibrations, pulse_library = _assemble_pulse_gates(circuit, run_config) if calibrations: config.calibrations = calibrations # Convert conditionals from QASM-style (creg ?= int) to qobj-style # (register_bit ?= 1), by assuming device has unlimited register slots # (supported only for simulators). Map all measures to a register matching # their clbit_index, create a new register slot for every conditional gate # and add a bfunc to map the creg=val mask onto the gating register bit. is_conditional_experiment = any(op.condition for (op, qargs, cargs) in circuit.data) max_conditional_idx = 0 instructions = [] for op_context in circuit.data: instruction = op_context[0].assemble() # Add register attributes to the instruction qargs = op_context[1] cargs = op_context[2] if qargs: instruction.qubits = [qubit_indices[qubit] for qubit in qargs] if cargs: instruction.memory = [clbit_indices[clbit] for clbit in cargs] # If the experiment has conditional instructions, assume every # measurement result may be needed for a conditional gate. if instruction.name == ""measure"" and is_conditional_experiment: instruction.register = [clbit_indices[clbit] for clbit in cargs] # To convert to a qobj-style conditional, insert a bfunc prior # to the conditional instruction to map the creg ?= val condition # onto a gating register bit. 
if hasattr(instruction, '_condition'): ctrl_reg, ctrl_val = instruction._condition mask = 0 val = 0 if isinstance(ctrl_reg, Clbit): mask = (1 << clbit_list.index(ctrl_reg)) val = (ctrl_val & 1) << clbit_list.index(ctrl_reg) else: for clbit in clbit_list: if clbit in ctrl_reg: mask |= (1 << clbit_list.index(clbit)) val |= (((ctrl_val >> list(ctrl_reg).index(clbit)) & 1) << clbit_list.index(clbit)) conditional_reg_idx = memory_slots + max_conditional_idx conversion_bfunc = QasmQobjInstruction(name='bfunc', mask=""0x%X"" % mask, relation='==', val=""0x%X"" % val, register=conditional_reg_idx) instructions.append(conversion_bfunc) instruction.conditional = conditional_reg_idx max_conditional_idx += 1 # Delete condition attribute now that we have replaced it with # the conditional and bfunc del instruction._condition instructions.append(instruction) return (QasmQobjExperiment(instructions=instructions, header=header, config=config), pulse_library) " 42730,"def app_args(prog: str, description: str) -> argparse.ArgumentParser: """"""Add the rotki arguments to the argument parser and return it"""""" p = argparse.ArgumentParser( prog=prog, description=description, ) p.add_argument( '--sleep-secs', type=int, default=DEFAULT_SLEEP_SECS, help=""Seconds to sleep during the main loop"", ) p.add_argument( '--data-dir', help='The directory where all data and configs are placed', type=str, default=None, ) p.add_argument( '--api-host', help='The host on which the rest API will run', default='127.0.0.1', ) p.add_argument( '--rest-api-port', help='The port on which the rest API will run', type=int, default=5042, ) p.add_argument( '--websockets-api-port', help='The port on which the websockets API will run', type=int, default=5043, ) p.add_argument( '--api-cors', help='Comma separated list of domains for the API to accept cross origin requests.', default=""http://localhost:*/*"", type=str, ) p.add_argument( '--ethrpc-port', help=""The port on which to communicate with an ethereum client's RPC."", default=8545, ) p.add_argument( '--logfile', help='The name of the file to write log entries to', default='rotkehlchen.log', ) p.add_argument( '--logtarget', help='Choose where logging entries will be sent. Valid values are ""file and ""stdout""', choices=['stdout', 'file'], default='file', ) p.add_argument( '--loglevel', help='Choose the logging level', choices=['trace', 'debug', 'info', 'warning', 'error', 'critical'], default='debug', ) p.add_argument( '--logfromothermodules', help=( 'If given then logs from all imported modules that use the ' 'logging system will also be visible.' ), action='store_true', ) p.add_argument( '--max-size-in-mb-all-logs', help='This is the maximum size in megabytes that will be used for all rotki logs', default=DEFAULT_MAX_LOG_SIZE_IN_MB, type=int, ) p.add_argument( '--max-logfiles-num', help='This is the maximum number of logfiles to keep', default=DEFAULT_MAX_LOG_BACKUP_FILES, type=int, ) p.add_argument( '--sqlite-instructions', help='Instructions per sqlite context switch. 
Should be a positive integer or zero to disable.', # noqa: E501 default=DEFAULT_SQL_VM_INSTRUCTIONS_CB, type=_positive_int_or_zero, ) p.add_argument( 'version', help='Shows the rotkehlchen version', action=CommandAction, ) return p ","def app_args(prog: str, description: str) -> argparse.ArgumentParser: """"""Add the rotki arguments to the argument parser and return it"""""" p = argparse.ArgumentParser( prog=prog, description=description, ) p.add_argument( '--sleep-secs', type=int, default=DEFAULT_SLEEP_SECS, help='Seconds to sleep during the main loop', ) p.add_argument( '--data-dir', help='The directory where all data and configs are placed', type=str, default=None, ) p.add_argument( '--api-host', help='The host on which the rest API will run', default='127.0.0.1', ) p.add_argument( '--rest-api-port', help='The port on which the rest API will run', type=int, default=5042, ) p.add_argument( '--websockets-api-port', help='The port on which the websockets API will run', type=int, default=5043, ) p.add_argument( '--api-cors', help='Comma separated list of domains for the API to accept cross origin requests.', default=""http://localhost:*/*"", type=str, ) p.add_argument( '--ethrpc-port', help=""The port on which to communicate with an ethereum client's RPC."", default=8545, ) p.add_argument( '--logfile', help='The name of the file to write log entries to', default='rotkehlchen.log', ) p.add_argument( '--logtarget', help='Choose where logging entries will be sent. Valid values are ""file and ""stdout""', choices=['stdout', 'file'], default='file', ) p.add_argument( '--loglevel', help='Choose the logging level', choices=['trace', 'debug', 'info', 'warning', 'error', 'critical'], default='debug', ) p.add_argument( '--logfromothermodules', help=( 'If given then logs from all imported modules that use the ' 'logging system will also be visible.' ), action='store_true', ) p.add_argument( '--max-size-in-mb-all-logs', help='This is the maximum size in megabytes that will be used for all rotki logs', default=DEFAULT_MAX_LOG_SIZE_IN_MB, type=int, ) p.add_argument( '--max-logfiles-num', help='This is the maximum number of logfiles to keep', default=DEFAULT_MAX_LOG_BACKUP_FILES, type=int, ) p.add_argument( '--sqlite-instructions', help='Instructions per sqlite context switch. Should be a positive integer or zero to disable.', # noqa: E501 default=DEFAULT_SQL_VM_INSTRUCTIONS_CB, type=_positive_int_or_zero, ) p.add_argument( 'version', help='Shows the rotkehlchen version', action=CommandAction, ) return p " 49950,"def grabclipboard(): if sys.platform == ""darwin"": fh, filepath = tempfile.mkstemp("".jpg"") os.close(fh) commands = [ 'set theFile to (open for access POSIX file ""' + filepath + '"" with write permission)', ""try"", "" write (the clipboard as JPEG picture) to theFile"", ""end try"", ""close access theFile"", ] script = [""osascript""] for command in commands: script += [""-e"", command] subprocess.call(script) im = None if os.stat(filepath).st_size != 0: im = Image.open(filepath) im.load() os.unlink(filepath) return im elif sys.platform == ""win32"": fmt, data = Image.core.grabclipboard_win32() if fmt == ""file"": # CF_HDROP import struct o = struct.unpack_from(""I"", data)[0] if data[16] != 0: files = data[o:].decode(""utf-16le"").split(""\0"") return files[: files.index("""")] else: files = data[o:].decode(""mbcs"").split(""\0"") return files[: files.index("""")] if isinstance(data, bytes): import io data = io.BytesIO(data) if fmt == ""png"": from . 
import PngImagePlugin return PngImagePlugin.PngImageFile(data) elif fmt == ""DIB"": from . import BmpImagePlugin return BmpImagePlugin.DibImageFile(data) return None else: raise NotImplementedError(""ImageGrab.grabclipboard() is macOS and Windows only"") ","def grabclipboard(): if sys.platform == ""darwin"": fh, filepath = tempfile.mkstemp("".jpg"") os.close(fh) commands = [ 'set theFile to (open for access POSIX file ""' + filepath + '"" with write permission)', ""try"", "" write (the clipboard as JPEG picture) to theFile"", ""end try"", ""close access theFile"", ] script = [""osascript""] for command in commands: script += [""-e"", command] subprocess.call(script) im = None if os.stat(filepath).st_size != 0: im = Image.open(filepath) im.load() os.unlink(filepath) return im elif sys.platform == ""win32"": fmt, data = Image.core.grabclipboard_win32() if fmt == ""file"": # CF_HDROP import struct o = struct.unpack_from(""I"", data)[0] if data[16] != 0: files = data[o:].decode(""utf-16le"").split(""\0"") else: files = data[o:].decode(""mbcs"").split(""\0"") return files[: files.index("""")] if isinstance(data, bytes): import io data = io.BytesIO(data) if fmt == ""png"": from . import PngImagePlugin return PngImagePlugin.PngImageFile(data) elif fmt == ""DIB"": from . import BmpImagePlugin return BmpImagePlugin.DibImageFile(data) return None else: raise NotImplementedError(""ImageGrab.grabclipboard() is macOS and Windows only"") " 49462,"def main(): module = KatelloContentExportModule( foreman_spec=dict( repository=dict(type='entity', flat_name='id', scope=['product']), product=dict(type='entity', scope=['organization']), chunk_size_gb=dict(required=False, type='int'), from_history_id=dict(required=False, type='str'), ), argument_spec=dict( incremental=dict(required=False, type='bool'), ), ) with module.api_connection(): module.auto_lookup_entities() incremental = module.params['incremental'] endpoint = 'content_export_incrementals' if incremental else 'content_exports' if module.params.get('from_history_id') and incremental is not True: module.fail_json(msg='from_history_id is only valid for incremental exports') payload = _flatten_entity(module.foreman_params, module.foreman_spec) task = module.resource_action(endpoint, 'repository', payload) module.exit_json(task=task) ","def main(): module = KatelloContentExportModule( foreman_spec=dict( repository=dict(type='entity', flat_name='id', scope=['product'], required=True), product=dict(type='entity', scope=['organization'], required=True), chunk_size_gb=dict(required=False, type='int'), from_history_id=dict(required=False, type='str'), ), argument_spec=dict( incremental=dict(required=False, type='bool'), ), ) with module.api_connection(): module.auto_lookup_entities() incremental = module.params['incremental'] endpoint = 'content_export_incrementals' if incremental else 'content_exports' if module.params.get('from_history_id') and incremental is not True: module.fail_json(msg='from_history_id is only valid for incremental exports') payload = _flatten_entity(module.foreman_params, module.foreman_spec) task = module.resource_action(endpoint, 'repository', payload) module.exit_json(task=task) " 43651,"def spin2(n_electrons, n_orbitals, mapping=""jordan_wigner""): r""""""Computes the total spin operator :math:`\hat{S}^2`. The total spin operator :math:`\hat{S}^2` is given by .. 
math:: \hat{S}^2 = \frac{3}{4}N + \sum_{ \bm{\alpha}, \bm{\beta}, \bm{\gamma}, \bm{\delta} } \langle \bm{\alpha}, \bm{\beta} \vert \hat{s}_1 \cdot \hat{s}_2 \vert \bm{\gamma}, \bm{\delta} \rangle ~ \hat{c}_\bm{\alpha}^\dagger \hat{c}_\bm{\beta}^\dagger \hat{c}_\bm{\gamma} \hat{c}_\bm{\delta}, where the two-particle matrix elements are computedas, .. math:: \langle \bm{\alpha}, \bm{\beta} \vert \hat{s}_1 \cdot \hat{s}_2 \vert \bm{\gamma}, \bm{\delta} \rangle = && \delta_{\alpha,\delta} \delta_{\beta,\gamma} \\ && \times \left( \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}+1} \delta_{s_{z_\beta}, s_{z_\gamma}-1} + \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}-1} \delta_{s_{z_\beta}, s_{z_\gamma}+1} + s_{z_\alpha} s_{z_\beta} \delta_{s_{z_\alpha}, s_{z_\delta}} \delta_{s_{z_\beta}, s_{z_\gamma}} \right). In the equations above :math:`N` is the number of electrons, :math:`\alpha` refer to the quantum numbers of the spatial wave function and :math:`s_{z_\alpha}` is the spin projection of the single-particle state :math:`\vert \bm{\alpha} \rangle \equiv \vert \alpha, s_{z_\alpha} \rangle`. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the particle creation and annihilation operators, respectively. Args: n_electrons (int): Number of electrons. If an active space is defined, 'n_electrons' is the number of active electrons. n_orbitals (int): Number of orbitals. If an active space is defined, 'n_orbitals' is the number of active orbitals. mapping (str): Specifies the transformation to map the fermionic operator to the Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``. Returns: pennylane.Hamiltonian: the total spin observable :math:`\hat{S}^2` **Example** >>> n_electrons = 2 >>> n_orbitals = 2 >>> S2 = spin2(n_electrons, n_orbitals, mapping=""jordan_wigner"") >>> print(S2) (0.75) [I0] + (0.375) [Z1] + (-0.375) [Z0 Z1] + (0.125) [Z0 Z2] + (0.375) [Z0] + (-0.125) [Z0 Z3] + (-0.125) [Z1 Z2] + (0.125) [Z1 Z3] + (0.375) [Z2] + (0.375) [Z3] + (-0.375) [Z2 Z3] + (0.125) [Y0 X1 Y2 X3] + (0.125) [Y0 Y1 X2 X3] + (0.125) [Y0 Y1 Y2 Y3] + (-0.125) [Y0 X1 X2 Y3] + (-0.125) [X0 Y1 Y2 X3] + (0.125) [X0 X1 X2 X3] + (0.125) [X0 X1 Y2 Y3] + (0.125) [X0 Y1 X2 Y3] """""" if n_electrons <= 0: raise ValueError( ""'n_electrons' must be greater than 0; got for 'n_electrons' {}"".format(n_electrons) ) if n_orbitals <= 0: raise ValueError( ""'n_orbitals' must be greater than 0; got for 'n_orbitals' {}"".format(n_orbitals) ) sz = np.where(np.arange(2 * n_orbitals) % 2 == 0, 0.5, -0.5) table = _spin2_matrix_elements(sz) return observable(table, init_term=3 / 4 * n_electrons, mapping=mapping) ","def spin2(n_electrons, n_orbitals, mapping=""jordan_wigner""): r""""""Computes the total spin operator :math:`\hat{S}^2`. The total spin operator :math:`\hat{S}^2` is given by .. math:: \hat{S}^2 = \frac{3}{4}N + \sum_{ \bm{\alpha}, \bm{\beta}, \bm{\gamma}, \bm{\delta} } \langle \bm{\alpha}, \bm{\beta} \vert \hat{s}_1 \cdot \hat{s}_2 \vert \bm{\gamma}, \bm{\delta} \rangle ~ \hat{c}_\bm{\alpha}^\dagger \hat{c}_\bm{\beta}^\dagger \hat{c}_\bm{\gamma} \hat{c}_\bm{\delta}, where the two-particle matrix elements are computed as, .. 
math:: \langle \bm{\alpha}, \bm{\beta} \vert \hat{s}_1 \cdot \hat{s}_2 \vert \bm{\gamma}, \bm{\delta} \rangle = && \delta_{\alpha,\delta} \delta_{\beta,\gamma} \\ && \times \left( \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}+1} \delta_{s_{z_\beta}, s_{z_\gamma}-1} + \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}-1} \delta_{s_{z_\beta}, s_{z_\gamma}+1} + s_{z_\alpha} s_{z_\beta} \delta_{s_{z_\alpha}, s_{z_\delta}} \delta_{s_{z_\beta}, s_{z_\gamma}} \right). In the equations above :math:`N` is the number of electrons, :math:`\alpha` refer to the quantum numbers of the spatial wave function and :math:`s_{z_\alpha}` is the spin projection of the single-particle state :math:`\vert \bm{\alpha} \rangle \equiv \vert \alpha, s_{z_\alpha} \rangle`. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the particle creation and annihilation operators, respectively. Args: n_electrons (int): Number of electrons. If an active space is defined, 'n_electrons' is the number of active electrons. n_orbitals (int): Number of orbitals. If an active space is defined, 'n_orbitals' is the number of active orbitals. mapping (str): Specifies the transformation to map the fermionic operator to the Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``. Returns: pennylane.Hamiltonian: the total spin observable :math:`\hat{S}^2` **Example** >>> n_electrons = 2 >>> n_orbitals = 2 >>> S2 = spin2(n_electrons, n_orbitals, mapping=""jordan_wigner"") >>> print(S2) (0.75) [I0] + (0.375) [Z1] + (-0.375) [Z0 Z1] + (0.125) [Z0 Z2] + (0.375) [Z0] + (-0.125) [Z0 Z3] + (-0.125) [Z1 Z2] + (0.125) [Z1 Z3] + (0.375) [Z2] + (0.375) [Z3] + (-0.375) [Z2 Z3] + (0.125) [Y0 X1 Y2 X3] + (0.125) [Y0 Y1 X2 X3] + (0.125) [Y0 Y1 Y2 Y3] + (-0.125) [Y0 X1 X2 Y3] + (-0.125) [X0 Y1 Y2 X3] + (0.125) [X0 X1 X2 X3] + (0.125) [X0 X1 Y2 Y3] + (0.125) [X0 Y1 X2 Y3] """""" if n_electrons <= 0: raise ValueError( ""'n_electrons' must be greater than 0; got for 'n_electrons' {}"".format(n_electrons) ) if n_orbitals <= 0: raise ValueError( ""'n_orbitals' must be greater than 0; got for 'n_orbitals' {}"".format(n_orbitals) ) sz = np.where(np.arange(2 * n_orbitals) % 2 == 0, 0.5, -0.5) table = _spin2_matrix_elements(sz) return observable(table, init_term=3 / 4 * n_electrons, mapping=mapping) " 14503,"def test_attach_calendar(runner): runner = runner(days=2) result = runner.invoke(main_khal, ['printcalendars']) assert set(result.output.split('\n')[:3]) == {['one', 'two', 'three']} assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-a', 'one']) assert result.output == 'one\n' assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-d', 'one']) assert set(result.output.split('\n')[:2]) == {['two', 'three']} assert not result.exception ","def test_attach_calendar(runner): runner = runner(days=2) result = runner.invoke(main_khal, ['printcalendars']) assert set(result.output.split('\n')[:3]) == {'one', 'two', 'three'} assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-a', 'one']) assert result.output == 'one\n' assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-d', 'one']) assert set(result.output.split('\n')[:2]) == {['two', 'three']} assert not result.exception " 56659,"def load(config_file): """"""legacy function to load openlibary config. The loaded config will be available via runtime_config var in this module. This doesn't affect the global config. 
WARNING: This function is deprecated, please use load_config instead. """""" # for historic reasons global runtime_config runtime_config = yaml.safe_load(open(config_file)) ","def load(config_file): """"""legacy function to load openlibary config. The loaded config will be available via runtime_config var in this module. This doesn't affect the global config. WARNING: This function is deprecated, please use load_config instead. """""" # for historic reasons global runtime_config with open(config_file) as in_file: runtime_config = yaml.safe_load(in_file) " 43689,"def edge_driver(graph, reward): r""""""Returns the edge-driver cost Hamiltonian component. Given some graph, :math:`G`, this method will return a Hamiltonian that assigns lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints. See usage details for more information. Args: graph (nx.Graph): The graph on which the Hamiltonian is defined reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian Returns: .Hamiltonian **Example** >>> graph = nx.Graph([(0, 1), (1, 2)]) >>> hamiltonian = qaoa.edge_driver(graph, [""11"", ""10"", ""01""]) >>> print(hamiltonian) (0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2] ..UsageDetails:: The goal of many combinatorial problems that can be solved with QAOA is to find a `Graph colouring `__ of some supplied graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class of graph colouring problems that only admit two colours, as we can easily encode these two colours using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`, :math:`01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints. When constructing QAOA cost functions, one must ""penalize"" certain states of the graph, and ""reward"" others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour pairs (which each describe a possible state of a graph edge), the `edge_driver` method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example, given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle}` and the graph :math:`G`, the `edge_driver` method will output the following Hamiltonian: ..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big) where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states :math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state :math:`|11\rangle`. .. Note:: If either of the states :math:`\01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of ""order"" of edge endpoints, so these two states are effectively the same. 
"""""" allowed = [""00"", ""01"", ""10"", ""11""] if not all([e in allowed for e in reward]): raise ValueError(""Encountered invalid entry in 'reward', expected 2-bit bitstrings."") if ""01"" in reward and ""10"" not in reward or ""10"" in reward and ""01"" not in reward: raise ValueError( ""'reward' cannot contain either '10' or '01', must contain neither or both."" ) if not isinstance(graph, nx.Graph): raise ValueError(""Input graph must be a nx.Graph, got {}"".format(type(graph).__name__)) coeffs = [] ops = [] if len(reward) == 0 or len(reward) == 4: coeffs = [1 for _ in graph.nodes] ops = [qml.Identity(v) for v in graph.nodes] else: reward = list(set(reward) - {""01""}) sign = -1 if len(reward) == 2: reward = list({""00"", ""10"", ""11""} - set(reward)) sign = 1 reward = reward[0] if reward == ""00"": for e in graph.edges: coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign]) ops.extend( [qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])] ) if reward == ""10"": for e in graph.edges: coeffs.append(-0.5 * sign) ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1])) if reward == ""11"": for e in graph.edges: coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign]) ops.extend( [qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])] ) return qml.Hamiltonian(coeffs, ops) ","def edge_driver(graph, reward): r""""""Returns the edge-driver cost Hamiltonian component. Given some graph, :math:`G`, this method will return a Hamiltonian that assigns lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints. See usage details for more information. Args: graph (nx.Graph): The graph on which the Hamiltonian is defined reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian Returns: .Hamiltonian **Example** >>> graph = nx.Graph([(0, 1), (1, 2)]) >>> hamiltonian = qaoa.edge_driver(graph, [""11"", ""10"", ""01""]) >>> print(hamiltonian) (0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2] ..UsageDetails:: The goal of many combinatorial problems that can be solved with QAOA is to find a `Graph colouring `__ of some supplied graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class of graph colouring problems that only admit two colours, as we can easily encode these two colours using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`, :math:`01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints. When constructing QAOA cost functions, one must ""penalize"" certain states of the graph, and ""reward"" others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour pairs (which each describe a possible state of a graph edge), the `edge_driver` method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. 
For example, given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle}` and the graph :math:`G`, the ``edge_driver()`` function will output the following Hamiltonian: ..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big) where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states :math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state :math:`|11\rangle`. .. Note:: If either of the states :math:`\01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of ""order"" of edge endpoints, so these two states are effectively the same. """""" allowed = [""00"", ""01"", ""10"", ""11""] if not all([e in allowed for e in reward]): raise ValueError(""Encountered invalid entry in 'reward', expected 2-bit bitstrings."") if ""01"" in reward and ""10"" not in reward or ""10"" in reward and ""01"" not in reward: raise ValueError( ""'reward' cannot contain either '10' or '01', must contain neither or both."" ) if not isinstance(graph, nx.Graph): raise ValueError(""Input graph must be a nx.Graph, got {}"".format(type(graph).__name__)) coeffs = [] ops = [] if len(reward) == 0 or len(reward) == 4: coeffs = [1 for _ in graph.nodes] ops = [qml.Identity(v) for v in graph.nodes] else: reward = list(set(reward) - {""01""}) sign = -1 if len(reward) == 2: reward = list({""00"", ""10"", ""11""} - set(reward)) sign = 1 reward = reward[0] if reward == ""00"": for e in graph.edges: coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign]) ops.extend( [qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])] ) if reward == ""10"": for e in graph.edges: coeffs.append(-0.5 * sign) ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1])) if reward == ""11"": for e in graph.edges: coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign]) ops.extend( [qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])] ) return qml.Hamiltonian(coeffs, ops) " 39421,"def download_particles_lethe(load=True): # pragma: no cover """"""Download a particles dataset generated by lethe. See `PyVista discussions #1984 `_ Parameters ---------- load : bool, optional Load the dataset after downloading it when ``True``. Set this to ``False`` and only the filename will be returned. Returns ------- pyvista.UnstructuredGrid or str DataSet or filename depending on ``load``. Examples -------- Download the particles dataset and plot it after generating glyphs. >>> from pyvista import examples >>> particles = examples.download_particles_lethe() >>> particles.plot( ... render_points_as_spheres=True, ... style='points', ... scalars='Velocity', ... background='w', ... scalar_bar_args={'color': 'k'}, ... cmap='bwr' ... ) """""" return _download_and_read('lethe/result_particles.20000.0000.vtu', load=load) ","def download_particles_lethe(load=True): # pragma: no cover """"""Download a particles dataset generated by `lethe `_ . See `PyVista discussions #1984 `_ Parameters ---------- load : bool, optional Load the dataset after downloading it when ``True``. Set this to ``False`` and only the filename will be returned. Returns ------- pyvista.UnstructuredGrid or str DataSet or filename depending on ``load``. 
Examples -------- Download the particles dataset and plot it after generating glyphs. >>> from pyvista import examples >>> particles = examples.download_particles_lethe() >>> particles.plot( ... render_points_as_spheres=True, ... style='points', ... scalars='Velocity', ... background='w', ... scalar_bar_args={'color': 'k'}, ... cmap='bwr' ... ) """""" return _download_and_read('lethe/result_particles.20000.0000.vtu', load=load) " 32747,"def _w_makeRecord(func, instance, args, kwargs): record = func(*args, **kwargs) # add correlation identifiers to LogRecord trace_id, span_id = correlation.get_correlation_ids() if trace_id: record.trace_id = trace_id record.span_id = span_id else: record.trace_id = '' record.span_id = '' return record ","def _w_makeRecord(func, instance, args, kwargs): record = func(*args, **kwargs) # add correlation identifiers to LogRecord trace_id, span_id = correlation.get_correlation_ids() if trace_id: record.trace_id = trace_id record.span_id = span_id else: record.trace_id = '' record.span_id = 0 return record " 10817,"def custom_reduce(cls, states): """"""For customizing object serialization in `__reduce__`. Object states provided here is used as keyword arguments to `._rebuild()` class method. Parameters ---------- states : dict Dictionary of object states to be serialized. Returns ------- result : tuple This tuple conforms to the return type requirement for `__reduce__`. """""" return custom_rebuild, (_CustomPickled(cls, states),) ","def custom_reduce(cls, states): """"""For customizing object serialization in `__reduce__`. Object states provided here are used as keyword arguments to the `._rebuild()` class method. Parameters ---------- states : dict Dictionary of object states to be serialized. Returns ------- result : tuple This tuple conforms to the return type requirement for `__reduce__`. """""" return custom_rebuild, (_CustomPickled(cls, states),) " 49517,"def multidimensional_deconfliction(association_set): """"""Solves the Multidimensional Assignment Problem (MAP) The assignment problem becomes more complex when time is added as a dimension. This basic solution finds all the conflicts in an association set and then creates a matrix of sums of conflicts in seconds, which is then passed to assign2D to solve as a simple 2D assignment problem. Therefore, each object will only ever be assigned to one other at any one time. In the case of an association that only partially overlaps, the time range of the ""weaker"" one (the one eliminated by assign2D) will be trimmed until there is no conflict. Due to the possibility of more than two conflicting associations at the same time, this algorithm is recursive, but it is not expected many (if any) recursions will be required for most uses. 
Parameters ---------- association_set: The :class:`AssociationSet` to de-conflict Returns ------- : :class:`AssociationSet` The association set without contradictory associations """""" # Check if there are any conflicts no_conflicts = True for assoc1 in association_set: for assoc2 in association_set: if conflicts(assoc1, assoc2): no_conflicts = False if no_conflicts: return association_set objects = list(association_set.object_set) length = len(objects) totals = numpy.zeros((length, length)) # Time objects i and j are associated for in total for association in association_set.associations: if len(association.objects) != 2: raise ValueError(""Supplied set must only contain pairs of associated objects"") obj_indices = [objects.index(list(association.objects)[0]), objects.index(list(association.objects)[1])] totals[obj_indices[0], obj_indices[1]] = association.time_range.duration.total_seconds() make_symmetric(totals) totals = numpy.rint(totals).astype(int) numpy.fill_diagonal(totals, 0) # Don't want to count associations of an object with itself solved_2d = assign2D(totals, maximize=True)[1] winning_indices = [] # Pairs that are chosen by assign2D for i in range(length): if i != solved_2d[i]: winning_indices.append([i, solved_2d[i]]) cleaned_set = AssociationSet() if len(winning_indices) == 0: raise ValueError(""Problem unsolvable using this method"") for winner in winning_indices: assoc = association_set.associations_including_objects({objects[winner[0]], objects[winner[1]]}) cleaned_set.add(assoc) association_set.remove(assoc) # Recursive step runners_up = set() for assoc1 in association_set.associations: for assoc2 in association_set.associations: if conflicts(assoc1, assoc2): runners_up = multidimensional_deconfliction(association_set).associations # At this point, none of association_set should conflict with one another for runner_up in runners_up: for winner in cleaned_set: if conflicts(runner_up, winner): runner_up.time_range.minus(winner.time_range) if runner_up.time_range is not None: cleaned_set.add(runner_up) else: runners_up.remove(runner_up) return cleaned_set ","def multidimensional_deconfliction(association_set): """"""Solves the Multidimensional Assignment Problem (MAP) The assignment problem becomes more complex when time is added as a dimension. This basic solution finds all the conflicts in an association set and then creates a matrix of sums of conflicts in seconds, which is then passed to assign2D to solve as a simple 2D assignment problem. Therefore, each object will only ever be assigned to one other at any one time. In the case of an association that only partially overlaps, the time range of the ""weaker"" one (the one eliminated by assign2D) will be trimmed until there is no conflict. Due to the possibility of more than two conflicting associations at the same time, this algorithm is recursive, but it is not expected many (if any) recursions will be required for most uses. 
Parameters ---------- association_set: The :class:`AssociationSet` to de-conflict Returns ------- : :class:`AssociationSet` The association set without contradictory associations """""" # Check if there are any conflicts no_conflicts = True for assoc1 in association_set: for assoc2 in association_set: if conflicts(assoc1, assoc2): no_conflicts = False if no_conflicts: return association_set objects = list(association_set.object_set) length = len(objects) totals = numpy.zeros((length, length)) # Time objects i and j are associated for in total for association in association_set.associations: if len(association.objects) != 2: raise ValueError(""Supplied set must only contain pairs of associated objects"") i, j = (objects.index(object_) for object_ in association.objects) totals[i, j] = association.time_range.duration.total_seconds() make_symmetric(totals) totals = numpy.rint(totals).astype(int) numpy.fill_diagonal(totals, 0) # Don't want to count associations of an object with itself solved_2d = assign2D(totals, maximize=True)[1] winning_indices = [] # Pairs that are chosen by assign2D for i in range(length): if i != solved_2d[i]: winning_indices.append([i, solved_2d[i]]) cleaned_set = AssociationSet() if len(winning_indices) == 0: raise ValueError(""Problem unsolvable using this method"") for winner in winning_indices: assoc = association_set.associations_including_objects({objects[winner[0]], objects[winner[1]]}) cleaned_set.add(assoc) association_set.remove(assoc) # Recursive step runners_up = set() for assoc1 in association_set.associations: for assoc2 in association_set.associations: if conflicts(assoc1, assoc2): runners_up = multidimensional_deconfliction(association_set).associations # At this point, none of association_set should conflict with one another for runner_up in runners_up: for winner in cleaned_set: if conflicts(runner_up, winner): runner_up.time_range.minus(winner.time_range) if runner_up.time_range is not None: cleaned_set.add(runner_up) else: runners_up.remove(runner_up) return cleaned_set " 28313,"def add_data_to_dynamic_columns( conn: ConnectionPlus, row_id: int, data: Mapping[str, Any], table_name: str = ""runs"" ) -> None: """""" Add columns from keys and insert values. (updates if exists, create otherwise) Note that None is not a valid value, and keys should be valid SQLite column names (i.e. contain only alphanumeric characters and underscores). Args: - conn: the connection to the sqlite database - row_id: the row to add the metadata at - data: the data to add - table_name: the table to add to, defaults to runs """""" try: insert_data_in_dynamic_columns(conn, row_id, table_name, data) except sqlite3.OperationalError as e: # this means that the column already exists # so just insert the new value if str(e).startswith(""duplicate""): update_columns(conn, row_id, table_name, data) else: raise e ","def add_data_to_dynamic_columns( conn: ConnectionPlus, row_id: int, data: Mapping[str, Any], table_name: str = ""runs"" ) -> None: """""" Add columns from keys and insert values. (updates if exists, creates otherwise) Note that None is not a valid value, and keys should be valid SQLite column names (i.e. contain only alphanumeric characters and underscores). 
Args: - conn: the connection to the sqlite database - row_id: the row to add the metadata at - data: the data to add - table_name: the table to add to, defaults to runs """""" try: insert_data_in_dynamic_columns(conn, row_id, table_name, data) except sqlite3.OperationalError as e: # this means that the column already exists # so just insert the new value if str(e).startswith(""duplicate""): update_columns(conn, row_id, table_name, data) else: raise e " 52286,"def get_parser(): parser = SCTArgumentParser( description='Compute SNR using methods described in [Dietrich et al., Measurement of' ' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel ' 'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].' ) mandatoryArguments = parser.add_argument_group(""\nMANDATORY ARGUMENTS"") mandatoryArguments.add_argument( '-i', required=True, help='3D or 4D data to compute the SNR on (along the 4th dimension). Example: b0s.nii.gz', metavar=Metavar.file) optional = parser.add_argument_group(""\nOPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit"") optional.add_argument( '-m', help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz', metavar=Metavar.file, default='') optional.add_argument( '-m-noise', help=""Binary (or weighted) mask within which noise will be calculated. Only valid for '-method single'."", metavar=Metavar.file, default='') optional.add_argument( '-method', help='R|Method to use to compute the SNR (default: diff):\n' ""- diff: Substract two volumes (defined by -vol) and estimate noise variance within the ROI "" ""(flag '-m' is required). Requires a 4D volume.\n"" ""- mult: Estimate noise variance over time across volumes specified with '-vol'. Requires a 4D volume.\n"" ""- single: Estimates noise variance in a 5x5 square at the corner of the image, and average the mean "" ""signal inside the ROI specified by flag '-m'. The variance and mean are corrected for Rayleigh "" ""distributions. This corresponds to the cases SNRstd and SNRmean in the Dietrich et al. article. Uses a "" ""3D or a 4D volume. If a 4D volume is input, the volume to compute SNR on is specified by '-vol'."", choices=('diff', 'mult', 'single'), default='diff') optional.add_argument( '-vol', help=""Volumes to compute SNR from. Separate with ',' (Example: '-vol 0,1'), or select range "" ""using ':' (Example: '-vol 2:50'). By default, all volumes in are selected, except if '-method single' "" ""in which case the first volume is selected."", metavar=Metavar.str, default='') optional.add_argument( '-r', type=int, help='Remove temporary files.', default=1, choices=(0, 1)) optional.add_argument( '-v', metavar=Metavar.int, type=int, choices=[0, 1, 2], default=1, # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as ""if verbose == #"" in API help=""Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"") optional.add_argument( '-o', metavar=Metavar.str, type=str, default=None, help=""File name to write the computed SNR to."" ) return parser ","def get_parser(): parser = SCTArgumentParser( description='Compute SNR using methods described in [Dietrich et al., Measurement of' ' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel ' 'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].' 
) mandatoryArguments = parser.add_argument_group(""\nMANDATORY ARGUMENTS"") mandatoryArguments.add_argument( '-i', required=True, help=""R|Image to compute the SNR on. (Example: b0s.nii.gz)\n"" ""- For '-method diff' and '-method mult', the image must be 4D, as SNR will be computed "" ""along the 4th dimension.\n"" ""- For '-method single', the image can either be 3D or 4D. If a 4D image is passed, a specific "" ""3D volume should be specified using the '-vol' argument."", metavar=Metavar.file) optional = parser.add_argument_group(""\nOPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit"") optional.add_argument( '-m', help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz', metavar=Metavar.file, default='') optional.add_argument( '-m-noise', help=""Binary (or weighted) mask within which noise will be calculated. Only valid for '-method single'."", metavar=Metavar.file, default='') optional.add_argument( '-method', help='R|Method to use to compute the SNR (default: diff):\n' ""- diff: Substract two volumes (defined by -vol) and estimate noise variance within the ROI "" ""(flag '-m' is required). Requires a 4D volume.\n"" ""- mult: Estimate noise variance over time across volumes specified with '-vol'. Requires a 4D volume.\n"" ""- single: Estimates noise variance in a 5x5 square at the corner of the image, and average the mean "" ""signal inside the ROI specified by flag '-m'. The variance and mean are corrected for Rayleigh "" ""distributions. This corresponds to the cases SNRstd and SNRmean in the Dietrich et al. article. Uses a "" ""3D or a 4D volume. If a 4D volume is input, the volume to compute SNR on is specified by '-vol'."", choices=('diff', 'mult', 'single'), default='diff') optional.add_argument( '-vol', help=""Volumes to compute SNR from. Separate with ',' (Example: '-vol 0,1'), or select range "" ""using ':' (Example: '-vol 2:50'). By default, all volumes in are selected, except if '-method single' "" ""in which case the first volume is selected."", metavar=Metavar.str, default='') optional.add_argument( '-r', type=int, help='Remove temporary files.', default=1, choices=(0, 1)) optional.add_argument( '-v', metavar=Metavar.int, type=int, choices=[0, 1, 2], default=1, # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as ""if verbose == #"" in API help=""Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"") optional.add_argument( '-o', metavar=Metavar.str, type=str, default=None, help=""File name to write the computed SNR to."" ) return parser " 43782,"def angle(tensor): """"""Returns the element-wise angle of a complex tensor. Args: tensor (tensor_like): input tensor Returns: tensor_like: **Example** >>> a = torch.tensor([1.0, 1.0j, 1+1j], requires_grad=True) >>> angle(a) tensor([0.0000, 1.5708, 0.7854], grad_fn=) """""" return TensorBox(tensor).angle(wrap_output=False) ","def angle(tensor): """"""Returns the element-wise angle of a complex tensor. 
Args: tensor (tensor_like): input tensor Returns: tensor_like: **Example** >>> a = torch.tensor([1.0, 1.0j, 1+1j], requires_grad=True) >>> angle(a) tensor([0.0000, 1.5708, 0.7854], grad_fn=) """""" return TensorBox(tensor).angle(wrap_output=False) " 8389,"def template_match(observed_spectrum, spectral_templates, resample_method=""flux_conserving"", min_redshift=None, max_redshift=None, delta_redshift=None): """""" Find which spectral templates is the best fit to an observed spectrum by computing the chi-squared. If two template_spectra have the same chi2, the first template is returned. Parameters ---------- observed_spectrum : :class:`~specutils.Spectrum1D` The observed spectrum. spectral_templates : :class:`~specutils.Spectrum1D` or :class:`~specutils.SpectrumCollection` or `list` That will give a single :class:`~specutils.Spectrum1D` when iterated over. The template spectra, which will be resampled, normalized, and compared to the observed spectrum, where the smallest chi2 and normalized template spectrum will be returned. resample_method : `string` Three resample options: flux_conserving, linear_interpolated, and spline_interpolated. Anything else does not resample the spectrum. min_redshift : `float` The minimum redshift allowed max_redshift : `float` The maximum redshift allowed delta_redshift : `float` The amount the redshift will change between loops Returns ------- normalized_template_spectrum : :class:`~specutils.Spectrum1D` The template spectrum that has been normalized. chi2 : `float` The chi2 of the flux of the observed_spectrum and the flux of the normalized template spectrum. smallest_chi_index : `int` The index of the spectrum with the smallest chi2 in spectral templates. """""" if hasattr(spectral_templates, 'flux') and len(spectral_templates.flux.shape) == 1: # Account for redshift if provided if min_redshift and max_redshift and delta_redshift: redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectral_templates, min_redshift, max_redshift, delta_redshift) spectral_templates = redshifted_spectrum normalized_spectral_template, chi2 = _chi_sqaure_for_templates( observed_spectrum, spectral_templates, resample_method) return normalized_spectral_template, chi2 # At this point, the template spectrum is either a ``SpectrumCollection`` # or a multi-dimensional``Spectrum1D``. Loop through the object and return # the template spectrum with the lowest chi square and its corresponding # chi square. chi2_min = None smallest_chi_spec = None for index, spectrum in enumerate(spectral_templates): # Account for redshift if provided if min_redshift and max_redshift and delta_redshift: redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectrum, min_redshift, max_redshift, delta_redshift) spectrum = redshifted_spectrum normalized_spectral_template, chi2 = _chi_sqaure_for_templates( observed_spectrum, spectrum, resample_method) if chi2_min is None or chi2 < chi2_min: chi2_min = chi2 smallest_chi_spec = normalized_spectral_template smallest_chi_index = index return smallest_chi_spec, chi2_min, smallest_chi_index ","def template_match(observed_spectrum, spectral_templates, resample_method=""flux_conserving"", min_redshift=None, max_redshift=None, delta_redshift=None): """""" Find which spectral templates is the best fit to an observed spectrum by computing the chi-squared. If two template_spectra have the same chi2, the first template is returned. Parameters ---------- observed_spectrum : :class:`~specutils.Spectrum1D` The observed spectrum. 
spectral_templates : :class:`~specutils.Spectrum1D` or :class:`~specutils.SpectrumCollection` or `list` That will give a single :class:`~specutils.Spectrum1D` when iterated over. The template spectra, which will be resampled, normalized, and compared to the observed spectrum, where the smallest chi2 and normalized template spectrum will be returned. resample_method : `string` Three resample options: flux_conserving, linear_interpolated, and spline_interpolated. Anything else does not resample the spectrum. min_redshift : `float` The minimum redshift allowed max_redshift : `float` The maximum redshift allowed delta_redshift : `float` The amount the redshift will change between loops Returns ------- normalized_template_spectrum : :class:`~specutils.Spectrum1D` The template spectrum that has been normalized. chi2 : `float` The chi2 of the flux of the observed_spectrum and the flux of the normalized template spectrum. smallest_chi_index : `int` The index of the spectrum with the smallest chi2 in spectral templates. """""" if hasattr(spectral_templates, 'flux') and len(spectral_templates.flux.shape) == 1: # Account for redshift if provided if all(x is not None for x in (min_redshift, max_redshift, delta_redshift)): redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectral_templates, min_redshift, max_redshift, delta_redshift) spectral_templates = redshifted_spectrum normalized_spectral_template, chi2 = _chi_sqaure_for_templates( observed_spectrum, spectral_templates, resample_method) return normalized_spectral_template, chi2 # At this point, the template spectrum is either a ``SpectrumCollection`` # or a multi-dimensional``Spectrum1D``. Loop through the object and return # the template spectrum with the lowest chi square and its corresponding # chi square. chi2_min = None smallest_chi_spec = None for index, spectrum in enumerate(spectral_templates): # Account for redshift if provided if min_redshift and max_redshift and delta_redshift: redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectrum, min_redshift, max_redshift, delta_redshift) spectrum = redshifted_spectrum normalized_spectral_template, chi2 = _chi_sqaure_for_templates( observed_spectrum, spectrum, resample_method) if chi2_min is None or chi2 < chi2_min: chi2_min = chi2 smallest_chi_spec = normalized_spectral_template smallest_chi_index = index return smallest_chi_spec, chi2_min, smallest_chi_index " 3354,"def query_top_tags( params: Mapping[str, str], tag_key: str, limit: int, filter_query: Optional[str] = None, referrer: Optional[str] = None, ) -> Optional[Dict]: """""" Fetch counts by tag value, finding the top tag values for a tag key by a limit. :return: Returns the row with the value, the aggregate and the count if the query was successful Returns None if query was not successful which causes the endpoint to return early """""" with sentry_sdk.start_span( op=""discover.discover"", description=""facets.filter_transform"" ) as span: span.set_data(""query"", filter_query) snuba_filter = discover.get_filter(filter_query, params) # Resolve the public aliases into the discover dataset names. 
snuba_filter, translated_columns = discover.resolve_discover_aliases(snuba_filter) with sentry_sdk.start_span(op=""discover.discover"", description=""facets.top_tags""): conditions = [] conditions.append([""tags_key"", ""IN"", [tag_key]]) # Get the average and count to use to filter the next request to facets tag_data = discover.query( selected_columns=[ ""count()"", ""array_join(tags.value) as tags_value"", ], conditions=conditions, query=filter_query, params=params, orderby=[""-count""], functions_acl=[""array_join""], referrer=f""{referrer}.top_tags"", limit=limit, ) if len(tag_data[""data""]) <= 0: return None counts = [r[""count""] for r in tag_data[""data""]] # Return early to avoid doing more queries with 0 count transactions or aggregates for columns that dont exist if counts[0] == 0: return None if not tag_data[""data""]: return None return tag_data[""data""] ","def query_top_tags( params: Mapping[str, str], tag_key: str, limit: int, filter_query: Optional[str] = None, referrer: Optional[str] = None, ) -> Optional[List[Any]]: """""" Fetch counts by tag value, finding the top tag values for a tag key by a limit. :return: Returns the row with the value, the aggregate and the count if the query was successful Returns None if query was not successful which causes the endpoint to return early """""" with sentry_sdk.start_span( op=""discover.discover"", description=""facets.filter_transform"" ) as span: span.set_data(""query"", filter_query) snuba_filter = discover.get_filter(filter_query, params) # Resolve the public aliases into the discover dataset names. snuba_filter, translated_columns = discover.resolve_discover_aliases(snuba_filter) with sentry_sdk.start_span(op=""discover.discover"", description=""facets.top_tags""): conditions = [] conditions.append([""tags_key"", ""IN"", [tag_key]]) # Get the average and count to use to filter the next request to facets tag_data = discover.query( selected_columns=[ ""count()"", ""array_join(tags.value) as tags_value"", ], conditions=conditions, query=filter_query, params=params, orderby=[""-count""], functions_acl=[""array_join""], referrer=f""{referrer}.top_tags"", limit=limit, ) if len(tag_data[""data""]) <= 0: return None counts = [r[""count""] for r in tag_data[""data""]] # Return early to avoid doing more queries with 0 count transactions or aggregates for columns that dont exist if counts[0] == 0: return None if not tag_data[""data""]: return None return tag_data[""data""] " 20203,"def purge(url=None): akamai_config = settings.WAGTAILFRONTENDCACHE.get('akamai', {}) cloudfront_config = settings.WAGTAILFRONTENDCACHE.get( 'cloudfront', {}) if url: # Use the Wagtail frontendcache PurgeBatch to perform the purge batch = PurgeBatch() batch.add_url(url) # If the URL matches any of our cloudfront distributions, invalidate # with that backend if any(k for k in cloudfront_config.get('DISTRIBUTION_ID', {}) if k in url): logger.info('Purging {} from cloudfront'.format(url)) batch.purge(backends='cloudfront') # Otherwise invalidate with our default backend else: logger.info('Purging {} from akamai'.format(url)) batch.purge(backends='akamai') return ""Submitted invalidation for %s"" % url else: # purge_all only exists on our AkamaiBackend backend = AkamaiBackend(akamai_config) logger.info('Purging entire site from akamai') backend.purge_all() return ""Submitted invalidation for the entire site."" ","def purge(url=None): akamai_config = settings.WAGTAILFRONTENDCACHE.get('akamai', {}) cloudfront_config = settings.WAGTAILFRONTENDCACHE.get( 'cloudfront', 
{}) if url: # Use the Wagtail frontendcache PurgeBatch to perform the purge batch = PurgeBatch() batch.add_url(url) # If the URL matches any of our cloudfront distributions, invalidate # with that backend if any(k for k in cloudfront_config.get('DISTRIBUTION_ID', {}) if k in url): logger.info('Purging {} from cloudfront'.format(url)) batch.purge(backends='cloudfront') # Otherwise invalidate with our default backend else: logger.info('Purging {} from Akamai'.format(url)) batch.purge(backends='akamai') return ""Submitted invalidation for %s"" % url else: # purge_all only exists on our AkamaiBackend backend = AkamaiBackend(akamai_config) logger.info('Purging entire site from akamai') backend.purge_all() return ""Submitted invalidation for the entire site."" " 39427,"def get_cmap_safe(cmap): """"""Fetch a colormap by name from matplotlib, colorcet, or cmocean."""""" if isinstance(cmap, str): # check if this colormap has been mapped between ipygany if cmap in IPYGANY_MAP: cmap = IPYGANY_MAP[cmap] # Try colorcet first if has_module('colorcet'): import colorcet try: return colorcet.cm[cmap] except KeyError: pass # Try cmocean second if has_module('cmocean'): import cmocean try: return getattr(cmocean.cm, cmap) except AttributeError: pass # Else use Matplotlib if not has_module('matplotlib'): raise ImportError( 'The use of custom colormaps requires the installation of matplotlib.' ) from matplotlib.cm import get_cmap cmap = get_cmap(cmap) elif isinstance(cmap, list): for item in cmap: if not isinstance(item, str): raise TypeError('When inputting a list as a cmap, each item should be a string.') if not has_module('matplotlib'): raise ImportError( 'The use of custom colormaps requires the installation of matplotlib.' ) from matplotlib.colors import ListedColormap cmap = ListedColormap(cmap) return cmap ","def get_cmap_safe(cmap): """"""Fetch a colormap by name from matplotlib, colorcet, or cmocean."""""" if isinstance(cmap, str): # check if this colormap has been mapped between ipygany if cmap in IPYGANY_MAP: cmap = IPYGANY_MAP[cmap] # Try colorcet first if has_module('colorcet'): import colorcet try: return colorcet.cm[cmap] except KeyError: pass # Try cmocean second if has_module('cmocean'): import cmocean try: return getattr(cmocean.cm, cmap) except AttributeError: pass # Else use Matplotlib if not has_module('matplotlib'): raise ImportError( 'The use of custom colormaps requires the installation of matplotlib.' ) from matplotlib.cm import get_cmap cmap = get_cmap(cmap) elif isinstance(cmap, list): for item in cmap: if not isinstance(item, str): raise TypeError('When inputting a list as a cmap, each item should be a string.') if not has_module('matplotlib'): raise ImportError( 'The use of custom colormaps requires the installation of matplotlib.' ) # pragma: no cover from matplotlib.colors import ListedColormap cmap = ListedColormap(cmap) return cmap " 38522,"def duplicate_nodes(g, nodes, offset): """""" Duplicate nodes on a fracture. The number of duplication will depend on the cell topology around the node. If the node is not on a fracture 1 duplicate will be added. If the node is on a single fracture 2 duplicates will be added. If the node is on a T-intersection 3 duplicates will be added. If the node is on a X-intersection 4 duplicates will be added. Equivalently for other types of intersections. Parameters: ---------- g - The grid for which the nodes are duplicated nodes - The nodes to be duplicated offset - How far from the original node the duplications should be placed. 
"""""" # In the case of a non-zero offset (presumably intended for visualization), use a # (somewhat slow) legacy implementation which can handle this. if offset != 0: return _duplicate_nodes_with_offset(g, nodes, offset) # Nodes must be duplicated in the array of node coordinates. Moreover, the face-node # relation must be updated so that when a node is split in two or more, all faces on # each of the spitting lines / planes are assigned the same version / index of the # spit node. The modification of node numbering further means that the face-node relation # must be updated also for faces not directly involved in the splitting. # # The below implementation consists of the following major steps: # 1. Isolate clusters of cells surrounding each node to be split, and make connection maps # that include only cells within each cluster. # 2. Use the connection map to further subdivide the clusters into parts that lay on # different sides of dividing lines / planes. # 3. Modify the face-node relation by splitting nodes. Also update node numbering in # unsplit nodes. # 4. Duplicate split nodes in the coordinate array. # Bookeeping etc. cell_node = g.cell_nodes().tocsr() face_node = g.face_nodes.tocsc() cell_face = g.cell_faces num_nodes_to_duplicate = nodes.size ## Step 1 # Create a list where each item are the cells associated with a node to be expanded. cell_clusters = [ np.unique(pp.matrix_operations.slice_indices(cell_node, n)) for n in nodes ] # Number of cells in each cluster. sz_cell_clusters = [c.size for c in cell_clusters] tot_sz = np.sum([sz_cell_clusters]) # Create a mapping of cells from linear ordering to the clusters. # Separate variable for the rows - these will be used to map back from the cluster # cell numbering to the standard numbering rows_cell_map = np.hstack(cell_clusters) cell_map = sps.coo_matrix( (np.ones(tot_sz), (rows_cell_map, np.arange(tot_sz))), shape=(g.num_cells, tot_sz), ).tocsc() # Connection map between cells, limited to the cells included in the clusters. # Cells may occur more than once in the map (if several of the cell's nodes are to be # split) and there may be connections between cells associated with different nodes. cf_loc = cell_face * cell_map c2c = cf_loc.T * cf_loc # All non-zero data signifies connections; simplify the representation c2c.data = np.clip(np.abs(c2c.data), 0, 1) # The connection matrix is known to be symmetric, and we only need to handle the upper # triangular part c2c = sps.triu(c2c) # Remove matrix elements outside the blocks to decouple connections between cells # associated with different nodes. Do this by identifying elements in the sparse # storage format outside the blocks, and set their matrix values to zero. # This will leave a block diagonal connection matrix, one block per node. # All non-zero elements in c2c. row_c2c, col_c2c, dat_c2c = sps.find(c2c) # Get sorted (increasing columns) version of the matrix. This allows for iteration through # the columns of the matrix. sort_ind = np.argsort(col_c2c) sorted_rows = row_c2c[sort_ind] sorted_cols = col_c2c[sort_ind] sorted_data = dat_c2c[sort_ind] # Array to keep indices to remove remove_ind = np.zeros(sorted_rows.size, dtype=np.bool) # Array with the start of the blocks corresponding to each cluster. block_start = np.hstack((0, np.cumsum([sz_cell_clusters]))) # Iteration index for the start of the column group in the matrix fields 'indices' and # 'data' (referring to the sparse storage). col_group_start: int = 0 # Loop over all groups of columns (one group per node nodes). 
Find the matrix elements # of this block, take note of elements outside the column indices (these will be # couplings to other nodes). for bi in range(num_nodes_to_duplicate): # Data for this block ends with the first column that belongs to the next block. # Note that we only search from the start index of this block, and use this as # an offset (saves time). col_group_end: int = col_group_start + np.argmax( sorted_cols[col_group_start:] == block_start[bi + 1] ) # Special case for the last iteration: the last element in block_start has value # one higher than the number of rows, thus the equality above is never met, and # argmax returns the first element in the comparison. Correct this to let the # slice run to the end of the arrays. if bi == num_nodes_to_duplicate - 1: col_group_end = sorted_cols.size # Indices of elements in these rows. block_inds = slice(col_group_start, col_group_end) # Rows that are outside this block outside = np.logical_or( sorted_rows[block_inds] < block_start[bi], sorted_rows[block_inds] >= block_start[bi + 1], ) # Mark matrix elements belonging to outside rows for removal remove_ind[block_inds][outside] = 1 # The end of this column group becomes the start of the next one. col_group_start = col_group_end # Remove all data outside the main blocks. sorted_data[remove_ind] = 0 # Make a new, block-diagonal connection matrix. # IMPLEMENTATION NOTE: Going to a csc matrix should be straightforward, # since sc already is sorted. It is however not clear networkx will be faster # with a non-coo matrix. c2c_loc = sps.coo_matrix((sorted_data, (sorted_rows, sorted_cols)), shape=c2c.shape) # Drop all zero elements c2c_loc.eliminate_zeros() ## Step 2 # Now the connection matrix only contains connection between cells that share a node # to be duplicated. These can again be split into subclusters, that have lost their # connections due to the previous splitting of faces. # Identify these subclusters by the use of networkx graph = nx.Graph(c2c_loc) subclusters = [sorted(list(c)) for c in nx.connected_components(graph)] # For each subcluster, find its associated node (to be split) node_of_subcluster = [] search_start = 0 for comp in subclusters: # Find the first element with index one too much, then subtract one. # See the above loop (col_group_end) for further comments. # Also note we could have used any element in comp. ind = search_start + np.argmax(block_start[search_start:] > comp[0]) - 1 # Store this node index node_of_subcluster.append(ind) # Start of next search interval. search_start = ind node_of_component = np.array(node_of_subcluster) ## Step 3 # Modify the face-node relation by adjusting the node indices (field indices in the # sparse storage of the matrix). The duplicated nodes are added right after the # original node in the node ordering. Two adjustments are thus needed: First the # insertion of extra nodes, second this insertion increases the index of all nodes # with higher index. # Copy node-indices in the face-node relation. The first copy will preserve the old # node ordering. The second will carry the local adjustments due to the old_node_ind = face_node.indices.copy() new_node_ind = face_node.indices.copy() # Loop over all the subclusters of cells. The faces of the cells that have the # associated node to be split have the node index increased, depending on how many # times the node has been encountered before. # Count the number of encounters for a node. 
node_occ = np.zeros(num_nodes_to_duplicate, dtype=int) # Loop over combination of nodes and subclusters for ni, comp in zip(node_of_component, subclusters): # If the increase in node index is zero, there is no need to do anything. if node_occ[ni] == 0: node_occ[ni] += 1 continue # Map cell indexes from the ordering in the clusters back to global ordering loc_cells = rows_cell_map[comp] # Faces of these cells loc_faces = np.unique( pp.matrix_operations.slice_indices(g.cell_faces, loc_cells) ) # Nodes of the faces, and indices in the sparse storage format where the nodes # are located. loc_nodes, data_ind = pp.matrix_operations.slice_indices( face_node, loc_faces, return_array_ind=True ) # Indices in the sparse storage that should be increased incr_ind = data_ind[loc_nodes == nodes[ni]] # Increase the node index according to previous encounters. new_node_ind[incr_ind] += node_occ[ni] # Take note of this iteration node_occ[ni] += 1 # Count the number of repititions in the nodes: The unsplit nodes have 1, the split # depends on the number of identified subclusters repititions = np.ones(g.num_nodes, dtype=np.int32) repititions[nodes] = np.bincount(node_of_component) # The number of added nodes added = repititions - 1 num_added = added.sum() # Array of cumulative increments due to the splitting of nodes with lower index. # Put a zero up front to make the adjustment for the nodes with higher index increment = np.cumsum(np.hstack((0, added))) # The new node indices are formed by combining the two sources of adjustment. # Both split and unsplit nodes are impacted by the increments. # The increments must be taken with respect to the old indices face_node.indices = (new_node_ind + increment[old_node_ind]).astype(np.int32) # Ensure the right format of the sparse storage. Somehow this got messed up somewhere. face_node.indptr = face_node.indptr.astype(np.int32) # Adjust the shape of face-nodes to account for the added nodes face_node._shape = (g.num_nodes + num_added, g.num_faces) # From the number of repititions of the node (1 for untouched nodes), # get mapping from new to old indices. # To see how this works, read the documentation of rldecode, including the examples. new_2_old_nodes = pp.matrix_operations.rldecode( np.arange(repititions.size), repititions ) g.nodes = g.nodes[:, new_2_old_nodes] # The global point ind is shared by all split nodes g.global_point_ind = g.global_point_ind[new_2_old_nodes] # Also map the tags for nodes that are on fracture tips if this is relevant # (that is, if the grid is of the highest dimension) keys = [""node_is_fracture_tip"", ""node_is_tip_of_some_fracture""] for key in keys: if hasattr(g, ""tags"") and key in g.tags: g.tags[key] = g.tags[key][new_2_old_nodes].astype(bool) return num_added ","def duplicate_nodes(g, nodes, offset): """""" Duplicate nodes on a fracture. The number of duplication will depend on the cell topology around the node. If the node is not on a fracture 1 duplicate will be added. If the node is on a single fracture 2 duplicates will be added. If the node is on a T-intersection 3 duplicates will be added. If the node is on a X-intersection 4 duplicates will be added. Equivalently for other types of intersections. Parameters: ---------- g - The grid for which the nodes are duplicated nodes - The nodes to be duplicated offset - How far from the original node the duplications should be placed. """""" # In the case of a non-zero offset (presumably intended for visualization), use a # (somewhat slow) legacy implementation which can handle this. 
if offset != 0: return _duplicate_nodes_with_offset(g, nodes, offset) # Nodes must be duplicated in the array of node coordinates. Moreover, the face-node # relation must be updated so that when a node is split in two or more, all faces on # each of the spitting lines / planes are assigned the same version / index of the # spit node. The modification of node numbering further means that the face-node relation # must be updated also for faces not directly involved in the splitting. # # The below implementation consists of the following major steps: # 1. Isolate clusters of cells surrounding each node to be split, and make connection maps # that include only cells within each cluster. # 2. Use the connection map to further subdivide the clusters into parts that lay on # different sides of dividing lines / planes. # 3. Modify the face-node relation by splitting nodes. Also update node numbering in # unsplit nodes. # 4. Duplicate split nodes in the coordinate array. # Bookeeping etc. cell_node = g.cell_nodes().tocsr() face_node = g.face_nodes.tocsc() cell_face = g.cell_faces num_nodes_to_duplicate = nodes.size ## Step 1 # Create a list where each item is the cells associated with a node to be expanded. cell_clusters = [ np.unique(pp.matrix_operations.slice_indices(cell_node, n)) for n in nodes ] # Number of cells in each cluster. sz_cell_clusters = [c.size for c in cell_clusters] tot_sz = np.sum([sz_cell_clusters]) # Create a mapping of cells from linear ordering to the clusters. # Separate variable for the rows - these will be used to map back from the cluster # cell numbering to the standard numbering rows_cell_map = np.hstack(cell_clusters) cell_map = sps.coo_matrix( (np.ones(tot_sz), (rows_cell_map, np.arange(tot_sz))), shape=(g.num_cells, tot_sz), ).tocsc() # Connection map between cells, limited to the cells included in the clusters. # Cells may occur more than once in the map (if several of the cell's nodes are to be # split) and there may be connections between cells associated with different nodes. cf_loc = cell_face * cell_map c2c = cf_loc.T * cf_loc # All non-zero data signifies connections; simplify the representation c2c.data = np.clip(np.abs(c2c.data), 0, 1) # The connection matrix is known to be symmetric, and we only need to handle the upper # triangular part c2c = sps.triu(c2c) # Remove matrix elements outside the blocks to decouple connections between cells # associated with different nodes. Do this by identifying elements in the sparse # storage format outside the blocks, and set their matrix values to zero. # This will leave a block diagonal connection matrix, one block per node. # All non-zero elements in c2c. row_c2c, col_c2c, dat_c2c = sps.find(c2c) # Get sorted (increasing columns) version of the matrix. This allows for iteration through # the columns of the matrix. sort_ind = np.argsort(col_c2c) sorted_rows = row_c2c[sort_ind] sorted_cols = col_c2c[sort_ind] sorted_data = dat_c2c[sort_ind] # Array to keep indices to remove remove_ind = np.zeros(sorted_rows.size, dtype=np.bool) # Array with the start of the blocks corresponding to each cluster. block_start = np.hstack((0, np.cumsum([sz_cell_clusters]))) # Iteration index for the start of the column group in the matrix fields 'indices' and # 'data' (referring to the sparse storage). col_group_start: int = 0 # Loop over all groups of columns (one group per node nodes). Find the matrix elements # of this block, take note of elements outside the column indices (these will be # couplings to other nodes). 
for bi in range(num_nodes_to_duplicate): # Data for this block ends with the first column that belongs to the next block. # Note that we only search from the start index of this block, and use this as # an offset (saves time). col_group_end: int = col_group_start + np.argmax( sorted_cols[col_group_start:] == block_start[bi + 1] ) # Special case for the last iteration: the last element in block_start has value # one higher than the number of rows, thus the equality above is never met, and # argmax returns the first element in the comparison. Correct this to let the # slice run to the end of the arrays. if bi == num_nodes_to_duplicate - 1: col_group_end = sorted_cols.size # Indices of elements in these rows. block_inds = slice(col_group_start, col_group_end) # Rows that are outside this block outside = np.logical_or( sorted_rows[block_inds] < block_start[bi], sorted_rows[block_inds] >= block_start[bi + 1], ) # Mark matrix elements belonging to outside rows for removal remove_ind[block_inds][outside] = 1 # The end of this column group becomes the start of the next one. col_group_start = col_group_end # Remove all data outside the main blocks. sorted_data[remove_ind] = 0 # Make a new, block-diagonal connection matrix. # IMPLEMENTATION NOTE: Going to a csc matrix should be straightforward, # since sc already is sorted. It is however not clear networkx will be faster # with a non-coo matrix. c2c_loc = sps.coo_matrix((sorted_data, (sorted_rows, sorted_cols)), shape=c2c.shape) # Drop all zero elements c2c_loc.eliminate_zeros() ## Step 2 # Now the connection matrix only contains connection between cells that share a node # to be duplicated. These can again be split into subclusters, that have lost their # connections due to the previous splitting of faces. # Identify these subclusters by the use of networkx graph = nx.Graph(c2c_loc) subclusters = [sorted(list(c)) for c in nx.connected_components(graph)] # For each subcluster, find its associated node (to be split) node_of_subcluster = [] search_start = 0 for comp in subclusters: # Find the first element with index one too much, then subtract one. # See the above loop (col_group_end) for further comments. # Also note we could have used any element in comp. ind = search_start + np.argmax(block_start[search_start:] > comp[0]) - 1 # Store this node index node_of_subcluster.append(ind) # Start of next search interval. search_start = ind node_of_component = np.array(node_of_subcluster) ## Step 3 # Modify the face-node relation by adjusting the node indices (field indices in the # sparse storage of the matrix). The duplicated nodes are added right after the # original node in the node ordering. Two adjustments are thus needed: First the # insertion of extra nodes, second this insertion increases the index of all nodes # with higher index. # Copy node-indices in the face-node relation. The first copy will preserve the old # node ordering. The second will carry the local adjustments due to the old_node_ind = face_node.indices.copy() new_node_ind = face_node.indices.copy() # Loop over all the subclusters of cells. The faces of the cells that have the # associated node to be split have the node index increased, depending on how many # times the node has been encountered before. # Count the number of encounters for a node. node_occ = np.zeros(num_nodes_to_duplicate, dtype=int) # Loop over combination of nodes and subclusters for ni, comp in zip(node_of_component, subclusters): # If the increase in node index is zero, there is no need to do anything. 
if node_occ[ni] == 0: node_occ[ni] += 1 continue # Map cell indexes from the ordering in the clusters back to global ordering loc_cells = rows_cell_map[comp] # Faces of these cells loc_faces = np.unique( pp.matrix_operations.slice_indices(g.cell_faces, loc_cells) ) # Nodes of the faces, and indices in the sparse storage format where the nodes # are located. loc_nodes, data_ind = pp.matrix_operations.slice_indices( face_node, loc_faces, return_array_ind=True ) # Indices in the sparse storage that should be increased incr_ind = data_ind[loc_nodes == nodes[ni]] # Increase the node index according to previous encounters. new_node_ind[incr_ind] += node_occ[ni] # Take note of this iteration node_occ[ni] += 1 # Count the number of repititions in the nodes: The unsplit nodes have 1, the split # depends on the number of identified subclusters repititions = np.ones(g.num_nodes, dtype=np.int32) repititions[nodes] = np.bincount(node_of_component) # The number of added nodes added = repititions - 1 num_added = added.sum() # Array of cumulative increments due to the splitting of nodes with lower index. # Put a zero up front to make the adjustment for the nodes with higher index increment = np.cumsum(np.hstack((0, added))) # The new node indices are formed by combining the two sources of adjustment. # Both split and unsplit nodes are impacted by the increments. # The increments must be taken with respect to the old indices face_node.indices = (new_node_ind + increment[old_node_ind]).astype(np.int32) # Ensure the right format of the sparse storage. Somehow this got messed up somewhere. face_node.indptr = face_node.indptr.astype(np.int32) # Adjust the shape of face-nodes to account for the added nodes face_node._shape = (g.num_nodes + num_added, g.num_faces) # From the number of repititions of the node (1 for untouched nodes), # get mapping from new to old indices. # To see how this works, read the documentation of rldecode, including the examples. new_2_old_nodes = pp.matrix_operations.rldecode( np.arange(repititions.size), repititions ) g.nodes = g.nodes[:, new_2_old_nodes] # The global point ind is shared by all split nodes g.global_point_ind = g.global_point_ind[new_2_old_nodes] # Also map the tags for nodes that are on fracture tips if this is relevant # (that is, if the grid is of the highest dimension) keys = [""node_is_fracture_tip"", ""node_is_tip_of_some_fracture""] for key in keys: if hasattr(g, ""tags"") and key in g.tags: g.tags[key] = g.tags[key][new_2_old_nodes].astype(bool) return num_added " 28222,"def experiments(conn: Optional[ConnectionPlus] = None) -> List[Experiment]: """""" List all the experiments in the container (database file from config) Args: conn: connection to the database. If not supplied, a new connection to the DB file specified in the config is made Returns: All the experiments in the container """""" log.info(""loading experiments from {}"".format(conn)) conn = conn or connect(get_DB_location(), get_DB_debug()) rows = get_experiments(conn) experiments = [] for row in rows: experiments.append(load_experiment(row['exp_id'], conn)) return experiments ","def experiments(conn: Optional[ConnectionPlus] = None) -> List[Experiment]: """""" List all the experiments in the container (database file from config) Args: conn: connection to the database. 
If not supplied, a new connection to the DB file specified in the config is made Returns: All the experiments in the container """""" log.info(""loading experiments from {}"".format(conn)) conn = conn_from_dbpath_or_conn(conn=conn, path_to_db=None) rows = get_experiments(conn) experiments = [] for row in rows: experiments.append(load_experiment(row['exp_id'], conn)) return experiments " 52013,"def get_sitemap_path(section, app, page=1): if section is None or app is None: # If we don't have a section or app, we don't need a complex directory # structure and we can call the first page 'sitemap' for convenience # (it's likely going to be the only page). endpath = str(page) if page != 1 else 'sitemap' else: endpath = id_to_path(page) return os.path.join( settings.SITEMAP_STORAGE_PATH, section if section else '', app if app else '', f'{endpath}.xml', ) ","def get_sitemap_path(section, app, page=1): if section is None or app is None: # If we don't have a section or app, we don't need a complex directory # structure and we can call the first page 'sitemap' for convenience # (it's likely going to be the only page). endpath = str(page) if page != 1 else 'sitemap' else: endpath = id_to_path(page) return os.path.join( settings.SITEMAP_STORAGE_PATH, section or '', app or '', f'{endpath}.xml', ) " 43855,"def vacuum_state(wires, hbar=2.0): r""""""Returns the vacuum state. Args: wires (int): the number of wires to create the vacuum for hbar (float): (default 2) the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar` Returns: array: the vacuum state """""" means = np.zeros((2 * wires)) cov = np.identity(2 * wires) * hbar / 2 state = [means, cov] return state ","def vacuum_state(wires, hbar=2.0): r""""""Returns the vacuum state. Args: wires (int): the number of wires to initialize in the vacuum state hbar (float): (default 2) the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar` Returns: array: the vacuum state """""" means = np.zeros((2 * wires)) cov = np.identity(2 * wires) * hbar / 2 state = [means, cov] return state " 31988,"def build_and_authenticate(params: dict): """""" Return a service object via which can call GRM API. Use the service_account credential file generated in the Google Cloud Platform to build the Google Resource Manager API Service object. returns: service Google Resource Manager API Service object via which commands in the integration will make API calls """""" service_account_credentials = params.get('service_account_credentials', {}) service_account_credentials = json.loads(service_account_credentials.get('password')) credentials = service_account.ServiceAccountCredentials.from_json_keyfile_dict(service_account_credentials, scopes=SCOPES) # add delegation to help manage the UI - link to a google-account if params.get('user_id', None) is not None: credentials = credentials.create_delegated(params.get('user_id')) proxy = params.get('proxy', False) disable_ssl = params.get('insecure', False) if proxy or disable_ssl: http_client = credentials.authorize(get_http_client_with_proxy(proxy, disable_ssl)) return build('sheets', 'v4', http=http_client) else: return build('sheets', 'v4', credentials=credentials) ","def build_and_authenticate(params: dict): """""" Return a service object via which can call GRM API. Use the service_account credential file generated in the Google Cloud Platform to build the Google Resource Manager API Service object. 
returns: service Google Resource Manager API Service object via which commands in the integration will make API calls """""" service_account_credentials = params.get('service_account_credentials', {}) service_account_credentials = json.loads(service_account_credentials.get('password')) credentials = service_account.ServiceAccountCredentials.from_json_keyfile_dict(service_account_credentials, scopes=SCOPES) # add delegation to help manage the UI - link to a google-account if params.get('user_id'): credentials = credentials.create_delegated(params.get('user_id')) proxy = params.get('proxy', False) disable_ssl = params.get('insecure', False) if proxy or disable_ssl: http_client = credentials.authorize(get_http_client_with_proxy(proxy, disable_ssl)) return build('sheets', 'v4', http=http_client) else: return build('sheets', 'v4', credentials=credentials) " 40175,"def test_SemiSupervisedDataLoader(): # test label resampling n_samples_per_label = 10 a = synthetic_iid() dl = SemiSupervisedDataLoader( a, indices=np.arange(a.n_obs), labels_obs_key=""labels"", unlabeled_category=""label_0"", n_samples_per_label=n_samples_per_label, ) labeled_dl_idx = dl.dataloaders[1].indices n_labels = 2 assert len(labeled_dl_idx) == n_samples_per_label * n_labels dl.resample_labels() resampled_labeled_dl_idx = dl.dataloaders[1].indices assert len(resampled_labeled_dl_idx) == n_samples_per_label * n_labels # check labeled indices was actually resampled assert np.sum(labeled_dl_idx == resampled_labeled_dl_idx) != len(labeled_dl_idx) ","def test_semisupervised_dataloader(): # test label resampling n_samples_per_label = 10 a = synthetic_iid() dl = SemiSupervisedDataLoader( a, indices=np.arange(a.n_obs), labels_obs_key=""labels"", unlabeled_category=""label_0"", n_samples_per_label=n_samples_per_label, ) labeled_dl_idx = dl.dataloaders[1].indices n_labels = 2 assert len(labeled_dl_idx) == n_samples_per_label * n_labels dl.resample_labels() resampled_labeled_dl_idx = dl.dataloaders[1].indices assert len(resampled_labeled_dl_idx) == n_samples_per_label * n_labels # check labeled indices was actually resampled assert np.sum(labeled_dl_idx == resampled_labeled_dl_idx) != len(labeled_dl_idx) " 43528,"def BasisStatePreparation(basis_state, wires): r"""""" Prepares a basis state on the given wires using a sequence of Pauli X gates. Args: basis_state (array): Input array of shape ``(N,)``, where N is the number of qubits, with :math:`N\leq n` wires (Sequence[int]): sequence of qubit indices that the template acts on """""" if not isinstance(wires, Iterable): raise ValueError( ""Wires needs to be a list of wires that the embedding uses; got {}."".format(wires) ) if not len(basis_state) == len(wires): raise ValueError( ""Number of qubits must be equal to the number of wires, which is {}; "" ""got {}."".format(len(wires), len(basis_state)) ) if any([x not in [0, 1] for x in basis_state]): raise ValueError( ""Basis state must only consist of 0s and 1s, got {}"".format(basis_state) ) for wire, state in zip(wires, basis_state): if state == 1: qml.PauliX(wire) ","def BasisStatePreparation(basis_state, wires): r"""""" Prepares a basis state on the given wires using a sequence of Pauli X gates. 
Args: basis_state (array): Input array of shape ``(N,)``, where N is the number of qubits, with :math:`N\leq n` wires (Sequence[int]): sequence of qubit indices that the template acts on """""" if not isinstance(wires, Iterable): raise ValueError( ""Wires must be passed as a list of integers; got {}."".format(wires) ) if not len(basis_state) == len(wires): raise ValueError( ""Number of qubits must be equal to the number of wires, which is {}; "" ""got {}."".format(len(wires), len(basis_state)) ) if any([x not in [0, 1] for x in basis_state]): raise ValueError( ""Basis state must only consist of 0s and 1s, got {}"".format(basis_state) ) for wire, state in zip(wires, basis_state): if state == 1: qml.PauliX(wire) " 22039,"def query_google_big_query(query, client_project=None, credentials=None): '''Make a query to Google BigQuery and get the result as a Vaex DataFrame. :param str query: The SQL query. :param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, falls back to the default inferred from the environment. :param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details. :rtype: DataFrame Example >>> import os os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json' >>> from vaex.contrib.io.gbq import query_google_big_query >>> query = """""" select * from `bigquery-public-data.ml_datasets.iris` where species = ""virginica"" """""" >>> df = query_google_big_query(query=query) >>> df.head(3) # sepal_length sepal_width petal_length petal_width species 0 4.9 2.5 4.5 1.7 virginica 1 5.7 2.5 5 2 virginica 2 6 2.2 5 1.5 virginica ''' client = google.cloud.bigquery.Client(project=client_project, credentials=credentials) job = client.query(query=query) return vaex.from_arrow_table(job.to_arrow()) ","def from_query(query, client_project=None, credentials=None): '''Make a query to Google BigQuery and get the result as a Vaex DataFrame. :param str query: The SQL query. :param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, falls back to the default inferred from the environment. :param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details. 
:rtype: DataFrame Example >>> import os os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json' >>> from vaex.contrib.io.gbq import query_google_big_query >>> query = """""" select * from `bigquery-public-data.ml_datasets.iris` where species = ""virginica"" """""" >>> df = query_google_big_query(query=query) >>> df.head(3) # sepal_length sepal_width petal_length petal_width species 0 4.9 2.5 4.5 1.7 virginica 1 5.7 2.5 5 2 virginica 2 6 2.2 5 1.5 virginica ''' client = google.cloud.bigquery.Client(project=client_project, credentials=credentials) job = client.query(query=query) return vaex.from_arrow_table(job.to_arrow()) " 58044,"def apply_dns_signature_policy_command(args) -> CommandResults: anti_spy_ware_name = args.get('Anti_spyware_profile_name') edl = args.get('DNS_signature_source') action = args.get('Action') packet_capture = args.get('Packet_capture', 'disable') params = { 'action': 'set', 'type': 'config', 'xpath': f""/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{DEVICE_GROUP}']"" f""/profiles/spyware/entry[@name='{anti_spy_ware_name}']"", 'key': API_KEY, 'element': '' f'' f'{packet_capture}' f'<{action}/>' f'' f'' } result = http_request( URL, 'POST', params=params, ) res_status = result.get('response', {}).get('@status') return CommandResults(outputs=result, outputs_prefix='Panorama.ApplyDNS', readable_output=f'**{res_status}**', ) ","def apply_dns_signature_policy_command(args) -> CommandResults: anti_spy_ware_name = args.get('Anti_spyware_profile_name') edl = args.get('DNS_signature_source') action = args.get('Action') packet_capture = args.get('Packet_capture', 'disable') params = { 'action': 'set', 'type': 'config', 'xpath': f""/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name='{DEVICE_GROUP}']"" f""/profiles/spyware/entry[@name='{anti_spy_ware_name}']"", 'key': API_KEY, 'element': '' f'' f'{packet_capture}' f'<{action}/>' f'' f'' } result = http_request( URL, 'POST', params=params, ) res_status = result.get('response', {}).get('@status') return CommandResults(outputs=result, outputs_prefix='PAN-OS.ApplyDNS', readable_output=f'**{res_status}**', raw_response=result ) " 59604,"def test_printout_wheels(tmp_path, capsys): tmp_path.joinpath(""example.0"").touch() with print_new_wheels(""TEST_MSG: {n}"", tmp_path): tmp_path.joinpath(""example.1"").write_bytes(b""0"" * 1023) tmp_path.joinpath(""example.2"").write_bytes(b""0"" * 1025) captured = capsys.readouterr() assert captured.err == """" assert ""example.0"" not in captured.out assert ""example.1 1 kB\n"" in captured.out assert ""example.2 2 kB\n"" in captured.out assert ""TEST_MSG:"" in captured.out assert ""TEST_MSG: 2\n"" in captured.out ","def test_printout_wheels(tmp_path, capsys): tmp_path.joinpath(""example.0"").touch() with print_new_wheels(""TEST_MSG: {n}"", tmp_path): tmp_path.joinpath(""example.1"").write_bytes(b""0"" * 1023) tmp_path.joinpath(""example.2"").write_bytes(b""0"" * 1025) captured = capsys.readouterr() assert captured.err == """" assert ""example.0"" not in captured.out assert ""example.1 1 kB\n"" in captured.out assert ""example.2 2 kB\n"" in captured.out assert ""TEST_MSG:"" in captured.out assert ""TEST_MSG: 2\n"" in captured.out " 24874,"def check_config_4(machine, old_conf, new_conf, new_new_conf): """"""Example code that will trigger the message Given an if-elif-elif construct When the body of the first elif ends with an elif Then the message confusing-consecutive-elif must be triggered. 
"""""" if old_conf: machine.disable() elif not new_conf: if new_new_conf: machine.disable() elif old_conf.value != new_conf.value: machine.disable() machine.enable(new_conf.value) elif new_conf: # [confusing-consecutive-elif] machine.enable(new_conf.value) ","def triggered_if_elif_block_ends_with_elif(machine, old_conf, new_conf, new_new_conf): """"""Example code that will trigger the message Given an if-elif-elif construct When the body of the first elif ends with an elif Then the message confusing-consecutive-elif must be triggered. """""" if old_conf: machine.disable() elif not new_conf: if new_new_conf: machine.disable() elif old_conf.value != new_conf.value: machine.disable() machine.enable(new_conf.value) elif new_conf: # [confusing-consecutive-elif] machine.enable(new_conf.value) " 13618,"def main( grid_intervals: int = Argument(..., help='Grid interval count.'), training_samples: int = Argument(..., help='Number of samples used for training the reduced basis.'), verification_samples: int = Argument(..., help='Number of samples used for verification of the output error.') ): set_log_levels({'pymor': 'WARN'}) """"""Example script for using the DWR output error estimation"""""" # real valued output fom_1 = create_fom(grid_intervals, vector_valued_output=False) # vector valued output (with BlockColumnOperator) fom_2 = create_fom(grid_intervals, vector_valued_output=True) # an output which is actually a lincomb operator dim_source = fom_1.output_functional.source.dim np.random.seed(1) random_matrix_1 = np.random.rand(2, dim_source) random_matrix_2 = np.random.rand(2, dim_source) op1 = NumpyMatrixOperator(random_matrix_1, source_id='STATE') op2 = NumpyMatrixOperator(random_matrix_2, source_id='STATE') ops = [op1, op2] lincomb_op = LincombOperator(ops, [1., 0.5]) fom_3 = fom_2.with_(output_functional=lincomb_op) # all foms with different output_functionals foms = [fom_1, fom_2, fom_3] standard_reductor = SimpleCoerciveRBReductor simple_reductor = CoerciveRBReductor reductors = [standard_reductor, simple_reductor] # Parameter space and operator are equal for all foms parameter_space = fom_1.parameters.space(0.1, 1) training_set = parameter_space.sample_uniformly(training_samples) random_set = parameter_space.sample_randomly(verification_samples, seed=22) coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', fom_1.parameters) estimator_values = [] for fom in foms: for reductor in reductors: print(f'reductor: {reductor}') # generate solution snapshots primal_snapshots = fom.solution_space.empty() modes = 8 # construct training data for mu in training_set: primal_snapshots.append(fom.solve(mu)) # apply POD on bases primal_reduced_basis = pod(primal_snapshots, modes=modes)[0] from pymor.operators.constructions import IdentityOperator product = IdentityOperator(fom.solution_space) RB_reductor = reductor(fom, RB=primal_reduced_basis, product=product, coercivity_estimator=coercivity_estimator) # two different roms rom_standard = RB_reductor.reduce() rom_restricted = RB_reductor.reduce(2) for mu in random_set: s_fom = fom.output(mu=mu) for rom in [rom_standard, rom_restricted]: s_rom, s_est = rom.output(return_error_estimate=True, mu=mu) for s_r, s_f, s_e in np.dstack((s_rom, s_fom, s_est))[0]: print(f'error : {np.abs(s_r - s_f):.6f} < {s_e:.6f}') assert np.abs(s_r-s_f) <= s_e + 1e-8 # parabolic reductor from pymordemos.parabolic_mor import discretize_pymor, reduce_pod fom = discretize_pymor() fom_1 = fom.with_(output_functional=fom.rhs.operators[0].H) random_matrix_1 = 
np.random.rand(2, fom.solution_space.dim) op = NumpyMatrixOperator(random_matrix_1, source_id='STATE') fom_2 = fom.with_(output_functional=op) parameter_space = fom.parameters.space(1, 100) random_set = parameter_space.sample_randomly(verification_samples, seed=22) for fom in [fom_1, fom_2]: reductor = ParabolicRBReductor(fom, product=fom.h1_0_semi_product) rom = reduce_pod(fom, reductor, parameter_space, training_samples*2, modes) print(f'reductor: {ParabolicRBReductor}') for mu in random_set: s_fom = fom.output(mu=mu) s_rom, s_est = rom.output(return_error_estimate=True, mu=mu, return_error_sequence=True) estimator_values.append(s_est) for s_r, s_f, s_e in np.dstack((s_rom, s_fom, s_est)).reshape(np.prod(np.shape(s_rom)), 3): print(f'error : {np.abs(s_r - s_f):.6f} < {s_e:.6f}') assert np.abs(s_r-s_f) <= s_e + 1e-8 ","def main( grid_intervals: int = Argument(..., help='Grid interval count.'), training_samples: int = Argument(..., help='Number of samples used for training the reduced basis.'), verification_samples: int = Argument(..., help='Number of samples used for verification of the output error.') ): set_log_levels({'pymor': 'WARN'}) """"""Example script for using output error estimation"""""" # real valued output fom_1 = create_fom(grid_intervals, vector_valued_output=False) # vector valued output (with BlockColumnOperator) fom_2 = create_fom(grid_intervals, vector_valued_output=True) # an output which is actually a lincomb operator dim_source = fom_1.output_functional.source.dim np.random.seed(1) random_matrix_1 = np.random.rand(2, dim_source) random_matrix_2 = np.random.rand(2, dim_source) op1 = NumpyMatrixOperator(random_matrix_1, source_id='STATE') op2 = NumpyMatrixOperator(random_matrix_2, source_id='STATE') ops = [op1, op2] lincomb_op = LincombOperator(ops, [1., 0.5]) fom_3 = fom_2.with_(output_functional=lincomb_op) # all foms with different output_functionals foms = [fom_1, fom_2, fom_3] standard_reductor = SimpleCoerciveRBReductor simple_reductor = CoerciveRBReductor reductors = [standard_reductor, simple_reductor] # Parameter space and operator are equal for all foms parameter_space = fom_1.parameters.space(0.1, 1) training_set = parameter_space.sample_uniformly(training_samples) random_set = parameter_space.sample_randomly(verification_samples, seed=22) coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', fom_1.parameters) estimator_values = [] for fom in foms: for reductor in reductors: print(f'reductor: {reductor}') # generate solution snapshots primal_snapshots = fom.solution_space.empty() modes = 8 # construct training data for mu in training_set: primal_snapshots.append(fom.solve(mu)) # apply POD on bases primal_reduced_basis = pod(primal_snapshots, modes=modes)[0] from pymor.operators.constructions import IdentityOperator product = IdentityOperator(fom.solution_space) RB_reductor = reductor(fom, RB=primal_reduced_basis, product=product, coercivity_estimator=coercivity_estimator) # two different roms rom_standard = RB_reductor.reduce() rom_restricted = RB_reductor.reduce(2) for mu in random_set: s_fom = fom.output(mu=mu) for rom in [rom_standard, rom_restricted]: s_rom, s_est = rom.output(return_error_estimate=True, mu=mu) for s_r, s_f, s_e in np.dstack((s_rom, s_fom, s_est))[0]: print(f'error : {np.abs(s_r - s_f):.6f} < {s_e:.6f}') assert np.abs(s_r-s_f) <= s_e + 1e-8 # parabolic reductor from pymordemos.parabolic_mor import discretize_pymor, reduce_pod fom = discretize_pymor() fom_1 = fom.with_(output_functional=fom.rhs.operators[0].H) 
random_matrix_1 = np.random.rand(2, fom.solution_space.dim) op = NumpyMatrixOperator(random_matrix_1, source_id='STATE') fom_2 = fom.with_(output_functional=op) parameter_space = fom.parameters.space(1, 100) random_set = parameter_space.sample_randomly(verification_samples, seed=22) for fom in [fom_1, fom_2]: reductor = ParabolicRBReductor(fom, product=fom.h1_0_semi_product) rom = reduce_pod(fom, reductor, parameter_space, training_samples*2, modes) print(f'reductor: {ParabolicRBReductor}') for mu in random_set: s_fom = fom.output(mu=mu) s_rom, s_est = rom.output(return_error_estimate=True, mu=mu, return_error_sequence=True) estimator_values.append(s_est) for s_r, s_f, s_e in np.dstack((s_rom, s_fom, s_est)).reshape(np.prod(np.shape(s_rom)), 3): print(f'error : {np.abs(s_r - s_f):.6f} < {s_e:.6f}') assert np.abs(s_r-s_f) <= s_e + 1e-8 " 30533,"def get_request_args(params): limit = request.args.get('n', None) offset = request.args.get('s', None) out_format = request.args.get('v', None) query = request.args.get('q', None) if limit is None: limit = try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG) else: limit = try_parse_integer(limit, CTX_LIMIT_ERR_MSG) if offset is None: offset = try_parse_integer(demisto.params().get('offset'), CTX_OFFSET_ERR_MSG) else: offset = try_parse_integer(offset, CTX_OFFSET_ERR_MSG) if out_format is None: out_format = params.get('format') else: if out_format not in ['text', 'json', 'json-seq', 'csv']: raise DemistoException(CTX_FORMAT_ERR_MSG) if query is None: query = params.get('indicators_query') return limit, offset, out_format, query ","def get_request_args(params): limit = try_parse_integer(request.args.get('n', params.get('list_size')), CTX_LIMIT_ERR_MSG) offset = request.args.get('s', None) out_format = request.args.get('v', None) query = request.args.get('q', None) if limit is None: limit = try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG) else: limit = try_parse_integer(limit, CTX_LIMIT_ERR_MSG) if offset is None: offset = try_parse_integer(demisto.params().get('offset'), CTX_OFFSET_ERR_MSG) else: offset = try_parse_integer(offset, CTX_OFFSET_ERR_MSG) if out_format is None: out_format = params.get('format') else: if out_format not in ['text', 'json', 'json-seq', 'csv']: raise DemistoException(CTX_FORMAT_ERR_MSG) if query is None: query = params.get('indicators_query') return limit, offset, out_format, query " 25128,"def group_metadata_to_zarr(group_metadata): '''Convert group metadata from N5 to zarr format.''' try: del group_metadata['n5'] except KeyError: # This only exists at the top level pass group_metadata['zarr_format'] = ZARR_FORMAT return group_metadata ","def group_metadata_to_zarr(group_metadata): '''Convert group metadata from N5 to zarr format.''' group_metadata.pop('n5') group_metadata['zarr_format'] = ZARR_FORMAT return group_metadata " 31968,"def censys_view_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" Returns host information for the specified IP address or structured certificate data for the specified SHA-256 """""" index = args.get('index', '') query = args.get('query', '') res = client.censys_view_request(index, query) if index == 'ipv4': result = res.get('result', {}) content = { 'Name': result.get('autonomous_system', {}).get('name'), 'Bgp Prefix': result.get('autonomous_system', {}).get('bgp_prefix'), 'ASN': result.get('autonomous_system', {}).get('asn'), 'Service': [{ 'Port': service.get('port'), 'Service Name': service.get('service_name') } for service in result.get('services', 
[])], 'Last Updated': result.get('last_updated_at') } human_readable = tableToMarkdown(f'Information for IP {query}', content) return CommandResults( readable_output=human_readable, outputs_prefix='Censys.View', outputs_key_field='ip', outputs=result, raw_response=res ) else: metadata = res.get('metadata') content = { 'SHA 256': res.get('fingerprint_sha256'), 'Tags': res.get('tags'), 'Source': metadata.get('source'), 'Added': metadata.get('added_at'), 'Updated': metadata.get('updated_at') } human_readable = tableToMarkdown('Information for certificate ', content) return CommandResults( readable_output=human_readable, outputs_prefix='Censys.View', outputs_key_field='fingerprint_sha256', outputs=res, raw_response=res ) ","def censys_view_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" Returns host information for the specified IP address or structured certificate data for the specified SHA-256 """""" index = args.get('index', '') query = args.get('query', '') res = client.censys_view_request(index, query) if index == 'ipv4': result = res.get('result', {}) content = { 'Name': result.get('autonomous_system', {}).get('name'), 'Bgp Prefix': result.get('autonomous_system', {}).get('bgp_prefix'), 'ASN': result.get('autonomous_system', {}).get('asn'), 'Service': [{ 'Port': service.get('port'), 'Service Name': service.get('service_name') } for service in result.get('services', [])], 'Last Updated': result.get('last_updated_at') } human_readable = tableToMarkdown(f'Information for IP {query}', content) return CommandResults( readable_output=human_readable, outputs_prefix='Censys.View', outputs_key_field='ip', outputs=result, raw_response=res ) else: metadata = res.get('metadata') content = { 'SHA 256': res.get('fingerprint_sha256'), 'Tags': res.get('tags'), 'Source': metadata.get('source'), 'Added': metadata.get('added_at'), 'Updated': metadata.get('updated_at') } human_readable = tableToMarkdown('Information for certificate', content) return CommandResults( readable_output=human_readable, outputs_prefix='Censys.View', outputs_key_field='fingerprint_sha256', outputs=res, raw_response=res ) " 45014,"def wait_for_job_run( account_id: int, token: str, run_id: int, max_wait_time: int = None, domain: str = None, ) -> dict: """""" Get a dbt Cloud job run. Please note that this function will fail if any call to dbt Cloud APIs fail. 
Args: - account_id (int): dbt Cloud account ID - token (str): dbt Cloud token - run_id (int): dbt Cloud job run ID - max_wait_time (int): the number od seconds to wait for the job to complete - domain (str): The domain the function should call, normally cloud.getdbt.com Returns: - The job run result, namely the ""data"" key in the API response Raises: - DbtCloudRunFailed: if ""finished_at"" is not None and the result status == 20 - DbtCloudRunCanceled: if ""finished_at"" is not None and the result status == 30 - DbtCloudRunTimedOut: if run does not finish before provided max_wait_time """""" wait_time_between_api_calls = 10 elapsed_wait_time = 0 while not max_wait_time or elapsed_wait_time <= max_wait_time: get_run_request = requests.get( url=__DBT_CLOUD_GET_RUN_API_ENDPOINT_V2.format( accountId=account_id, runId=run_id, apiDomain=domain ), headers={""Authorization"": f""Bearer {token}""}, ) if get_run_request.status_code != 200: raise GetDbtCloudRunFailed(get_run_request.reason) result = get_run_request.json()[""data""] if result[""finished_at""]: if result[""status""] == 10: return result elif result[""status""] == 20: raise DbtCloudRunFailed(f""Job run with ID: {run_id} failed."") elif result[""status""] == 30: raise DbtCloudRunCanceled(f""Job run with ID: {run_id} cancelled."") sleep(wait_time_between_api_calls) elapsed_wait_time += wait_time_between_api_calls raise DbtCloudRunTimedOut( f""Max attempts reached while checking status of job run with ID: {run_id}"" ) ","def wait_for_job_run( account_id: int, token: str, run_id: int, max_wait_time: int = None, domain: str = None, ) -> dict: """""" Get a dbt Cloud job run. Please note that this function will fail if any call to dbt Cloud APIs fail. Args: - account_id (int): dbt Cloud account ID - token (str): dbt Cloud token - run_id (int): dbt Cloud job run ID - max_wait_time (int): the number od seconds to wait for the job to complete - domain (str): The domain the function should call (e.g. `cloud.getdbt.com`). 
Returns: - The job run result, namely the ""data"" key in the API response Raises: - DbtCloudRunFailed: if ""finished_at"" is not None and the result status == 20 - DbtCloudRunCanceled: if ""finished_at"" is not None and the result status == 30 - DbtCloudRunTimedOut: if run does not finish before provided max_wait_time """""" wait_time_between_api_calls = 10 elapsed_wait_time = 0 while not max_wait_time or elapsed_wait_time <= max_wait_time: get_run_request = requests.get( url=__DBT_CLOUD_GET_RUN_API_ENDPOINT_V2.format( accountId=account_id, runId=run_id, apiDomain=domain ), headers={""Authorization"": f""Bearer {token}""}, ) if get_run_request.status_code != 200: raise GetDbtCloudRunFailed(get_run_request.reason) result = get_run_request.json()[""data""] if result[""finished_at""]: if result[""status""] == 10: return result elif result[""status""] == 20: raise DbtCloudRunFailed(f""Job run with ID: {run_id} failed."") elif result[""status""] == 30: raise DbtCloudRunCanceled(f""Job run with ID: {run_id} cancelled."") sleep(wait_time_between_api_calls) elapsed_wait_time += wait_time_between_api_calls raise DbtCloudRunTimedOut( f""Max attempts reached while checking status of job run with ID: {run_id}"" ) " 30184,"def submit_job(): opts = [ [""--{}"".format(key[4:].replace(""_"", ""-"")), value] for key, value in env.items() if key.startswith(""TBV_"") and key != ""TBV_CLASS"" ] command = [ ""spark-submit"", ""--master"", ""yarn"", ""--deploy-mode"", ""client"", ""--class"", environ[""TBV_CLASS""], artifact_file, ] + [v for opt in opts for v in opt if v] call_exit_errors(command) ","def submit_job(): opts = [ [""--{}"".format(key[4:].replace(""_"", ""-"")), value] for key, value in environ.items() if key.startswith(""TBV_"") and key != ""TBV_CLASS"" ] command = [ ""spark-submit"", ""--master"", ""yarn"", ""--deploy-mode"", ""client"", ""--class"", environ[""TBV_CLASS""], artifact_file, ] + [v for opt in opts for v in opt if v] call_exit_errors(command) " 7374,"def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """""" Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. 
Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. ""Masked FFT registration"". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """""" if mode not in {'full', 'same'}: raise ValueError(f""Correlation mode '{mode}' is not valid."") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError(""complex-valued arr1, arr2 are not supported"") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. 
number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out ","def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """""" Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between the two arrays. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. 
A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. ""Masked FFT registration"". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """""" if mode not in {'full', 'same'}: raise ValueError(f""Correlation mode '{mode}' is not valid."") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError(""complex-valued arr1, arr2 are not supported"") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. 
number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out " 58325,"def linspace_int(Nx, Ny, periodic=True): """"""Provide a range of `Ny` equispaced integers between `0` and `Nx-1`. Parameters ---------- Nx: int Range of integers Ny: int Number of integers periodic: bool, optional Whether the vector is periodic. Determines if the Nx == 0. Default: True Returns ------- vector Generated vectors. Examples -------- >>> linspace_int(10, 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> linspace_int(10, 4) array([0, 2, 5, 7]) >>> linspace_int(10, 5) array([0, 2, 4, 6, 8]) >>> """""" if periodic: jj = np.linspace(0, Nx, Ny+1)[:-1] else: jj = np.linspace(0, Nx-1, Ny) jj = jj.astype(int) return jj ","def linspace_int(Nx, Ny, periodic=True): """"""Provide a range of `Ny` equispaced integers between `0` and `Nx-1`. Parameters ---------- Nx: int Range of integers Ny: int Number of integers periodic: bool, optional Whether the vector is periodic. Determines if the Nx == 0. Default: True Returns ------- integers: ndarray The list of integers. Generated vectors. 
Examples -------- >>> linspace_int(10, 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> linspace_int(10, 4) array([0, 2, 5, 7]) >>> linspace_int(10, 5) array([0, 2, 4, 6, 8]) >>> """""" if periodic: jj = np.linspace(0, Nx, Ny+1)[:-1] else: jj = np.linspace(0, Nx-1, Ny) jj = jj.astype(int) return jj " 7856,"def test_atoms_material_cell(uo2, water): """""" Test if correct number of atoms is returned. Also check if Cell.atoms still works after volume/material was changed """""" c = openmc.Cell(fill=uo2) c.volume = 2.0 expected_nucs = ['U235', 'O16'] # Precalculate the expected number of atoms M = ((atomic_mass('U235') + 2 * atomic_mass('O16'))/3) expected_atoms = list() expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 2.0) # U235 expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 2.0) # O16 tuples = list(c.atoms.items()) for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples): assert nuc == t[0] assert atom_num == t[1] # Change volume and check if OK c.volume = 3.0 expected_atoms = list() expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 3.0) # U235 expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 3.0) # O16 tuples = list(c.atoms.items()) for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples): assert nuc == t[0] assert atom_num == pytest.approx(t[1]) # Change material and check if OK c.fill = water expected_nucs = ['H1', 'O16'] M = ((2 * atomic_mass('H1') + atomic_mass('O16'))/3) expected_atoms = list() expected_atoms.append(2/3 * water.density/M * AVOGADRO * 3.0) # H1 expected_atoms.append(1/3 * water.density/M * AVOGADRO * 3.0) # O16 tuples = list(c.atoms.items()) for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples): assert nuc == t[0] assert atom_num == pytest.approx(t[1]) ","def test_atoms_material_cell(uo2, water): """""" Test if correct number of atoms is returned. 
Also check if Cell.atoms still works after volume/material was changed """""" c = openmc.Cell(fill=uo2) c.volume = 2.0 expected_nucs = ['U235', 'O16'] # Precalculate the expected number of atoms M = (atomic_mass('U235') + 2 * atomic_mass('O16')) / 3 expected_atoms = list() expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 2.0) # U235 expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 2.0) # O16 tuples = list(c.atoms.items()) for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples): assert nuc == t[0] assert atom_num == t[1] # Change volume and check if OK c.volume = 3.0 expected_atoms = list() expected_atoms.append(1/3 * uo2.density/M * AVOGADRO * 3.0) # U235 expected_atoms.append(2/3 * uo2.density/M * AVOGADRO * 3.0) # O16 tuples = list(c.atoms.items()) for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples): assert nuc == t[0] assert atom_num == pytest.approx(t[1]) # Change material and check if OK c.fill = water expected_nucs = ['H1', 'O16'] M = ((2 * atomic_mass('H1') + atomic_mass('O16'))/3) expected_atoms = list() expected_atoms.append(2/3 * water.density/M * AVOGADRO * 3.0) # H1 expected_atoms.append(1/3 * water.density/M * AVOGADRO * 3.0) # O16 tuples = list(c.atoms.items()) for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples): assert nuc == t[0] assert atom_num == pytest.approx(t[1]) " 32123,"def get_incident_init_params(): params_dict = { 'threat_level': demisto.params().get('threat_level', None), 'threat_type': demisto.params().get('threat_type', None) } return {param_k: param_v for param_k, param_v in params_dict.items() if param_v is not None and param_v} ","def get_incident_init_params(): params_dict = { 'threat_level': demisto.params().get('threat_level', None), 'threat_type': demisto.params().get('threat_type', None) } return {param_k: param_v for param_k, param_v in params_dict.items() if param_v} " 31706,"def filter_by_threshold(context: list, threshold: float): low = [] high = [] for item in context: if item.get('similarity') >= threshold: high.append(item) else: campaign = _get_incident_campaign(item['id']) if campaign: high.append(item) else: low.append(item) return low, high ","def filter_by_threshold(context: list, threshold: float) -> Tuple[list, list]: low = [] high = [] for item in context: if item.get('similarity') >= threshold: high.append(item) else: campaign = _get_incident_campaign(item['id']) if campaign: high.append(item) else: low.append(item) return low, high " 5369,"def test_joined(): """""" Test to ensure the current node joined to a cluster with node user@host """""" ret = {""name"": ""salt"", ""changes"": {}, ""result"": True, ""comment"": """"} mock = MagicMock(side_effect=[[""rahulha@salt""], [""""], [""""]]) with patch.dict(rabbitmq_cluster.__salt__, {""rabbitmq.cluster_status"": mock}): ret.update({""comment"": ""Already in cluster""}) assert rabbitmq_cluster.joined(""salt"", ""salt"", ""rahulha"") == ret with patch.dict(rabbitmq_cluster.__opts__, {""test"": True}): ret.update( { ""result"": None, ""comment"": ""Node is set to join "" ""cluster rahulha@salt"", ""changes"": {""new"": ""rahulha@salt"", ""old"": """"}, } ) assert rabbitmq_cluster.joined(""salt"", ""salt"", ""rahulha"") == ret with patch.dict(rabbitmq_cluster.__opts__, {""test"": False}): mock = MagicMock(return_value={""Error"": ""ERR""}) with patch.dict(rabbitmq_cluster.__salt__, {""rabbitmq.join_cluster"": mock}): ret.update({""result"": False, ""comment"": ""ERR"", ""changes"": {}}) assert rabbitmq_cluster.joined(""salt"", ""salt"", 
""rahulha"") == ret ","def test_joined(): """""" Test to ensure the current node joined to a cluster with node user@host """""" ret = {""name"": ""salt"", ""changes"": {}, ""result"": True, ""comment"": """"} mock = MagicMock(side_effect=[[""rahulha@salt""], [""""], [""""]]) with patch.dict(rabbitmq_cluster.__salt__, {""rabbitmq.cluster_status"": mock}): ret.update({""comment"": ""Already in cluster""}) assert rabbitmq_cluster.joined(""salt"", ""salt"", ""rahulha"") == ret with patch.dict(rabbitmq_cluster.__opts__, {""test"": True}): ret.update( { ""result"": None, ""comment"": ""Node is set to join cluster rahulha@salt"", ""changes"": {""new"": ""rahulha@salt"", ""old"": """"}, } ) assert rabbitmq_cluster.joined(""salt"", ""salt"", ""rahulha"") == ret with patch.dict(rabbitmq_cluster.__opts__, {""test"": False}): mock = MagicMock(return_value={""Error"": ""ERR""}) with patch.dict(rabbitmq_cluster.__salt__, {""rabbitmq.join_cluster"": mock}): ret.update({""result"": False, ""comment"": ""ERR"", ""changes"": {}}) assert rabbitmq_cluster.joined(""salt"", ""salt"", ""rahulha"") == ret " 7131,"def test_guess_spatial_dimensions(): im1 = np.zeros((5, 5)) im2 = np.zeros((5, 5, 5)) im3 = np.zeros((5, 5, 3)) im4 = np.zeros((5, 5, 5, 3)) im5 = np.zeros((5,)) testing.assert_equal(guess_spatial_dimensions(im1), 2) testing.assert_equal(guess_spatial_dimensions(im2), 3) testing.assert_equal(guess_spatial_dimensions(im3), None) testing.assert_equal(guess_spatial_dimensions(im4), 3) with testing.raises(ValueError): guess_spatial_dimensions(im5) ","def test_guess_spatial_dimensions(): im1 = np.zeros((5, 5)) im2 = np.zeros((5, 5, 5)) im3 = np.zeros((5, 5, 3)) im4 = np.zeros((5, 5, 5, 3)) im5 = np.zeros((5,)) testing.assert_equal(guess_spatial_dimensions(im1), 2) testing.assert_equal(guess_spatial_dimensions(im2), 3) testing.assert_equal(_guess_spatial_dimensions(im3), None) testing.assert_equal(guess_spatial_dimensions(im4), 3) with testing.raises(ValueError): guess_spatial_dimensions(im5) " 36030,"def test_ambiguous_label_pk(setup_groups): """"""Situation: LABEL of entity_02 is exactly equal to ID of entity_01. Verify that using an ambiguous identifier gives precedence to the ID interpretation. Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL. """""" entity_01, entity_02, entity_03 = setup_groups param = GroupParamType() identifier = '{}'.format(entity_02.label) result = param.convert(identifier, None, None) assert result.uuid == entity_01.uuid identifier = '{}{}'.format(entity_02.label, OrmEntityLoader.label_ambiguity_breaker) result = param.convert(identifier, None, None) assert result.uuid == entity_02.uuid ","def test_ambiguous_label_pk(setup_groups): """"""Situation: LABEL of entity_02 is exactly equal to ID of entity_01. Verify that using an ambiguous identifier gives precedence to the ID interpretation. Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL. 
"""""" entity_01, entity_02, entity_03 = setup_groups param = GroupParamType() identifier = '{}'.format(entity_02.label) entity_01, entity_02, _ = setup_groups assert result.uuid == entity_01.uuid identifier = '{}{}'.format(entity_02.label, OrmEntityLoader.label_ambiguity_breaker) result = param.convert(identifier, None, None) assert result.uuid == entity_02.uuid " 82,"def test_MockSite(): site = MockSite() assert list(site.docs) == [] site.add([{ ""key"": ""å"", ""type"": {""key"": ""/type/object""} }, { ""key"": ""ß"", ""type"": {""key"": ""/type/object""} } ]) assert list(site.docs) == [""å"", ""ß""] ","def test_MockSite(): site = MockSite() assert list(site.docs) == [] site.add([{ ""key"": ""å"", ""type"": {""key"": ""/type/object""} }, { ""key"": ""ß"", ""type"": {""key"": ""/type/object""} } ]) assert list(site.docs) == [""a"", ""ß""] " 55236,"def check_mpi(): """""" When called via:: # python3 -m netket.tools.check_mpi mpi_available : True mpi4jax_available : True n_nodes : 1 this will print out basic MPI information to make allow users to check whether the environment has been set up correctly. """""" if rank > 0: return info = { ""mpi_available"": mpi_available, ""mpi4jax_available"": mpi4jax_available, } if mpi_available: from mpi4py import MPI info.update( { ""n_nodes"": n_nodes, ""mpi4jax: MPI_version"": MPI.Get_version(), ""mpi4jax: MPI_library_version"": MPI.Get_library_version(), } ) maxkeylen = max(len(k) for k in info.keys()) for k, v in info.items(): print(f""{k:{maxkeylen}} : {v}"") ","def check_mpi(): """""" When called via:: # python3 -m netket.tools.check_mpi mpi_available : True mpi4jax_available : True n_nodes : 1 this will print out basic MPI information to make allow users to check whether the environment has been set up correctly. """""" if rank > 0: return info = { ""mpi_available"": mpi_available, ""mpi4jax_available"": mpi4jax_available, } if mpi_available: from mpi4py import MPI info.update( { ""n_nodes"": n_nodes, ""mpi4py | MPI version"": MPI.Get_version(), ""mpi4py | MPI library_version"": MPI.Get_library_version(), } ) maxkeylen = max(len(k) for k in info.keys()) for k, v in info.items(): print(f""{k:{maxkeylen}} : {v}"") " 40039,"def get_batch_kwargs(context, data_source_name=None, generator_name=None, data_asset_name=None, additional_batch_kwargs=None): """""" This method manages the interaction with user necessary to obtain batch_kwargs for a batch of a data asset. In order to get batch_kwargs this method needs data_source_name, generator_name and data_asset_name to combine them into a fully qualified data asset identifier(data_source_name/generator_name/data_asset_name). All three arguments are optional. If they are present, the method uses their values. Otherwise, the method prompts user to enter them interactively. Since it is possible for any of these three components to be passed to this method as empty values and to get their values after interacting with user, this method returns these components' values in case they changed. If the datasource has generators that can list available data asset names, the method lets user choose a name from that list (note: if there are multiple generators, user has to choose one first). If a name known to the chosen generator is selected, the generator will be able to yield batch_kwargs. The method also gives user an alternative to selecting the data asset name from the generator's list - user can type in a name for their data asset. 
In this case the in-memory generator will be used to construct a fully qualified data asset identifier (note: if the datasource has no in-memory generator configured, the method will exist with a failure). Since no generator can yield batch_kwargs for this data asset name, the method prompts user to specify batch_kwargs by choosing a file (if the datasource is pandas or spark) or by writing a SQL query (if the datasource points to a database). :param context: :param data_source_name: :param generator_name: :param data_asset_name: :param additional_batch_kwargs: :return: a tuple: (data_source_name, generator_name, data_asset_name, batch_kwargs). The components of the tuple were passed into the methods as optional arguments, but their values might have changed after this method's execution. If the returned batch_kwargs is None, it means that the generator will know to yield batch_kwargs when called. """""" msg_prompt_enter_data_asset_name = ""\nWhich data would you like to use? (Choose one)\n"" msg_prompt_enter_data_asset_name_suffix = "" Don't see the data asset in the list above?. Just type the name.\n"" data_source = select_datasource(context, data_source_name=data_source_name) batch_kwargs = None available_data_assets_dict = context.get_available_data_asset_names(datasource_names=data_source_name) if generator_name is None: generator_name = select_generator(context, data_source_name, available_data_assets_dict=available_data_assets_dict) # if we have a generator that can list available data asset names, let's list them if generator_name is not None and data_asset_name is None: # print(""Found {} datas"".format(len(available_data_assets[""names""]))) available_data_asset_names = [""{} ({})"".format(name[0], name[1]) for name in available_data_assets_dict[data_source_name][generator_name][""names""]] data_asset_names_to_display = available_data_asset_names[:5] choices = ""\n"".join(["" {}. 
{}"".format(i, name) for i, name in enumerate(data_asset_names_to_display, 1)]) prompt = msg_prompt_enter_data_asset_name + choices + ""\n"" + msg_prompt_enter_data_asset_name_suffix.format(len(data_asset_names_to_display)) selection = click.prompt(prompt, default=None, show_default=False) selection = selection.strip() try: data_asset_index = int(selection) - 1 try: data_asset_name = [name[0] for name in available_data_assets_dict[data_source_name][generator_name][""names""]][data_asset_index] except IndexError: pass except ValueError: data_asset_name = selection # If the data asset name is in the namespace (or we don't have it yet) if data_asset_name is None or data_asset_name not in [name[0] for name in available_data_assets_dict[data_source_name][generator_name][""names""]]: generator_name = None for generator_info in data_source.list_generators(): generator = data_source.get_generator(generator_info[""name""]) if isinstance(generator, MANUAL_GENERATOR_CLASSES): generator_name = generator_info[""name""] break if generator_name is None: raise ge_exceptions.DataContextError(""No manual generators found in datasource {0:s}"".format(data_source_name)) if isinstance(context.get_datasource(data_source_name), (PandasDatasource, SparkDFDatasource)): data_asset_name, batch_kwargs = _load_file_as_data_asset_from_pandas_datasource(context, data_source_name, generator_name=generator_name, data_asset_name=data_asset_name) elif isinstance(context.get_datasource(data_source_name), SqlAlchemyDatasource): data_asset_name, batch_kwargs = _load_query_as_data_asset_from_sqlalchemy_datasource(context, data_source_name, data_asset_name=data_asset_name) else: raise ge_exceptions.DataContextError(""Datasource {0:s} is expected to be a PandasDatasource or SparkDFDatasource, but is {1:s}"".format(data_source_name, str(type(context.get_datasource(data_source_name))))) return (data_source_name, generator_name, data_asset_name, batch_kwargs) ","def get_batch_kwargs(context, data_source_name=None, generator_name=None, data_asset_name=None, additional_batch_kwargs=None): """""" This method manages the interaction with user necessary to obtain batch_kwargs for a batch of a data asset. In order to get batch_kwargs this method needs data_source_name, generator_name and data_asset_name to combine them into a fully qualified data asset identifier(data_source_name/generator_name/data_asset_name). All three arguments are optional. If they are present, the method uses their values. Otherwise, the method prompts user to enter them interactively. Since it is possible for any of these three components to be passed to this method as empty values and to get their values after interacting with user, this method returns these components' values in case they changed. If the datasource has generators that can list available data asset names, the method lets user choose a name from that list (note: if there are multiple generators, user has to choose one first). If a name known to the chosen generator is selected, the generator will be able to yield batch_kwargs. The method also gives user an alternative to selecting the data asset name from the generator's list - user can type in a name for their data asset. In this case a manual batch kwargs generator will be used to construct a fully qualified data asset identifier (note: if the datasource has no in-memory generator configured, the method will exist with a failure). 
Since no generator can yield batch_kwargs for this data asset name, the method prompts user to specify batch_kwargs by choosing a file (if the datasource is pandas or spark) or by writing a SQL query (if the datasource points to a database). :param context: :param data_source_name: :param generator_name: :param data_asset_name: :param additional_batch_kwargs: :return: a tuple: (data_source_name, generator_name, data_asset_name, batch_kwargs). The components of the tuple were passed into the methods as optional arguments, but their values might have changed after this method's execution. If the returned batch_kwargs is None, it means that the generator will know to yield batch_kwargs when called. """""" msg_prompt_enter_data_asset_name = ""\nWhich data would you like to use? (Choose one)\n"" msg_prompt_enter_data_asset_name_suffix = "" Don't see the data asset in the list above?. Just type the name.\n"" data_source = select_datasource(context, data_source_name=data_source_name) batch_kwargs = None available_data_assets_dict = context.get_available_data_asset_names(datasource_names=data_source_name) if generator_name is None: generator_name = select_generator(context, data_source_name, available_data_assets_dict=available_data_assets_dict) # if we have a generator that can list available data asset names, let's list them if generator_name is not None and data_asset_name is None: # print(""Found {} datas"".format(len(available_data_assets[""names""]))) available_data_asset_names = [""{} ({})"".format(name[0], name[1]) for name in available_data_assets_dict[data_source_name][generator_name][""names""]] data_asset_names_to_display = available_data_asset_names[:5] choices = ""\n"".join(["" {}. {}"".format(i, name) for i, name in enumerate(data_asset_names_to_display, 1)]) prompt = msg_prompt_enter_data_asset_name + choices + ""\n"" + msg_prompt_enter_data_asset_name_suffix.format(len(data_asset_names_to_display)) selection = click.prompt(prompt, default=None, show_default=False) selection = selection.strip() try: data_asset_index = int(selection) - 1 try: data_asset_name = [name[0] for name in available_data_assets_dict[data_source_name][generator_name][""names""]][data_asset_index] except IndexError: pass except ValueError: data_asset_name = selection # If the data asset name is in the namespace (or we don't have it yet) if data_asset_name is None or data_asset_name not in [name[0] for name in available_data_assets_dict[data_source_name][generator_name][""names""]]: generator_name = None for generator_info in data_source.list_generators(): generator = data_source.get_generator(generator_info[""name""]) if isinstance(generator, MANUAL_GENERATOR_CLASSES): generator_name = generator_info[""name""] break if generator_name is None: raise ge_exceptions.DataContextError(""No manual generators found in datasource {0:s}"".format(data_source_name)) if isinstance(context.get_datasource(data_source_name), (PandasDatasource, SparkDFDatasource)): data_asset_name, batch_kwargs = _load_file_as_data_asset_from_pandas_datasource(context, data_source_name, generator_name=generator_name, data_asset_name=data_asset_name) elif isinstance(context.get_datasource(data_source_name), SqlAlchemyDatasource): data_asset_name, batch_kwargs = _load_query_as_data_asset_from_sqlalchemy_datasource(context, data_source_name, data_asset_name=data_asset_name) else: raise ge_exceptions.DataContextError(""Datasource {0:s} is expected to be a PandasDatasource or SparkDFDatasource, but is {1:s}"".format(data_source_name, 
str(type(context.get_datasource(data_source_name))))) return (data_source_name, generator_name, data_asset_name, batch_kwargs) " 30464,"def main(): # get incident fields res = demisto.executeCommand('demisto-api-get', {'uri': '/incidentfields'}) if is_error(res): return_error(res[0]['Contents']) fields = res[0]['Contents']['response'] # 'fields' contains non-incident fields, as well, so let's make a version containing only incident fields incident_fields = [field for field in fields if field['id'].startswith('incident_')] # get arguments args = demisto.args() incident_type = args['incident_type'] exclude_system = False if 'custom' in args and argToBoolean(args['custom']) is True: exclude_system = True name_key = 'name' if 'short_names' in args and argToBoolean(args['short_names']) is True: name_key = 'cliName' explicit_only = False if 'explicit_only' in args and argToBoolean(args['explicit_only']) is True: explicit_only = True # generate results types = [] if exclude_system is True: # only return non-system fields for field in incident_fields: # using multiple if statements for readability if field['system'] is False: # exclude system fields if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) else: # return all fields for field in incident_fields: # using multiple if statements for readability if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) # output results if 'pprint' in args and argToBoolean(args['pprint']) is True: demisto.results(pformat(types)) else: demisto.results(types) ","def main(): # get incident fields res = demisto.executeCommand('demisto-api-get', {'uri': '/incidentfields'}) if is_error(res): return_error(res[0]['Contents']) fields = res[0]['Contents']['response'] # 'fields' contains non-incident fields, as well, so let's make a version containing only incident fields incident_fields = [field for field in fields if field['id'].startswith('incident_')] # get arguments args = demisto.args() incident_type = args['incident_type'] exclude_system = False if 'custom' in args and argToBoolean(args['custom']) is True: exclude_system = True name_key = 'name' if 'short_names' in args and argToBoolean(args['short_names']) is True: name_key = 'cliName' explicit_only = False if 'explicit_only' in args and argToBoolean(args['explicit_only']) is True: explicit_only = True # generate results types = [] if exclude_system is True: # only return non-system fields for field in incident_fields: # using multiple if statements for readability if field['system'] is False: # exclude system fields if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) else: # return all fields for field in incident_fields: # using multiple if statements 
for readability if field['associatedToAll'] is True and explicit_only is False: # if explicit_only is false, include fields associated to all incident types types.append(field[name_key]) elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']: # include fields where incident type is in associatedTypes types.append(field[name_key]) # output results if 'pprint' in args and argToBoolean(args['pprint']) is True: demisto.results(pformat(types)) else: demisto.results(types) " 36154,"def _start_of_option(value: str) -> bool: """"""Check if the value looks like the start of an option. This is an adaptation of :py:func:`click.shell_completion._start_of_option` that simply add ``.`` and ``~`` as the characters that are interpreted as the start of a filepath, and so not the start of an option. This will ensure that filepaths starting with these characters are autocompleted once again. """""" if not value: return False # Allow characters that typically designate the start of a path. return not value[0].isalnum() and value[0] not in ['/', '.', '~'] ","def _start_of_option(value: str) -> bool: """"""Check if the value looks like the start of an option. This is an adaptation of :py:func:`click.shell_completion._start_of_option` that simply add ``.`` and ``~`` as the characters that are interpreted as the start of a filepath, and so not the start of an option. This will ensure that filepaths starting with these characters are autocompleted once again. """""" if not value: return False # Allow characters that typically designate the start of a path. return not value[0].isalnum() and value[0] not in ['/', '.', '~', '$'] " 54460,"def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> ""go.Figure"": layout = go.Layout(title=""Contour Plot"",) trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE] if len(trials) == 0: _logger.warning(""Your study does not have any completed trials."") return go.Figure(data=[], layout=layout) all_params = {p_name for t in trials for p_name in t.params.keys()} if params is None: sorted_params = sorted(list(all_params)) elif len(params) <= 1: _logger.warning(""The length of params must be greater than 1."") return go.Figure(data=[], layout=layout) else: for input_p_name in params: if input_p_name not in all_params: raise ValueError(""Parameter {} does not exist in your study."".format(input_p_name)) sorted_params = sorted(list(set(params))) padding_ratio = 0.05 param_values_range = {} for p_name in sorted_params: values = [t.params[p_name] for t in trials if p_name in t.params] if _is_log_scale(trials, p_name): padding = (math.log10(max(values)) - math.log10(min(values))) * padding_ratio min_value = math.pow(10, math.log10(min(values)) - padding) max_value = math.pow(10, math.log10(max(values)) + padding) else: padding = (max(values) - min(values)) * padding_ratio min_value = min(values) - padding max_value = max(values) + padding param_values_range[p_name] = (min_value, max_value) if len(sorted_params) == 2: x_param = sorted_params[0] y_param = sorted_params[1] sub_plots = _generate_contour_subplot( trials, x_param, y_param, study.direction, param_values_range ) figure = go.Figure(data=sub_plots, layout=layout) figure.update_xaxes(title_text=x_param, range=param_values_range[x_param]) figure.update_yaxes(title_text=y_param, range=param_values_range[y_param]) if _is_log_scale(trials, x_param): log_range = [math.log10(p) for p in param_values_range[x_param]] figure.update_xaxes(range=log_range, 
type=""log"") if _is_log_scale(trials, y_param): log_range = [math.log10(p) for p in param_values_range[y_param]] figure.update_yaxes(range=log_range, type=""log"") else: figure = make_subplots( rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True ) figure.update_layout(layout) showscale = True # showscale option only needs to be specified once for x_i, x_param in enumerate(sorted_params): for y_i, y_param in enumerate(sorted_params): if x_param == y_param: figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1) else: sub_plots = _generate_contour_subplot( trials, x_param, y_param, study.direction, param_values_range ) contour = sub_plots[0] scatter = sub_plots[1] contour.update(showscale=showscale) # showscale's default is True if showscale: showscale = False figure.add_trace(contour, row=y_i + 1, col=x_i + 1) figure.add_trace(scatter, row=y_i + 1, col=x_i + 1) figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1) figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1) if _is_log_scale(trials, x_param): log_range = [math.log10(p) for p in param_values_range[x_param]] figure.update_xaxes(range=log_range, type=""log"", row=y_i + 1, col=x_i + 1) if _is_log_scale(trials, y_param): log_range = [math.log10(p) for p in param_values_range[y_param]] figure.update_yaxes(range=log_range, type=""log"", row=y_i + 1, col=x_i + 1) if x_i == 0: figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1) if y_i == len(sorted_params) - 1: figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1) return figure ","def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> ""go.Figure"": layout = go.Layout(title=""Contour Plot"",) trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE] if len(trials) == 0: _logger.warning(""Your study does not have any completed trials."") return go.Figure(data=[], layout=layout) all_params = {p_name for t in trials for p_name in t.params.keys()} if params is None: sorted_params = sorted(list(all_params)) elif len(params) <= 1: _logger.warning(""The length of params must be greater than 1."") return go.Figure(data=[], layout=layout) else: for input_p_name in params: if input_p_name not in all_params: raise ValueError(""Parameter {} does not exist in your study."".format(input_p_name)) sorted_params = sorted(list(set(params))) padding_ratio = 0.05 param_values_range = {} for p_name in sorted_params: values = [t.params[p_name] for t in trials if p_name in t.params] max_value = max(values) min_value = min(values) if _is_log_scale(trials, p_name): padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio min_value = math.pow(10, math.log10(min_value) - padding) max_value = math.pow(10, math.log10(max_value) + padding) else: padding = (max_value - min_value) * padding_ratio min_value = min_value - padding max_value = max_value + padding param_values_range[p_name] = (min_value, max_value) if len(sorted_params) == 2: x_param = sorted_params[0] y_param = sorted_params[1] sub_plots = _generate_contour_subplot( trials, x_param, y_param, study.direction, param_values_range ) figure = go.Figure(data=sub_plots, layout=layout) figure.update_xaxes(title_text=x_param, range=param_values_range[x_param]) figure.update_yaxes(title_text=y_param, range=param_values_range[y_param]) if _is_log_scale(trials, x_param): log_range = [math.log10(p) for p in param_values_range[x_param]] figure.update_xaxes(range=log_range, type=""log"") if 
_is_log_scale(trials, y_param): log_range = [math.log10(p) for p in param_values_range[y_param]] figure.update_yaxes(range=log_range, type=""log"") else: figure = make_subplots( rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True ) figure.update_layout(layout) showscale = True # showscale option only needs to be specified once for x_i, x_param in enumerate(sorted_params): for y_i, y_param in enumerate(sorted_params): if x_param == y_param: figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1) else: sub_plots = _generate_contour_subplot( trials, x_param, y_param, study.direction, param_values_range ) contour = sub_plots[0] scatter = sub_plots[1] contour.update(showscale=showscale) # showscale's default is True if showscale: showscale = False figure.add_trace(contour, row=y_i + 1, col=x_i + 1) figure.add_trace(scatter, row=y_i + 1, col=x_i + 1) figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1) figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1) if _is_log_scale(trials, x_param): log_range = [math.log10(p) for p in param_values_range[x_param]] figure.update_xaxes(range=log_range, type=""log"", row=y_i + 1, col=x_i + 1) if _is_log_scale(trials, y_param): log_range = [math.log10(p) for p in param_values_range[y_param]] figure.update_yaxes(range=log_range, type=""log"", row=y_i + 1, col=x_i + 1) if x_i == 0: figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1) if y_i == len(sorted_params) - 1: figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1) return figure " 36926,"def test_show_multiple_commits(tmp_dir, scm, dvc, exp_stage): from dvc.exceptions import InvalidArgumentError init_rev = scm.get_rev() tmp_dir.scm_gen(""file"", ""file"", ""commit"") next_rev = scm.get_rev() with pytest.raises(InvalidArgumentError): dvc.experiments.show(num=-1) expected = {""workspace"", init_rev, next_rev} results = dvc.experiments.show(num=2) assert set(results.keys()) == {""workspace"", init_rev, next_rev} expected = {""workspace""} | set(scm.branch_revs(""master"")) results = dvc.experiments.show(all_commits=True) assert set(results.keys()) == expected results = dvc.experiments.show(num=100) assert set(results.keys()) == expected ","def test_show_multiple_commits(tmp_dir, scm, dvc, exp_stage): from dvc.exceptions import InvalidArgumentError init_rev = scm.get_rev() tmp_dir.scm_gen(""file"", ""file"", ""commit"") next_rev = scm.get_rev() with pytest.raises(InvalidArgumentError): dvc.experiments.show(num=-1) expected = {""workspace"", init_rev, next_rev} results = dvc.experiments.show(num=2) assert set(results.keys()) == expected expected = {""workspace""} | set(scm.branch_revs(""master"")) results = dvc.experiments.show(all_commits=True) assert set(results.keys()) == expected results = dvc.experiments.show(num=100) assert set(results.keys()) == expected " 5664,"def ellipap(N, rp, rs): """"""Return (z,p,k) of Nth-order elliptic analog lowpass filter. The filter is a normalized prototype that has `rp` decibels of ripple in the passband and a stopband `rs` decibels down. The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- ellip : Filter design function using this prototype References ---------- .. [1] Lutova, Tosic, and Evans, ""Filter Design for Signal Processing"", Chapters 5 and 12. 
"""""" if abs(int(N)) != N: raise ValueError(""Filter order must be a nonnegative integer"") elif N == 0: # Avoid divide-by-zero warning # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) elif N == 1: p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0)) k = -p z = [] return asarray(z), asarray(p), k eps_sq = 10 ** (0.1 * rp) - 1 eps = np.sqrt(eps_sq) ck1_sq = eps_sq / (10 ** (0.1 * rs) - 1) if ck1_sq == 0: raise ValueError(""Cannot design a filter with given rp and rs"" "" specifications."") val = np.array([special.ellipk(ck1_sq), special.ellipkm1(ck1_sq)]) krat = N * val[0] / val[1] m = _solve_kratio(krat) capk = special.ellipk(m) j = numpy.arange(1 - N % 2, N, 2) jj = len(j) [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) z = 1.0 / (sqrt(m) * snew) z = 1j * z z = numpy.concatenate((z, conjugate(z))) r = _solve_vratio(1. / eps, ck1_sq) v0 = capk * r / (N * val[0]) [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) if N % 2: newp = numpy.compress(abs(p.imag) > EPSILON * numpy.sqrt(numpy.sum(p * numpy.conjugate(p), axis=0).real), p, axis=-1) p = numpy.concatenate((p, conjugate(newp))) else: p = numpy.concatenate((p, conjugate(p))) k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real if N % 2 == 0: k = k / numpy.sqrt((1 + eps_sq)) return z, p, k ","def ellipap(N, rp, rs): """"""Return (z,p,k) of Nth-order elliptic analog lowpass filter. The filter is a normalized prototype that has `rp` decibels of ripple in the passband and a stopband `rs` decibels down. The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- ellip : Filter design function using this prototype References ---------- .. [1] Lutova, Tosic, and Evans, ""Filter Design for Signal Processing"", Chapters 5 and 12. """""" if abs(int(N)) != N: raise ValueError(""Filter order must be a nonnegative integer"") elif N == 0: # Avoid divide-by-zero warning # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) elif N == 1: p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0)) k = -p z = [] return asarray(z), asarray(p), k eps_sq = 10 ** (0.1 * rp) - 1 eps = np.sqrt(eps_sq) ck1_sq = eps_sq / (10 ** (0.1 * rs) - 1) if ck1_sq == 0: raise ValueError(""Cannot design a filter with given rp and rs"" "" specifications."") val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq) krat = N * val[0] / val[1] m = _solve_kratio(krat) capk = special.ellipk(m) j = numpy.arange(1 - N % 2, N, 2) jj = len(j) [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) z = 1.0 / (sqrt(m) * snew) z = 1j * z z = numpy.concatenate((z, conjugate(z))) r = _solve_vratio(1. 
/ eps, ck1_sq) v0 = capk * r / (N * val[0]) [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) if N % 2: newp = numpy.compress(abs(p.imag) > EPSILON * numpy.sqrt(numpy.sum(p * numpy.conjugate(p), axis=0).real), p, axis=-1) p = numpy.concatenate((p, conjugate(newp))) else: p = numpy.concatenate((p, conjugate(p))) k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real if N % 2 == 0: k = k / numpy.sqrt((1 + eps_sq)) return z, p, k " 45811,"def load_depth(file_name): """"""Loads the depth using the sintel SDK and converts to torch.Tensor"""""" if not os.path.isfile(file_name): raise AssertionError(""Invalid file {}"".format(file_name)) import sintel_io depth = sintel_io.depth_read(file_name) return torch.from_numpy(depth).view(1, 1, *depth.shape).float() ","def load_depth(file_name): """"""Loads the depth using the sintel SDK and converts to torch.Tensor"""""" if not os.path.isfile(file_name): raise FileExistsError(""Invalid file {}"".format(file_name)) import sintel_io depth = sintel_io.depth_read(file_name) return torch.from_numpy(depth).view(1, 1, *depth.shape).float() " 59343,"def _parse_args_and_run_subcommand(argv): parser = ArgumentParser(prog=""anaconda-project"", description=""Actions on projects (runnable projects)."") subparsers = parser.add_subparsers(help=""Sub-commands"") parser.add_argument('-v', '--version', action='version', version=version) parser.add_argument('--verbose', action='store_true', default=False, help=""show verbose debugging details"") def add_directory_arg(preset): preset.add_argument('--directory', metavar='PROJECT_DIR', default='.', help=""Project directory containing anaconda-project.yml (defaults to current directory)"") def add_env_spec_arg(preset): preset.add_argument('--env-spec', metavar='ENVIRONMENT_SPEC_NAME', default=None, action='store', help=""An environment spec name from anaconda-project.yml"") def add_prepare_args(preset, include_command=True): add_directory_arg(preset) add_env_spec_arg(preset) all_supported_modes = list(_all_ui_modes) # we don't support ""ask about every single thing"" mode yet. 
all_supported_modes.remove(UI_MODE_TEXT_ASK_QUESTIONS) preset.add_argument('--mode', metavar='MODE', default=UI_MODE_TEXT_DEVELOPMENT_DEFAULTS_OR_ASK, choices=_all_ui_modes, action='store', help=""One of "" + "", "".join(_all_ui_modes)) if include_command: preset.add_argument( '--command', metavar='COMMAND_NAME', default=None, action='store', help=""A command name from anaconda-project.yml (env spec for this command will be used)"") def add_env_spec_name_arg(preset, required): preset.add_argument('-n', '--name', metavar='ENVIRONMENT_SPEC_NAME', required=required, action='store', help=""Name of the environment spec from anaconda-project.yml"") preset = subparsers.add_parser('init', help=""Initialize a directory with default project configuration"") add_directory_arg(preset) preset.add_argument('--empty-environment', action='store_true', help=""Do not add the default package set to the environment."", default=None) preset.add_argument('-y', '--yes', action='store_true', help=""Assume yes to all confirmation prompts"", default=None) preset.set_defaults(main=init.main) preset = subparsers.add_parser('run', help=""Run the project, setting up requirements first"") add_prepare_args(preset, include_command=False) preset.add_argument('command', metavar='COMMAND_NAME', default=None, nargs='?', help=""A command name from anaconda-project.yml"") preset.add_argument('extra_args_for_command', metavar='EXTRA_ARGS_FOR_COMMAND', default=None, nargs=REMAINDER) preset.set_defaults(main=run.main) preset = subparsers.add_parser('prepare', help=""Set up the project requirements, but does not run the project"") preset.add_argument('--all', action='store_true', help=""Prepare all environments"", default=None) preset.add_argument('--refresh', action='store_true', help='Remove and recreate the environment', default=None) add_prepare_args(preset) preset.set_defaults(main=prepare.main) preset = subparsers.add_parser('clean', help=""Removes generated state (stops services, deletes environment files, etc)"") add_directory_arg(preset) preset.set_defaults(main=clean.main) if not anaconda_project._beta_test_mode: preset = subparsers.add_parser('activate', help=""Set up the project and output shell export commands reflecting the setup"") add_prepare_args(preset) preset.set_defaults(main=activate.main) preset = subparsers.add_parser('archive', help=""Create a .zip, .tar.gz, or .tar.bz2 archive with project files in it"") add_directory_arg(preset) preset.add_argument('filename', metavar='ARCHIVE_FILENAME') preset.set_defaults(main=archive.main) preset = subparsers.add_parser('unarchive', help=""Unpack a .zip, .tar.gz, or .tar.bz2 archive with project files in it"") preset.add_argument('filename', metavar='ARCHIVE_FILENAME') preset.add_argument('directory', metavar='DESTINATION_DIRECTORY', default=None, nargs='?') preset.set_defaults(main=unarchive.main) preset = subparsers.add_parser('upload', help=""Upload the project to Anaconda Cloud"") add_directory_arg(preset) preset.add_argument('-p', '--private', action='store_true', help=""Upload a private project"", default=None) preset.add_argument('-s', '--site', metavar='SITE', help='Select site to use') preset.add_argument('-t', '--token', metavar='TOKEN', help='Auth token or a path to a file containing a token') preset.add_argument('-u', '--user', metavar='USERNAME', help='User account, defaults to the current user') preset.add_argument('--suffix', metavar='SUFFIX', help='Project archive suffix (.tar.gz, .tar.bz2, .zip)', default='.tar.bz2', choices=['.tar.gz', '.tar.bz2', 
'.zip']) preset.set_defaults(main=upload.main) preset = subparsers.add_parser('download', help=""Download the project from Anaconda Cloud"") add_directory_arg(preset) preset.add_argument('project', help='The project to download as <username>/<projectname>. If <projectname> ' + 'has spaces inclose everything in quotes ""<username>/<projectname>"".' + 'If specified as <projectname> then the logged-in username is used.') preset.add_argument('--no-unpack', action='store_true', help='Do not unpack the project archive.') preset.add_argument( '--parent_dir', default=None, help='Download archive to specific directory, otherwise downloaded to current working directory.') preset.add_argument('-s', '--site', metavar='SITE', help='Select site to use') preset.add_argument('-t', '--token', metavar='TOKEN', help='Auth token or a path to a file containing a token') preset.add_argument('-u', '--user', metavar='USERNAME', help='User account, defaults to the current user') preset.set_defaults(main=download.main) preset = subparsers.add_parser('dockerize', help=""Build a docker image of the Anaconda Project."") add_directory_arg(preset) preset.add_argument('-t', '--tag', default=None, help='Tag of the output docker image in the format name:tag. ' 'Default: ""<name>:latest"", where <name> is taken from ' 'the name tag in the anaconda-project.yml file.') preset.add_argument( '--command', default='default', help='Select the command to run. If unspecified the ""default"" command is run.\nThe default command ' 'is defined as either the command named ""default"" or the first command specified in the ' 'anaconda-project.yml file.') preset.add_argument('--builder-image', default='{}:latest'.format(DEFAULT_BUILDER_IMAGE), help='The s2i builder image') preset.add_argument('build_args', default=None, nargs=""*"", help='Optional arguments for the s2i build command. ' 'See the output of ""s2i build --help"" for the available arguments.
' 'It is recommended to include a -- separator before supplying these arguments.') preset.set_defaults(main=dockerize.main) preset = subparsers.add_parser('add-variable', help=""Add a required environment variable to the project"") add_env_spec_arg(preset) preset.add_argument('vars_to_add', metavar='VARS_TO_ADD', default=None, nargs=REMAINDER) preset.add_argument('--default', metavar='DEFAULT_VALUE', default=None, help='Default value if environment variable is unset') add_directory_arg(preset) preset.set_defaults(main=variable_commands.main_add) preset = subparsers.add_parser('remove-variable', help=""Remove an environment variable from the project"") add_env_spec_arg(preset) add_directory_arg(preset) preset.add_argument('vars_to_remove', metavar='VARS_TO_REMOVE', default=None, nargs=REMAINDER) preset.set_defaults(main=variable_commands.main_remove) preset = subparsers.add_parser('list-variables', help=""List all variables on the project"") add_env_spec_arg(preset) add_directory_arg(preset) preset.set_defaults(main=variable_commands.main_list) preset = subparsers.add_parser('set-variable', help=""Set an environment variable value in anaconda-project-local.yml"") add_env_spec_arg(preset) preset.add_argument('vars_and_values', metavar='VARS_AND_VALUES', default=None, nargs=REMAINDER) add_directory_arg(preset) preset.set_defaults(main=variable_commands.main_set) preset = subparsers.add_parser('unset-variable', help=""Unset an environment variable value from anaconda-project-local.yml"") add_env_spec_arg(preset) add_directory_arg(preset) preset.add_argument('vars_to_unset', metavar='VARS_TO_UNSET', default=None, nargs=REMAINDER) preset.set_defaults(main=variable_commands.main_unset) preset = subparsers.add_parser('add-download', help=""Add a URL to be downloaded before running commands"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('filename_variable', metavar='ENV_VAR_FOR_FILENAME', default=None) preset.add_argument('download_url', metavar='DOWNLOAD_URL', default=None) preset.add_argument('--filename', help=""The name to give the file/folder after downloading it"", default=None) preset.add_argument('--hash-algorithm', help=""Defines which hash algorithm to use"", default=None, choices=_hash_algorithms) preset.add_argument('--hash-value', help=""The expected checksum hash of the downloaded file"", default=None) preset.set_defaults(main=download_commands.main_add) preset = subparsers.add_parser('remove-download', help=""Remove a download from the project and from the filesystem"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('filename_variable', metavar='ENV_VAR_FOR_FILENAME', default=None) preset.set_defaults(main=download_commands.main_remove) preset = subparsers.add_parser('list-downloads', help=""List all downloads on the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=download_commands.main_list) service_types = RequirementsRegistry().list_service_types() service_choices = list(map(lambda s: s.name, service_types)) def add_service_variable_name(preset): preset.add_argument('--variable', metavar='ENV_VAR_FOR_SERVICE_ADDRESS', default=None) preset = subparsers.add_parser('add-service', help=""Add a service to be available before running commands"") add_directory_arg(preset) add_env_spec_arg(preset) add_service_variable_name(preset) preset.add_argument('service_type', metavar='SERVICE_TYPE', default=None, choices=service_choices) preset.set_defaults(main=service_commands.main_add) preset = 
subparsers.add_parser('remove-service', help=""Remove a service from the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('variable', metavar='SERVICE_REFERENCE', default=None) preset.set_defaults(main=service_commands.main_remove) preset = subparsers.add_parser('list-services', help=""List services present in the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=service_commands.main_list) def add_package_args(preset): preset.add_argument('-c', '--channel', metavar='CHANNEL', action='append', help='Channel to search for packages') preset.add_argument('packages', metavar='PACKAGES', default=None, nargs=REMAINDER) preset = subparsers.add_parser('add-env-spec', help=""Add a new environment spec to the project"") add_directory_arg(preset) add_package_args(preset) add_env_spec_name_arg(preset, required=True) preset.set_defaults(main=environment_commands.main_add) preset = subparsers.add_parser('remove-env-spec', help=""Remove an environment spec from the project"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=True) preset.set_defaults(main=environment_commands.main_remove) preset = subparsers.add_parser('list-env-specs', help=""List all environment specs for the project"") add_directory_arg(preset) preset.set_defaults(main=environment_commands.main_list_env_specs) preset = subparsers.add_parser('export-env-spec', help=""Save an environment spec as a conda environment file"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.add_argument('filename', metavar='ENVIRONMENT_FILE') preset.set_defaults(main=environment_commands.main_export) preset = subparsers.add_parser('lock', help=""Lock all packages at their current versions"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.set_defaults(main=environment_commands.main_lock) preset = subparsers.add_parser('unlock', help=""Remove locked package versions"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.set_defaults(main=environment_commands.main_unlock) preset = subparsers.add_parser('update', help=""Update all packages to their latest versions"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.set_defaults(main=environment_commands.main_update) preset = subparsers.add_parser('add-packages', help=""Add packages to one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) add_package_args(preset) preset.set_defaults(main=environment_commands.main_add_packages) preset = subparsers.add_parser('remove-packages', help=""Remove packages from one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('packages', metavar='PACKAGE_NAME', default=None, nargs='+') preset.set_defaults(main=environment_commands.main_remove_packages) preset = subparsers.add_parser('list-packages', help=""List packages for an environment on the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=environment_commands.main_list_packages) def add_platforms_list(preset): preset.add_argument('platforms', metavar='PLATFORM_NAME', default=None, nargs='+') preset = subparsers.add_parser('add-platforms', help=""Add platforms to one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) add_platforms_list(preset) preset.set_defaults(main=environment_commands.main_add_platforms) preset = subparsers.add_parser('remove-platforms', help=""Remove 
platforms from one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) add_platforms_list(preset) preset.set_defaults(main=environment_commands.main_remove_platforms) preset = subparsers.add_parser('list-platforms', help=""List platforms for an environment on the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=environment_commands.main_list_platforms) def add_command_name_arg(preset): preset.add_argument('name', metavar=""NAME"", help=""Command name used to invoke it"") preset = subparsers.add_parser('add-command', help=""Add a new command to the project"") add_directory_arg(preset) command_choices = list(ALL_COMMAND_TYPES) + ['ask'] command_choices.remove(""conda_app_entry"") # conda_app_entry is sort of silly and may go away preset.add_argument('--type', action=""store"", choices=command_choices, help=""Command type to add"") add_command_name_arg(preset) add_env_spec_arg(preset) preset.add_argument('--supports-http-options', dest='supports_http_options', action=""store_true"", help=""The command supports project's HTTP server options"") preset.add_argument('--no-supports-http-options', dest='supports_http_options', action=""store_false"", help="" The command does not support project's HTTP server options"") preset.add_argument('command', metavar=""COMMAND"", help=""Command line or app filename to add"") preset.set_defaults(main=command_commands.main, supports_http_options=None) preset = subparsers.add_parser('remove-command', help=""Remove a command from the project"") add_directory_arg(preset) add_command_name_arg(preset) preset.set_defaults(main=command_commands.main_remove) preset = subparsers.add_parser('list-default-command', help=""List only the default command on the project"") add_directory_arg(preset) preset.set_defaults(main=command_commands.main_default) preset = subparsers.add_parser('list-commands', help=""List the commands on the project"") add_directory_arg(preset) preset.set_defaults(main=command_commands.main_list) # argparse doesn't do this for us for whatever reason if len(argv) < 2: print(""Must specify a subcommand."", file=sys.stderr) parser.print_usage(file=sys.stderr) return 2 # argparse exits with 2 on bad args, copy that try: args = parser.parse_args(argv[1:]) except SystemExit as e: return e.code if args.verbose: logger = (logging.getLoggerClass())(name=""anaconda_project_verbose"") logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(stream=sys.stderr) logger.addHandler(handler) push_verbose_logger(logger) try: # '--directory' is used for most subcommands; for unarchive, # args.directory is positional and may be None if 'directory' in args and args.directory is not None: args.directory = os.path.abspath(args.directory) return args.main(args) finally: if args.verbose: pop_verbose_logger() ","def _parse_args_and_run_subcommand(argv): parser = ArgumentParser(prog=""anaconda-project"", description=""Actions on projects (runnable projects)."") subparsers = parser.add_subparsers(help=""Sub-commands"") parser.add_argument('-v', '--version', action='version', version=version) parser.add_argument('--verbose', action='store_true', default=False, help=""show verbose debugging details"") def add_directory_arg(preset): preset.add_argument('--directory', metavar='PROJECT_DIR', default='.', help=""Project directory containing anaconda-project.yml (defaults to current directory)"") def add_env_spec_arg(preset): preset.add_argument('--env-spec', metavar='ENVIRONMENT_SPEC_NAME', default=None, 
action='store', help=""An environment spec name from anaconda-project.yml"") def add_prepare_args(preset, include_command=True): add_directory_arg(preset) add_env_spec_arg(preset) all_supported_modes = list(_all_ui_modes) # we don't support ""ask about every single thing"" mode yet. all_supported_modes.remove(UI_MODE_TEXT_ASK_QUESTIONS) preset.add_argument('--mode', metavar='MODE', default=UI_MODE_TEXT_DEVELOPMENT_DEFAULTS_OR_ASK, choices=_all_ui_modes, action='store', help=""One of "" + "", "".join(_all_ui_modes)) if include_command: preset.add_argument( '--command', metavar='COMMAND_NAME', default=None, action='store', help=""A command name from anaconda-project.yml (env spec for this command will be used)"") def add_env_spec_name_arg(preset, required): preset.add_argument('-n', '--name', metavar='ENVIRONMENT_SPEC_NAME', required=required, action='store', help=""Name of the environment spec from anaconda-project.yml"") preset = subparsers.add_parser('init', help=""Initialize a directory with default project configuration"") add_directory_arg(preset) preset.add_argument('--empty-environment', action='store_true', help=""Do not add the default package set to the environment."", default=None) preset.add_argument('-y', '--yes', action='store_true', help=""Assume yes to all confirmation prompts"", default=None) preset.set_defaults(main=init.main) preset = subparsers.add_parser('run', help=""Run the project, setting up requirements first"") add_prepare_args(preset, include_command=False) preset.add_argument('command', metavar='COMMAND_NAME', default=None, nargs='?', help=""A command name from anaconda-project.yml"") preset.add_argument('extra_args_for_command', metavar='EXTRA_ARGS_FOR_COMMAND', default=None, nargs=REMAINDER) preset.set_defaults(main=run.main) preset = subparsers.add_parser('prepare', help=""Set up the project requirements, but does not run the project"") preset.add_argument('--all', action='store_true', help=""Prepare all environments"", default=None) preset.add_argument('--refresh', action='store_true', help='Remove and recreate the environment', default=None) add_prepare_args(preset) preset.set_defaults(main=prepare.main) preset = subparsers.add_parser('clean', help=""Removes generated state (stops services, deletes environment files, etc)"") add_directory_arg(preset) preset.set_defaults(main=clean.main) if not anaconda_project._beta_test_mode: preset = subparsers.add_parser('activate', help=""Set up the project and output shell export commands reflecting the setup"") add_prepare_args(preset) preset.set_defaults(main=activate.main) preset = subparsers.add_parser('archive', help=""Create a .zip, .tar.gz, or .tar.bz2 archive with project files in it"") add_directory_arg(preset) preset.add_argument('filename', metavar='ARCHIVE_FILENAME') preset.set_defaults(main=archive.main) preset = subparsers.add_parser('unarchive', help=""Unpack a .zip, .tar.gz, or .tar.bz2 archive with project files in it"") preset.add_argument('filename', metavar='ARCHIVE_FILENAME') preset.add_argument('directory', metavar='DESTINATION_DIRECTORY', default=None, nargs='?') preset.set_defaults(main=unarchive.main) preset = subparsers.add_parser('upload', help=""Upload the project to Anaconda Cloud"") add_directory_arg(preset) preset.add_argument('-p', '--private', action='store_true', help=""Upload a private project"", default=None) preset.add_argument('-s', '--site', metavar='SITE', help='Select site to use') preset.add_argument('-t', '--token', metavar='TOKEN', help='Auth token or a path to a file 
containing a token') preset.add_argument('-u', '--user', metavar='USERNAME', help='User account, defaults to the current user') preset.add_argument('--suffix', metavar='SUFFIX', help='Project archive suffix (.tar.gz, .tar.bz2, .zip)', default='.tar.bz2', choices=['.tar.gz', '.tar.bz2', '.zip']) preset.set_defaults(main=upload.main) preset = subparsers.add_parser('download', help=""Download the project from Anaconda Cloud"") add_directory_arg(preset) preset.add_argument('project', help='The project to download as <username>/<projectname>. If <projectname> ' + 'has spaces inclose everything in quotes ""<username>/<projectname>"".' + 'If specified as <projectname> then the logged-in username is used.') preset.add_argument('--no-unpack', action='store_true', help='Do not unpack the project archive.') preset.add_argument( '--parent_dir', default=None, help='Download archive to specific directory, otherwise downloaded to current working directory.') preset.add_argument('-s', '--site', metavar='SITE', help='Select site to use') preset.add_argument('-t', '--token', metavar='TOKEN', help='Auth token or a path to a file containing a token') preset.add_argument('-u', '--user', metavar='USERNAME', help='User account, defaults to the current user') preset.set_defaults(main=download.main) preset = subparsers.add_parser('dockerize', help=""Build a docker image of the Anaconda Project."") add_directory_arg(preset) preset.add_argument('-t', '--tag', default=None, help='Tag of the output docker image in the format name:tag. ' 'Default: ""<name>:latest"", where <name> is taken from ' 'the name tag in the anaconda-project.yml file.') preset.add_argument( '--command', default='default', help='Select the command to run. If unspecified the ""default"" command is run.\nThe default command ' 'is defined as either the command named ""default"" (if any) or (otherwise) ' 'the first command specified in the anaconda-project.yml file.') preset.add_argument('--builder-image', default='{}:latest'.format(DEFAULT_BUILDER_IMAGE), help='The s2i builder image') preset.add_argument('build_args', default=None, nargs=""*"", help='Optional arguments for the s2i build command. ' 'See the output of ""s2i build --help"" for the available arguments.
' 'It is recommended to include a -- separator before supplying these arguments.') preset.set_defaults(main=dockerize.main) preset = subparsers.add_parser('add-variable', help=""Add a required environment variable to the project"") add_env_spec_arg(preset) preset.add_argument('vars_to_add', metavar='VARS_TO_ADD', default=None, nargs=REMAINDER) preset.add_argument('--default', metavar='DEFAULT_VALUE', default=None, help='Default value if environment variable is unset') add_directory_arg(preset) preset.set_defaults(main=variable_commands.main_add) preset = subparsers.add_parser('remove-variable', help=""Remove an environment variable from the project"") add_env_spec_arg(preset) add_directory_arg(preset) preset.add_argument('vars_to_remove', metavar='VARS_TO_REMOVE', default=None, nargs=REMAINDER) preset.set_defaults(main=variable_commands.main_remove) preset = subparsers.add_parser('list-variables', help=""List all variables on the project"") add_env_spec_arg(preset) add_directory_arg(preset) preset.set_defaults(main=variable_commands.main_list) preset = subparsers.add_parser('set-variable', help=""Set an environment variable value in anaconda-project-local.yml"") add_env_spec_arg(preset) preset.add_argument('vars_and_values', metavar='VARS_AND_VALUES', default=None, nargs=REMAINDER) add_directory_arg(preset) preset.set_defaults(main=variable_commands.main_set) preset = subparsers.add_parser('unset-variable', help=""Unset an environment variable value from anaconda-project-local.yml"") add_env_spec_arg(preset) add_directory_arg(preset) preset.add_argument('vars_to_unset', metavar='VARS_TO_UNSET', default=None, nargs=REMAINDER) preset.set_defaults(main=variable_commands.main_unset) preset = subparsers.add_parser('add-download', help=""Add a URL to be downloaded before running commands"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('filename_variable', metavar='ENV_VAR_FOR_FILENAME', default=None) preset.add_argument('download_url', metavar='DOWNLOAD_URL', default=None) preset.add_argument('--filename', help=""The name to give the file/folder after downloading it"", default=None) preset.add_argument('--hash-algorithm', help=""Defines which hash algorithm to use"", default=None, choices=_hash_algorithms) preset.add_argument('--hash-value', help=""The expected checksum hash of the downloaded file"", default=None) preset.set_defaults(main=download_commands.main_add) preset = subparsers.add_parser('remove-download', help=""Remove a download from the project and from the filesystem"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('filename_variable', metavar='ENV_VAR_FOR_FILENAME', default=None) preset.set_defaults(main=download_commands.main_remove) preset = subparsers.add_parser('list-downloads', help=""List all downloads on the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=download_commands.main_list) service_types = RequirementsRegistry().list_service_types() service_choices = list(map(lambda s: s.name, service_types)) def add_service_variable_name(preset): preset.add_argument('--variable', metavar='ENV_VAR_FOR_SERVICE_ADDRESS', default=None) preset = subparsers.add_parser('add-service', help=""Add a service to be available before running commands"") add_directory_arg(preset) add_env_spec_arg(preset) add_service_variable_name(preset) preset.add_argument('service_type', metavar='SERVICE_TYPE', default=None, choices=service_choices) preset.set_defaults(main=service_commands.main_add) preset = 
subparsers.add_parser('remove-service', help=""Remove a service from the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('variable', metavar='SERVICE_REFERENCE', default=None) preset.set_defaults(main=service_commands.main_remove) preset = subparsers.add_parser('list-services', help=""List services present in the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=service_commands.main_list) def add_package_args(preset): preset.add_argument('-c', '--channel', metavar='CHANNEL', action='append', help='Channel to search for packages') preset.add_argument('packages', metavar='PACKAGES', default=None, nargs=REMAINDER) preset = subparsers.add_parser('add-env-spec', help=""Add a new environment spec to the project"") add_directory_arg(preset) add_package_args(preset) add_env_spec_name_arg(preset, required=True) preset.set_defaults(main=environment_commands.main_add) preset = subparsers.add_parser('remove-env-spec', help=""Remove an environment spec from the project"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=True) preset.set_defaults(main=environment_commands.main_remove) preset = subparsers.add_parser('list-env-specs', help=""List all environment specs for the project"") add_directory_arg(preset) preset.set_defaults(main=environment_commands.main_list_env_specs) preset = subparsers.add_parser('export-env-spec', help=""Save an environment spec as a conda environment file"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.add_argument('filename', metavar='ENVIRONMENT_FILE') preset.set_defaults(main=environment_commands.main_export) preset = subparsers.add_parser('lock', help=""Lock all packages at their current versions"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.set_defaults(main=environment_commands.main_lock) preset = subparsers.add_parser('unlock', help=""Remove locked package versions"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.set_defaults(main=environment_commands.main_unlock) preset = subparsers.add_parser('update', help=""Update all packages to their latest versions"") add_directory_arg(preset) add_env_spec_name_arg(preset, required=False) preset.set_defaults(main=environment_commands.main_update) preset = subparsers.add_parser('add-packages', help=""Add packages to one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) add_package_args(preset) preset.set_defaults(main=environment_commands.main_add_packages) preset = subparsers.add_parser('remove-packages', help=""Remove packages from one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) preset.add_argument('packages', metavar='PACKAGE_NAME', default=None, nargs='+') preset.set_defaults(main=environment_commands.main_remove_packages) preset = subparsers.add_parser('list-packages', help=""List packages for an environment on the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=environment_commands.main_list_packages) def add_platforms_list(preset): preset.add_argument('platforms', metavar='PLATFORM_NAME', default=None, nargs='+') preset = subparsers.add_parser('add-platforms', help=""Add platforms to one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) add_platforms_list(preset) preset.set_defaults(main=environment_commands.main_add_platforms) preset = subparsers.add_parser('remove-platforms', help=""Remove 
platforms from one or all project environments"") add_directory_arg(preset) add_env_spec_arg(preset) add_platforms_list(preset) preset.set_defaults(main=environment_commands.main_remove_platforms) preset = subparsers.add_parser('list-platforms', help=""List platforms for an environment on the project"") add_directory_arg(preset) add_env_spec_arg(preset) preset.set_defaults(main=environment_commands.main_list_platforms) def add_command_name_arg(preset): preset.add_argument('name', metavar=""NAME"", help=""Command name used to invoke it"") preset = subparsers.add_parser('add-command', help=""Add a new command to the project"") add_directory_arg(preset) command_choices = list(ALL_COMMAND_TYPES) + ['ask'] command_choices.remove(""conda_app_entry"") # conda_app_entry is sort of silly and may go away preset.add_argument('--type', action=""store"", choices=command_choices, help=""Command type to add"") add_command_name_arg(preset) add_env_spec_arg(preset) preset.add_argument('--supports-http-options', dest='supports_http_options', action=""store_true"", help=""The command supports project's HTTP server options"") preset.add_argument('--no-supports-http-options', dest='supports_http_options', action=""store_false"", help="" The command does not support project's HTTP server options"") preset.add_argument('command', metavar=""COMMAND"", help=""Command line or app filename to add"") preset.set_defaults(main=command_commands.main, supports_http_options=None) preset = subparsers.add_parser('remove-command', help=""Remove a command from the project"") add_directory_arg(preset) add_command_name_arg(preset) preset.set_defaults(main=command_commands.main_remove) preset = subparsers.add_parser('list-default-command', help=""List only the default command on the project"") add_directory_arg(preset) preset.set_defaults(main=command_commands.main_default) preset = subparsers.add_parser('list-commands', help=""List the commands on the project"") add_directory_arg(preset) preset.set_defaults(main=command_commands.main_list) # argparse doesn't do this for us for whatever reason if len(argv) < 2: print(""Must specify a subcommand."", file=sys.stderr) parser.print_usage(file=sys.stderr) return 2 # argparse exits with 2 on bad args, copy that try: args = parser.parse_args(argv[1:]) except SystemExit as e: return e.code if args.verbose: logger = (logging.getLoggerClass())(name=""anaconda_project_verbose"") logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(stream=sys.stderr) logger.addHandler(handler) push_verbose_logger(logger) try: # '--directory' is used for most subcommands; for unarchive, # args.directory is positional and may be None if 'directory' in args and args.directory is not None: args.directory = os.path.abspath(args.directory) return args.main(args) finally: if args.verbose: pop_verbose_logger() " 21259,"def run_scenario(input_file, timesteps, scenario, result_dir, dt, objective, plot_tuples=None, plot_sites_name=None, plot_periods=None, report_tuples=None, report_sites_name=None): """""" run an urbs model for given input, time steps and scenario Args: input_file: filename to an Excel spreadsheet for urbs.read_excel timesteps: a list of timesteps, e.g. range(0,8761) scenario: a scenario function that modifies the input data dict result_dir: directory name for result spreadsheet and plots dt: length of each time step (unit: hours) plot_tuples: (optional) list of plot tuples (c.f. 
urbs.result_figures) plot_sites_name: (optional) dict of names for sites in plot_tuples plot_periods: (optional) dict of plot periods(c.f. urbs.result_figures) report_tuples: (optional) list of (sit, com) tuples (c.f. urbs.report) report_sites_name: (optional) dict of names for sites in report_tuples Returns: the urbs model instance """""" # scenario name, read and modify data for scenario sce = scenario.__name__ data = urbs.read_excel(input_file) data = scenario(data) #urbs.validate_input(data) # create model prob = urbs.create_model(data, dt, timesteps, objective) # refresh time stamp string and create filename for logfile now = prob.created log_filename = os.path.join(result_dir, '{}.log').format(sce) # solve model and read results optim = SolverFactory('glpk') # cplex, glpk, gurobi, ... optim = setup_solver(optim, logfile=log_filename) result = optim.solve(prob, tee=True) # save problem solution (and input data) to HDF5 file urbs.save(prob, os.path.join(result_dir, '{}.h5'.format(sce))) # write report to spreadsheet urbs.report( prob, os.path.join(result_dir, '{}.xlsx').format(sce), report_tuples=report_tuples, report_sites_name=report_sites_name) # result plots urbs.result_figures( prob, os.path.join(result_dir, '{}'.format(sce)), timesteps, plot_title_prefix=sce.replace('_', ' '), plot_tuples=plot_tuples, plot_sites_name=plot_sites_name, periods=plot_periods, figure_size=(24, 9)) return prob ","def run_scenario(input_file, timesteps, scenario, result_dir, dt, objective, plot_tuples=None, plot_sites_name=None, plot_periods=None, report_tuples=None, report_sites_name=None): """""" run an urbs model for given input, time steps and scenario Args: input_file: filename to an Excel spreadsheet for urbs.read_excel timesteps: a list of timesteps, e.g. range(0,8761) scenario: a scenario function that modifies the input data dict result_dir: directory name for result spreadsheet and plots dt: length of each time step (unit: hours) plot_tuples: (optional) list of plot tuples (c.f. urbs.result_figures) plot_sites_name: (optional) dict of names for sites in plot_tuples plot_periods: (optional) dict of plot periods(c.f. urbs.result_figures) report_tuples: (optional) list of (sit, com) tuples (c.f. urbs.report) report_sites_name: (optional) dict of names for sites in report_tuples Returns: the urbs model instance """""" # scenario name, read and modify data for scenario sce = scenario.__name__ data = urbs.read_excel(input_file) data = scenario(data) urbs.validate_input(data) # create model prob = urbs.create_model(data, dt, timesteps, objective) # refresh time stamp string and create filename for logfile now = prob.created log_filename = os.path.join(result_dir, '{}.log').format(sce) # solve model and read results optim = SolverFactory('glpk') # cplex, glpk, gurobi, ... 
optim = setup_solver(optim, logfile=log_filename) result = optim.solve(prob, tee=True) # save problem solution (and input data) to HDF5 file urbs.save(prob, os.path.join(result_dir, '{}.h5'.format(sce))) # write report to spreadsheet urbs.report( prob, os.path.join(result_dir, '{}.xlsx').format(sce), report_tuples=report_tuples, report_sites_name=report_sites_name) # result plots urbs.result_figures( prob, os.path.join(result_dir, '{}'.format(sce)), timesteps, plot_title_prefix=sce.replace('_', ' '), plot_tuples=plot_tuples, plot_sites_name=plot_sites_name, periods=plot_periods, figure_size=(24, 9)) return prob " 41502,"def qmu_tilde(mu, data, pdf, init_pars, par_bounds): r"""""" The test statistic, :math:`\tilde{q}_{\mu}`, for establishing an upper limit on the strength parameter, :math:`\mu` for models with bounded POI. Args: mu (Number or Tensor): The signal strength parameter data (Tensor): The data to be considered pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation init_pars (`list`): Values to initialize the model parameters at for the fit par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit Returns: Float: The calculated test statistic, :math:`q_{\mu}` """""" if pdf.config.poi_index is None: raise UnspecifiedPOI( 'No POI is defined. A POI is required for profile likelihood based test statistics.' ) if par_bounds[pdf.config.poi_index][0] != 0: log.warning( 'qmu tilde test statistic used for fit configuration with POI not bounded at zero. Use qmu.' ) return _qmu_like(mu, data, pdf, init_pars, par_bounds) ","def qmu_tilde(mu, data, pdf, init_pars, par_bounds): r"""""" The test statistic, :math:`\tilde{q}_{\mu}`, for establishing an upper limit on the strength parameter, :math:`\mu` for models with bounded POI. Args: mu (Number or Tensor): The signal strength parameter data (Tensor): The data to be considered pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json init_pars (`list`): Values to initialize the model parameters at for the fit par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit Returns: Float: The calculated test statistic, :math:`q_{\mu}` """""" if pdf.config.poi_index is None: raise UnspecifiedPOI( 'No POI is defined. A POI is required for profile likelihood based test statistics.' ) if par_bounds[pdf.config.poi_index][0] != 0: log.warning( 'qmu tilde test statistic used for fit configuration with POI not bounded at zero. Use qmu.' ) return _qmu_like(mu, data, pdf, init_pars, par_bounds) " 5747,"def test_linregress_identical_x(): x = np.zeros(10) y = np.random.rand(10) assert_raises(ValueError, mstats.linregress, x, y) ","def test_linregress_identical_x(): x = np.zeros(10) y = np.random.random(10) assert_raises(ValueError, mstats.linregress, x, y) " 15616,"def validate_input(hass: HomeAssistantType, data: dict) -> Dict[str, Any]: """"""Validate the user input allows us to connect. Data has the keys from DATA_SCHEMA with values provided by the user. """""" # constructor does login call Api( data[CONF_USERNAME], data[CONF_PASSWORD], ) return True ","def validate_input(hass: HomeAssistant, data: dict) -> Dict[str, Any]: """"""Validate the user input allows us to connect. Data has the keys from DATA_SCHEMA with values provided by the user. 
"""""" # constructor does login call Api( data[CONF_USERNAME], data[CONF_PASSWORD], ) return True " 45738,"def _linda_forecast( precip, precip_lagr_diff, timesteps, fct_gen, precip_pert_gen, vel_pert_gen, n_ensemble_members, seed, measure_time, print_info, return_output, callback, ): """"""Compute LINDA nowcast."""""" # compute convolved difference fields precip_lagr_diff = precip_lagr_diff.copy() for i in range(precip_lagr_diff.shape[0]): for _ in range(fct_gen[""ari_order""] - i): precip_lagr_diff[i] = _composite_convolution( precip_lagr_diff[i], fct_gen[""kernels_1""], fct_gen[""interp_weights""], ) # initialize the random generators if precip_pert_gen is not None: rs_precip_pert = [] np.random.seed(seed) for i in range(n_ensemble_members): rs = np.random.RandomState(seed) rs_precip_pert.append(rs) seed = rs.randint(0, high=1e9) else: rs_precip_pert = None if vel_pert_gen is not None: vps = [] np.random.seed(seed) for i in range(n_ensemble_members): rs = np.random.RandomState(seed) vp = vel_pert_gen[""init_func""](seed) vps.append( lambda t, vp=vp: vel_pert_gen[""gen_func""]( vp, t * vel_pert_gen[""timestep""] ) ) seed = rs.randint(0, high=1e9) else: vps = None state = { ""precip_fct"": [precip[-1].copy() for i in range(n_ensemble_members)], ""precip_lagr_diff"": [ precip_lagr_diff.copy() for i in range(n_ensemble_members) ], ""rs_precip_pert"": rs_precip_pert, } params = { ""interp_weights"": fct_gen[""interp_weights""], ""kernels_1"": fct_gen[""kernels_1""], ""kernels_2"": fct_gen[""kernels_2""], ""mask_adv"": fct_gen[""mask_adv""], ""num_ens_members"": n_ensemble_members, ""num_workers"": fct_gen[""num_workers""], ""num_ensemble_workers"": min(n_ensemble_members, fct_gen[""num_workers""]), ""precip_pert_gen"": precip_pert_gen, ""psi"": fct_gen[""psi""], } precip_f = nowcast_main_loop( precip[-1], fct_gen[""velocity""], state, timesteps, fct_gen[""extrap_method""], _update, extrap_kwargs=fct_gen[""extrap_kwargs""], vel_pert_gen=vps, params=params, callback=callback, return_output=return_output, num_workers=fct_gen[""num_workers""], measure_time=measure_time, ) if measure_time: precip_f, mainloop_time = precip_f if return_output: if not fct_gen[""add_perturbations""]: precip_f = precip_f[0] if measure_time: return precip_f, mainloop_time else: return precip_f else: return None ","def _linda_forecast( precip, precip_lagr_diff, timesteps, fct_gen, precip_pert_gen, vel_pert_gen, n_ensemble_members, seed, measure_time, print_info, return_output, callback, ): """"""Compute LINDA nowcast."""""" # compute convolved difference fields precip_lagr_diff = precip_lagr_diff.copy() for i in range(precip_lagr_diff.shape[0]): for _ in range(fct_gen[""ari_order""] - i): precip_lagr_diff[i] = _composite_convolution( precip_lagr_diff[i], fct_gen[""kernels_1""], fct_gen[""interp_weights""], ) # initialize the random generators if precip_pert_gen is not None: rs_precip_pert = [] np.random.seed(seed) for i in range(n_ensemble_members): rs = np.random.RandomState(seed) rs_precip_pert.append(rs) seed = rs.randint(0, high=1e9) else: rs_precip_pert = None if vel_pert_gen is not None: vps = [] np.random.seed(seed) for i in range(n_ensemble_members): rs = np.random.RandomState(seed) vp = vel_pert_gen[""init_func""](seed) vps.append( lambda t, vp=vp: vel_pert_gen[""gen_func""]( vp, t * vel_pert_gen[""timestep""] ) ) seed = rs.randint(0, high=1e9) else: vps = None state = { ""precip_fct"": [precip[-1].copy() for i in range(n_ensemble_members)], ""precip_lagr_diff"": [ precip_lagr_diff.copy() for i in 
range(n_ensemble_members) ], ""rs_precip_pert"": rs_precip_pert, } params = { ""interp_weights"": fct_gen[""interp_weights""], ""kernels_1"": fct_gen[""kernels_1""], ""kernels_2"": fct_gen[""kernels_2""], ""mask_adv"": fct_gen[""mask_adv""], ""num_ens_members"": n_ensemble_members, ""num_workers"": fct_gen[""num_workers""], ""num_ensemble_workers"": min(n_ensemble_members, fct_gen[""num_workers""]), ""precip_pert_gen"": precip_pert_gen, ""psi"": fct_gen[""psi""], } precip_forecast = nowcast_main_loop( precip[-1], fct_gen[""velocity""], state, timesteps, fct_gen[""extrap_method""], _update, extrap_kwargs=fct_gen[""extrap_kwargs""], vel_pert_gen=vps, params=params, callback=callback, return_output=return_output, num_workers=fct_gen[""num_workers""], measure_time=measure_time, ) if measure_time: precip_f, mainloop_time = precip_f if return_output: if not fct_gen[""add_perturbations""]: precip_f = precip_f[0] if measure_time: return precip_f, mainloop_time else: return precip_f else: return None " 29627,"def SSHCluster( hosts: List[str] = None, connect_options: Union[List[dict], dict] = {}, worker_options: dict = {}, scheduler_options: dict = {}, worker_module: str = ""distributed.cli.dask_worker"", remote_python: Union[str, List[str]] = None, **kwargs, ): """"""Deploy a Dask cluster using SSH The SSHCluster function deploys a Dask Scheduler and Workers for you on a set of machine addresses that you provide. The first address will be used for the scheduler while the rest will be used for the workers (feel free to repeat the first hostname if you want to have the scheduler and worker co-habitate one machine.) You may configure the scheduler and workers by passing ``scheduler_options`` and ``worker_options`` dictionary keywords. See the ``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for details on the available options, but the defaults should work in most situations. You may configure your use of SSH itself using the ``connect_options`` keyword, which passes values to the ``asyncssh.connect`` function. For more information on these see the documentation for the ``asyncssh`` library https://asyncssh.readthedocs.io . Parameters ---------- hosts : List[str] List of hostnames or addresses on which to launch our cluster. The first will be used for the scheduler and the rest for workers. connect_options : dict or list of dict, optional Keywords to pass through to :func:`asyncssh.connect`. This could include things such as ``port``, ``username``, ``password`` or ``known_hosts``. See docs for :func:`asyncssh.connect` and :class:`asyncssh.SSHClientConnectionOptions` for full information. If a list it must have the same length as ``hosts``. worker_options : dict, optional Keywords to pass on to workers. scheduler_options : dict, optional Keywords to pass on to scheduler. worker_module : str, optional Python module to call to start the worker. remote_python : str or list of str, optional Path to Python on remote nodes. Examples -------- >>> from dask.distributed import Client, SSHCluster >>> cluster = SSHCluster( ... [""localhost"", ""localhost"", ""localhost"", ""localhost""], ... connect_options={""known_hosts"": None}, ... worker_options={""nthreads"": 2}, ... scheduler_options={""port"": 0, ""dashboard_address"": "":8797""} ... ) >>> client = Client(cluster) An example using a different worker module, in particular the ``CUDAWorker`` worker from the ``dask-cuda`` project. >>> from dask.distributed import Client, SSHCluster >>> cluster = SSHCluster( ... 
[""localhost"", ""hostwithgpus"", ""anothergpuhost""], ... connect_options={""known_hosts"": None}, ... scheduler_options={""port"": 0, ""dashboard_address"": "":8797""}, ... worker_module=""dask_cuda.CUDAWorker"") >>> client = Client(cluster) See Also -------- dask.distributed.Scheduler dask.distributed.Worker asyncssh.connect """""" if set(kwargs) & old_cluster_kwargs: from .old_ssh import SSHCluster as OldSSHCluster warnings.warn( ""Note that the SSHCluster API has been replaced. "" ""We're routing you to the older implementation. "" ""This will be removed in the future"" ) kwargs.setdefault(""worker_addrs"", hosts) return OldSSHCluster(**kwargs) if not hosts: raise ValueError( f""`hosts` must be a non empty list, value {repr(hosts)!r} found."" ) if isinstance(connect_options, list) and len(connect_options) != len(hosts): raise RuntimeError( ""When specifying a list of connect_options you must provide a "" ""dictionary for each address."" ) if isinstance(remote_python, list) and len(remote_python) != len(hosts): raise RuntimeError( ""When specifying a list of remote_python you must provide a "" ""path for each address."" ) scheduler = { ""cls"": Scheduler, ""options"": { ""address"": hosts[0], ""connect_options"": connect_options if isinstance(connect_options, dict) else connect_options[0], ""kwargs"": scheduler_options, ""remote_python"": remote_python[0] if isinstance(remote_python, list) else remote_python, }, } workers = { i: { ""cls"": Worker, ""options"": { ""address"": host, ""connect_options"": connect_options if isinstance(connect_options, dict) else connect_options[i + 1], ""kwargs"": worker_options, ""worker_module"": worker_module, ""remote_python"": remote_python[i + 1] if isinstance(remote_python, list) else remote_python, }, } for i, host in enumerate(hosts[1:]) } return SpecCluster(workers, scheduler, name=""SSHCluster"", **kwargs) ","def SSHCluster( hosts: List[str] = None, connect_options: Union[List[dict], dict] = {}, worker_options: dict = {}, scheduler_options: dict = {}, worker_module: str = ""distributed.cli.dask_worker"", remote_python: Union[str, List[str]] = None, **kwargs, ): """"""Deploy a Dask cluster using SSH The SSHCluster function deploys a Dask Scheduler and Workers for you on a set of machine addresses that you provide. The first address will be used for the scheduler while the rest will be used for the workers (feel free to repeat the first hostname if you want to have the scheduler and worker co-habitate one machine.) You may configure the scheduler and workers by passing ``scheduler_options`` and ``worker_options`` dictionary keywords. See the ``dask.distributed.Scheduler`` and ``dask.distributed.Worker`` classes for details on the available options, but the defaults should work in most situations. You may configure your use of SSH itself using the ``connect_options`` keyword, which passes values to the ``asyncssh.connect`` function. For more information on these see the documentation for the ``asyncssh`` library https://asyncssh.readthedocs.io . Parameters ---------- hosts : List[str] List of hostnames or addresses on which to launch our cluster. The first will be used for the scheduler and the rest for workers. connect_options : dict or list of dict, optional Keywords to pass through to :func:`asyncssh.connect`. This could include things such as ``port``, ``username``, ``password`` or ``known_hosts``. See docs for :func:`asyncssh.connect` and :class:`asyncssh.SSHClientConnectionOptions` for full information. 
If a list it must have the same length as ``hosts``. worker_options : dict, optional Keywords to pass on to workers. scheduler_options : dict, optional Keywords to pass on to scheduler. worker_module : str, optional Python module to call to start the worker. remote_python : str or list of str, optional Path to Python on remote nodes. Examples -------- >>> from dask.distributed import Client, SSHCluster >>> cluster = SSHCluster( ... [""localhost"", ""localhost"", ""localhost"", ""localhost""], ... connect_options={""known_hosts"": None}, ... worker_options={""nthreads"": 2}, ... scheduler_options={""port"": 0, ""dashboard_address"": "":8797""} ... ) >>> client = Client(cluster) An example using a different worker module, in particular the ``CUDAWorker`` worker from the ``dask-cuda`` project. >>> from dask.distributed import Client, SSHCluster >>> cluster = SSHCluster( ... [""localhost"", ""hostwithgpus"", ""anothergpuhost""], ... connect_options={""known_hosts"": None}, ... scheduler_options={""port"": 0, ""dashboard_address"": "":8797""}, ... worker_module=""dask_cuda.cli.dask_cuda_worker"") >>> client = Client(cluster) See Also -------- dask.distributed.Scheduler dask.distributed.Worker asyncssh.connect """""" if set(kwargs) & old_cluster_kwargs: from .old_ssh import SSHCluster as OldSSHCluster warnings.warn( ""Note that the SSHCluster API has been replaced. "" ""We're routing you to the older implementation. "" ""This will be removed in the future"" ) kwargs.setdefault(""worker_addrs"", hosts) return OldSSHCluster(**kwargs) if not hosts: raise ValueError( f""`hosts` must be a non empty list, value {repr(hosts)!r} found."" ) if isinstance(connect_options, list) and len(connect_options) != len(hosts): raise RuntimeError( ""When specifying a list of connect_options you must provide a "" ""dictionary for each address."" ) if isinstance(remote_python, list) and len(remote_python) != len(hosts): raise RuntimeError( ""When specifying a list of remote_python you must provide a "" ""path for each address."" ) scheduler = { ""cls"": Scheduler, ""options"": { ""address"": hosts[0], ""connect_options"": connect_options if isinstance(connect_options, dict) else connect_options[0], ""kwargs"": scheduler_options, ""remote_python"": remote_python[0] if isinstance(remote_python, list) else remote_python, }, } workers = { i: { ""cls"": Worker, ""options"": { ""address"": host, ""connect_options"": connect_options if isinstance(connect_options, dict) else connect_options[i + 1], ""kwargs"": worker_options, ""worker_module"": worker_module, ""remote_python"": remote_python[i + 1] if isinstance(remote_python, list) else remote_python, }, } for i, host in enumerate(hosts[1:]) } return SpecCluster(workers, scheduler, name=""SSHCluster"", **kwargs) " 43769,"def derivative(H, x, i, delta=0.005291772): r""""""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian with respect to the :math:`i`-th nuclear coordinate using a central difference approximation. .. math:: \frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2) - \hat{H}(x_i-\delta/2)}{\delta} Args: H (callable): function with signature ``H(x)`` that builds the electronic Hamiltonian of the molecule for a given set of nuclear coordinates ``x`` x (array[float]): 1D array with the nuclear coordinates given in Angstroms. The size of the array should be ``3*N`` where ``N`` is the number of atoms in the molecule. 
i (int): index of the nuclear coordinate involved in the derivative :math:`\partial \hat{H}(x)/\partial x_i` delta (float): Step size in Angstroms used to displace the nuclear coordinate. Its default value corresponds to 0.01 Bohr radius. Returns: pennylane.Hamiltonian: the derivative of the Hamiltonian :math:`\partial \hat{H}(x)/\partial x_i` **Example** >>> def H(x): ... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0] >>> x = np.array([0., 0., 0.35, 0., 0., -0.35]) >>> print(derivative(H, x, 2)) (-0.7763135743293005) [I0] + (-0.08534360840293387) [Z0] + (-0.08534360840293387) [Z1] + (0.2669341092545041) [Z2] + (0.26693410925450134) [Z3] + (-0.025233628744274508) [Z0 Z1] + (0.0072162443961340415) [Y0 X1 X2 Y3] + (-0.0072162443961340415) [Y0 Y1 X2 X3] + (-0.0072162443961340415) [X0 X1 Y2 Y3] + (0.0072162443961340415) [X0 Y1 Y2 X3] + (-0.030654287745411964) [Z0 Z2] + (-0.023438043349280003) [Z0 Z3] + (-0.023438043349280003) [Z1 Z2] + (-0.030654287745411964) [Z1 Z3] + (-0.02494407786332001) [Z2 Z3] """""" to_bohr = 1.8897261254535 # plus x_plus = x.copy() x_plus[i] += delta * 0.5 # minus x_minus = x.copy() x_minus[i] -= delta * 0.5 return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1 ","def derivative(H, x, i, delta=0.00529): r""""""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian with respect to the :math:`i`-th nuclear coordinate using a central difference approximation. .. math:: \frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2) - \hat{H}(x_i-\delta/2)}{\delta} Args: H (callable): function with signature ``H(x)`` that builds the electronic Hamiltonian of the molecule for a given set of nuclear coordinates ``x`` x (array[float]): 1D array with the nuclear coordinates given in Angstroms. The size of the array should be ``3*N`` where ``N`` is the number of atoms in the molecule. i (int): index of the nuclear coordinate involved in the derivative :math:`\partial \hat{H}(x)/\partial x_i` delta (float): Step size in Angstroms used to displace the nuclear coordinate. Its default value corresponds to 0.01 Bohr radius. Returns: pennylane.Hamiltonian: the derivative of the Hamiltonian :math:`\partial \hat{H}(x)/\partial x_i` **Example** >>> def H(x): ... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0] >>> x = np.array([0., 0., 0.35, 0., 0., -0.35]) >>> print(derivative(H, x, 2)) (-0.7763135743293005) [I0] + (-0.08534360840293387) [Z0] + (-0.08534360840293387) [Z1] + (0.2669341092545041) [Z2] + (0.26693410925450134) [Z3] + (-0.025233628744274508) [Z0 Z1] + (0.0072162443961340415) [Y0 X1 X2 Y3] + (-0.0072162443961340415) [Y0 Y1 X2 X3] + (-0.0072162443961340415) [X0 X1 Y2 Y3] + (0.0072162443961340415) [X0 Y1 Y2 X3] + (-0.030654287745411964) [Z0 Z2] + (-0.023438043349280003) [Z0 Z3] + (-0.023438043349280003) [Z1 Z2] + (-0.030654287745411964) [Z1 Z3] + (-0.02494407786332001) [Z2 Z3] """""" to_bohr = 1.8897261254535 # plus x_plus = x.copy() x_plus[i] += delta * 0.5 # minus x_minus = x.copy() x_minus[i] -= delta * 0.5 return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1 " 15747,"def set_update_interval(instances: int, requests_remaining: int) -> timedelta: """""" Return data update interval. The number of requests is reset at midnight UTC so we calculate the update interval based on number of minutes until midnight, the number of Airly instances and the number of remaining requests. 
"""""" now = dt_util.utcnow() midnight = dt_util.find_next_time_expression_time( now, seconds=[0], minutes=[0], hours=[0] ) minutes_to_midnight = (midnight - now).total_seconds() / 60 interval = timedelta( minutes=min( max( ceil(minutes_to_midnight / requests_remaining * instances), MIN_UPDATE_INTERVAL, ), MAX_UPDATE_INTERVAL, ) ) _LOGGER.debug(""Data will be update every %s"", interval) return interval ","def set_update_interval(instances_count: int, requests_remaining: int) -> timedelta: """""" Return data update interval. The number of requests is reset at midnight UTC so we calculate the update interval based on number of minutes until midnight, the number of Airly instances and the number of remaining requests. """""" now = dt_util.utcnow() midnight = dt_util.find_next_time_expression_time( now, seconds=[0], minutes=[0], hours=[0] ) minutes_to_midnight = (midnight - now).total_seconds() / 60 interval = timedelta( minutes=min( max( ceil(minutes_to_midnight / requests_remaining * instances), MIN_UPDATE_INTERVAL, ), MAX_UPDATE_INTERVAL, ) ) _LOGGER.debug(""Data will be update every %s"", interval) return interval " 1724,"def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. 
https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression().fit(X,y) LogisticRegression(...) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) {'importances_mean': array([0.5, 0. , 0. ]), 'importances_std': array([0.16666667, 0. , 0. ]), 'importances': array([[0.33333333, 0.66666667], [0. , 0. ], [0. , 0. ]])} >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) ","def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. 
Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0, 9, 9], [0, 9, 9], [0, 9, 9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression().fit(X,y) LogisticRegression(...) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) {'importances_mean': array([0.5, 0. , 0. ]), 'importances_std': array([0.16666667, 0. , 0. ]), 'importances': array([[0.33333333, 0.66666667], [0. , 0. ], [0. , 0. ]])} >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) " 36058,"def validate_transfer_inputs(inputs, ctx): # pylint: disable=inconsistent-return-statements, unused-argument """"""Check that the instructions dict and the source nodes are consistent"""""" source_nodes = inputs['source_nodes'] instructions = inputs['instructions'] computer = inputs['metadata']['computer'] instructions_dict = instructions.get_dict() local_files = instructions_dict.get('local_files', list()) remote_files = instructions_dict.get('remote_files', list()) symlink_files = instructions_dict.get('symlink_files', list()) source_nodes_provided = set() source_nodes_required = set() error_message_list = [] for node_label, node_object in source_nodes.items(): if isinstance(node_object, orm.RemoteData): if computer.name != node_object.computer.name: error_message = ' > remote node `{}` points to computer `{}`, not the one being used (`{}`)' error_message = error_message.format(node_label, node_object.computer.name, computer.name) error_message_list.append(error_message) for source_label, _, _ in local_files: source_nodes_required.add(source_label) source_node = source_nodes.get(source_label, None) error_message = check_node_type('local_files', source_label, source_node, orm.FolderData) if error_message: error_message_list.append(error_message) for source_label, _, _ in remote_files: source_nodes_required.add(source_label) source_node = source_nodes.get(source_label, None) error_message = check_node_type('remote_files', source_label, 
source_node, orm.RemoteData) if error_message: error_message_list.append(error_message) for source_label, _, _ in symlink_files: source_nodes_required.add(source_label) source_node = source_nodes.get(source_label, None) error_message = check_node_type('symlink_files', source_label, source_node, orm.RemoteData) if error_message: error_message_list.append(error_message) unrequired_nodes = source_nodes_provided.difference(source_nodes_required) for node_label in unrequired_nodes: error_message = ' > node `{}` provided as inputs is not being used' error_message = error_message.format(node_label) error_message_list.append(error_message) if len(error_message_list) > 0: error_message = '\n\n' for error_add in error_message_list: error_message = error_message + error_add + '\n' return error_message ","def validate_transfer_inputs(inputs, _): """"""Check that the instructions dict and the source nodes are consistent"""""" source_nodes = inputs['source_nodes'] instructions = inputs['instructions'] computer = inputs['metadata']['computer'] instructions_dict = instructions.get_dict() local_files = instructions_dict.get('local_files', list()) remote_files = instructions_dict.get('remote_files', list()) symlink_files = instructions_dict.get('symlink_files', list()) source_nodes_provided = set() source_nodes_required = set() error_message_list = [] for node_label, node_object in source_nodes.items(): if isinstance(node_object, orm.RemoteData): if computer.name != node_object.computer.name: error_message = ' > remote node `{}` points to computer `{}`, not the one being used (`{}`)' error_message = error_message.format(node_label, node_object.computer.name, computer.name) error_message_list.append(error_message) for source_label, _, _ in local_files: source_nodes_required.add(source_label) source_node = source_nodes.get(source_label, None) error_message = check_node_type('local_files', source_label, source_node, orm.FolderData) if error_message: error_message_list.append(error_message) for source_label, _, _ in remote_files: source_nodes_required.add(source_label) source_node = source_nodes.get(source_label, None) error_message = check_node_type('remote_files', source_label, source_node, orm.RemoteData) if error_message: error_message_list.append(error_message) for source_label, _, _ in symlink_files: source_nodes_required.add(source_label) source_node = source_nodes.get(source_label, None) error_message = check_node_type('symlink_files', source_label, source_node, orm.RemoteData) if error_message: error_message_list.append(error_message) unrequired_nodes = source_nodes_provided.difference(source_nodes_required) for node_label in unrequired_nodes: error_message = ' > node `{}` provided as inputs is not being used' error_message = error_message.format(node_label) error_message_list.append(error_message) if len(error_message_list) > 0: error_message = '\n\n' for error_add in error_message_list: error_message = error_message + error_add + '\n' return error_message " 48167,"def check_that_null_character_absents_in_path(file_path: str, file_path_name: str): """"""Function raises ValueError exception if null character: '\0' is specified in path to file"""""" if ""\\0"" in file_path: raise ValueError(f""\\0 is specified in {file_path_name}: {file_path}"") ","def check_that_null_character_absents_in_path(file_path: str, file_path_name: str): """"""Function raises ValueError exception if null character: '\0' is specified in path to file"""""" if ""\0"" in file_path: raise ValueError(f""\\0 is specified in 
{file_path_name}: {file_path}"") " 24307,"def render_logs_progress(): valid_checks = sorted(get_valid_checks()) total_checks = len(valid_checks) checks_with_logs = 0 lines = ['## Logs specs', '', None, '', '??? check ""Completed""'] for check in valid_checks: config_file = get_config_file(check) status = ' ' tile_only = not os.path.exists(config_file) if not tile_only: with open(config_file) as f: if '# logs:' in f.read(): status = 'X' checks_with_logs += 1 else: readme_file = get_readme_file(check) if os.path.exists(readme_file): with open(readme_file) as f: if '# Log collection' in f.read(): status = 'X' checks_with_logs += 1 if status != 'X': total_checks -= 1 # we cannot really add log collection to tile only integrations lines.append(f' - [{status}] {check}') percent = checks_with_logs / total_checks * 100 formatted_percent = f'{percent:.2f}' lines[2] = f'[={formatted_percent}% ""{formatted_percent}%""]' return lines ","def render_logs_progress(): valid_checks = sorted(get_valid_checks()) total_checks = len(valid_checks) checks_with_logs = 0 lines = ['## Logs specs', '', None, '', '??? check ""Completed""'] for check in valid_checks: config_file = get_config_file(check) status = ' ' tile_only = not os.path.exists(config_file) if not tile_only: with open(config_file, 'r', encoding='utf-8') as f: if '# logs:' in f.read(): status = 'X' checks_with_logs += 1 else: readme_file = get_readme_file(check) if os.path.exists(readme_file): with open(readme_file) as f: if '# Log collection' in f.read(): status = 'X' checks_with_logs += 1 if status != 'X': total_checks -= 1 # we cannot really add log collection to tile only integrations lines.append(f' - [{status}] {check}') percent = checks_with_logs / total_checks * 100 formatted_percent = f'{percent:.2f}' lines[2] = f'[={formatted_percent}% ""{formatted_percent}%""]' return lines " 13762,"def _get_expected_error_settings_dict(): """""" Returns a dict of dicts of expected error settings used for logging and monitoring. The contents of the EXPECTED_ERRORS Django Setting list is processed for efficient lookup by module:class. Returns: (dict): dict of dicts, mapping module-and-class name to settings for proper handling of expected errors. Keys of the inner dicts use the lowercase version of the related Django Setting (e.g. ''REASON_EXPECTED' => 'reason_expected'). Example return value:: { 'rest_framework.exceptions:PermissionDenied': { 'is_ignored': True, 'log_error': True, 'log_stack_trace': True, 'reason_expected': 'In most cases, signifies a user was trying to do something they couldn't.' / 'It is possible that there could be a bug, so this case should still be monitored at some level.' } ... } """""" global _EXPECTED_ERROR_SETTINGS_DICT # Return cached processed mappings if already processed if _EXPECTED_ERROR_SETTINGS_DICT is not None: return _EXPECTED_ERROR_SETTINGS_DICT expected_errors = getattr(settings, 'EXPECTED_ERRORS', None) if expected_errors is None: return None # Use temporary variable to build mappings to avoid multi-threading issue with a partially # processed map. Worst case, it is processed more than once at start-up. 
expected_error_settings_dict = {} try: for index, expected_error in enumerate(expected_errors): module_and_class = expected_error.get('MODULE_AND_CLASS') processed_expected_error = { 'is_ignored': expected_error.get('IS_IGNORED', True), 'log_error': expected_error.get('LOG_ERROR', False), 'log_stack_trace': expected_error.get('LOG_STACK_TRACE', False), 'reason_expected': expected_error.get('REASON_EXPECTED'), } # validate configuration if not isinstance(module_and_class, str) or ':' not in module_and_class: log.error( ""Skipping EXPECTED_ERRORS[%d] setting. 'MODULE_AND_CLASS' set to [%s] and should be module:class, "" ""like 'rest_framework.exceptions:PermissionDenied'."", index, module_and_class ) continue if not processed_expected_error['reason_expected']: log.error( ""Skipping EXPECTED_ERRORS[%d] setting. 'REASON_EXPECTED' is required to document why %s is an "" ""expected error."", index, module_and_class ) continue expected_error_settings_dict[module_and_class] = processed_expected_error except Exception as e: # pylint: disable=broad-except set_custom_attribute('expected_errors_setting_misconfigured', repr(e)) log.exception(f'Error processing setting EXPECTED_ERRORS. {repr(e)}') _EXPECTED_ERROR_SETTINGS_DICT = expected_error_settings_dict return _EXPECTED_ERROR_SETTINGS_DICT ","def _get_expected_error_settings_dict(): """""" Returns a dict of dicts of expected error settings used for logging and monitoring. The contents of the EXPECTED_ERRORS Django Setting list is processed for efficient lookup by module:class. Returns: (dict): dict of dicts, mapping module-and-class name to settings for proper handling of expected errors. Keys of the inner dicts use the lowercase version of the related Django Setting (e.g. ''REASON_EXPECTED' => 'reason_expected'). Example return value:: { 'rest_framework.exceptions:PermissionDenied': { 'is_ignored': True, 'log_error': True, 'log_stack_trace': True, 'reason_expected': 'In most cases, signifies a user was trying to do something they couldn't.' / 'It is possible that there could be a bug, so this case should still be monitored at some level.' } ... } """""" global _EXPECTED_ERROR_SETTINGS_DICT # Return cached processed mappings if already processed if _EXPECTED_ERROR_SETTINGS_DICT is not None: return _EXPECTED_ERROR_SETTINGS_DICT expected_errors = getattr(settings, 'EXPECTED_ERRORS', None) if expected_errors is None: return None # Use temporary variable to build mappings to avoid multi-threading issue with a partially # processed map. Worst case, it is processed more than once at start-up. expected_error_settings_dict = {} try: for index, expected_error in enumerate(expected_errors): module_and_class = expected_error.get('MODULE_AND_CLASS') processed_expected_error = { 'is_ignored': expected_error.get('IS_IGNORED', True), 'log_error': expected_error.get('LOG_ERROR', False), 'log_stack_trace': expected_error.get('LOG_STACK_TRACE', False), 'reason_expected': expected_error.get('REASON_EXPECTED', None), } # validate configuration if not isinstance(module_and_class, str) or ':' not in module_and_class: log.error( ""Skipping EXPECTED_ERRORS[%d] setting. 'MODULE_AND_CLASS' set to [%s] and should be module:class, "" ""like 'rest_framework.exceptions:PermissionDenied'."", index, module_and_class ) continue if not processed_expected_error['reason_expected']: log.error( ""Skipping EXPECTED_ERRORS[%d] setting. 
'REASON_EXPECTED' is required to document why %s is an "" ""expected error."", index, module_and_class ) continue expected_error_settings_dict[module_and_class] = processed_expected_error except Exception as e: # pylint: disable=broad-except set_custom_attribute('expected_errors_setting_misconfigured', repr(e)) log.exception(f'Error processing setting EXPECTED_ERRORS. {repr(e)}') _EXPECTED_ERROR_SETTINGS_DICT = expected_error_settings_dict return _EXPECTED_ERROR_SETTINGS_DICT " 40574,"def attach_wind_and_solar(n, costs,ppl): for tech in snakemake.config['renewable']: if tech == 'hydro': continue n.add(""Carrier"", name=tech) with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds: if ds.indexes['bus'].empty: continue suptech = tech.split('-', 2)[0] if suptech == 'offwind': underwater_fraction = ds['underwater_fraction'].to_pandas() connection_cost = (snakemake.config['lines']['length_factor'] * ds['average_distance'].to_pandas() * (underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] + (1. - underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost'])) capital_cost = (costs.at['offwind', 'capital_cost'] + costs.at[tech + '-station', 'capital_cost'] + connection_cost) logger.info(""Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}"" .format(connection_cost.min(), connection_cost.max(), tech)) elif suptech == 'onwind': capital_cost = (costs.at['onwind', 'capital_cost'] + costs.at['onwind-landcosts', 'capital_cost']) else: capital_cost = costs.at[tech, 'capital_cost'] if snakemake.config['electricity']['custom_powerplants']: #adapt ""if"" in case wind and solar are not part of custom_ppls? p = ppl.query('carrier == @tech') logger.info('Adding {} generators of type {}'.format(len(p), tech)) profile = ds['profile'].to_pandas() # rename indices in the following to add them at the correct places in the network: profile = profile.rename(index={profile.index[bus_i]: profile.index[bus_i] + ' ' + tech for bus_i in range(len(profile))}) p.bus = p.bus.apply(lambda b: b[:-2] if b[-2:] == '.0' else b) #for some reason buses in custom_ppls may end with .0 -> delete that! .astype(int) does not work? p = p.rename(index={p.index[bus_i]: p.iloc[bus_i].bus + ' ' + tech for bus_i in range(len(p))}) n.madd(""Generator"", p.index, bus=p['bus'], carrier=tech, p_nom_extendable=False, p_nom=p['p_nom'], weight=ds['weight'].to_pandas(), #adapt? Weights happen to be 0...?? 
marginal_cost=costs.at[suptech, 'marginal_cost'], capital_cost=capital_cost, efficiency=costs.at[suptech, 'efficiency'], p_max_pu=profile.T.loc[:, p.index]) else: n.madd(""Generator"", ds.indexes['bus'], ' ' + tech, bus=ds.indexes['bus'], carrier=tech, p_nom_extendable=True, p_nom_max=ds['p_nom_max'].to_pandas(), weight=ds['weight'].to_pandas(), marginal_cost=costs.at[suptech, 'marginal_cost'], capital_cost=capital_cost, efficiency=costs.at[suptech, 'efficiency'], p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas()) # # Generators ","def attach_wind_and_solar(n, costs, ppl): for tech in snakemake.config['renewable']: if tech == 'hydro': continue n.add(""Carrier"", name=tech) with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds: if ds.indexes['bus'].empty: continue suptech = tech.split('-', 2)[0] if suptech == 'offwind': underwater_fraction = ds['underwater_fraction'].to_pandas() connection_cost = (snakemake.config['lines']['length_factor'] * ds['average_distance'].to_pandas() * (underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] + (1. - underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost'])) capital_cost = (costs.at['offwind', 'capital_cost'] + costs.at[tech + '-station', 'capital_cost'] + connection_cost) logger.info(""Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}"" .format(connection_cost.min(), connection_cost.max(), tech)) elif suptech == 'onwind': capital_cost = (costs.at['onwind', 'capital_cost'] + costs.at['onwind-landcosts', 'capital_cost']) else: capital_cost = costs.at[tech, 'capital_cost'] if snakemake.config['electricity']['custom_powerplants']: #adapt ""if"" in case wind and solar are not part of custom_ppls? p = ppl.query('carrier == @tech') logger.info('Adding {} generators of type {}'.format(len(p), tech)) profile = ds['profile'].to_pandas() # rename indices in the following to add them at the correct places in the network: profile = profile.rename(index={profile.index[bus_i]: profile.index[bus_i] + ' ' + tech for bus_i in range(len(profile))}) p.bus = p.bus.apply(lambda b: b[:-2] if b[-2:] == '.0' else b) #for some reason buses in custom_ppls may end with .0 -> delete that! .astype(int) does not work? p = p.rename(index={p.index[bus_i]: p.iloc[bus_i].bus + ' ' + tech for bus_i in range(len(p))}) n.madd(""Generator"", p.index, bus=p['bus'], carrier=tech, p_nom_extendable=False, p_nom=p['p_nom'], weight=ds['weight'].to_pandas(), #adapt? Weights happen to be 0...?? marginal_cost=costs.at[suptech, 'marginal_cost'], capital_cost=capital_cost, efficiency=costs.at[suptech, 'efficiency'], p_max_pu=profile.T.loc[:, p.index]) else: n.madd(""Generator"", ds.indexes['bus'], ' ' + tech, bus=ds.indexes['bus'], carrier=tech, p_nom_extendable=True, p_nom_max=ds['p_nom_max'].to_pandas(), weight=ds['weight'].to_pandas(), marginal_cost=costs.at[suptech, 'marginal_cost'], capital_cost=capital_cost, efficiency=costs.at[suptech, 'efficiency'], p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas()) # # Generators " 10896,"def pypi_source_urls(pkg_name): """""" Fetch list of source URLs (incl. source filename) for specified Python package from PyPI, using 'simple' PyPI API. 
"""""" # example: https://pypi.python.org/simple/easybuild # see also: # - https://www.python.org/dev/peps/pep-0503/ # - https://wiki.python.org/moin/PyPISimple simple_url = 'https://pypi.python.org/simple/%s' % re.sub(r'[-_.]+', '-', pkg_name.lower()) tmpdir = tempfile.mkdtemp() urls_html = os.path.join(tmpdir, '%s_urls.html' % pkg_name) if download_file(os.path.basename(urls_html), simple_url, urls_html) is None: _log.debug(""Failed to download %s to determine available PyPI URLs for %s"", simple_url, pkg_name) res = [] else: urls_txt = read_file(urls_html) # strip out data-yanked attributes before parsing HTML # see https://github.com/easybuilders/easybuild-framework/issues/3301 urls_txt = re.sub(r'\s*data-yanked', '', urls_txt) parsed_html = ElementTree.ElementTree(ElementTree.fromstring(urls_txt)) if hasattr(parsed_html, 'iter'): res = [a.attrib['href'] for a in parsed_html.iter('a')] else: res = [a.attrib['href'] for a in parsed_html.getiterator('a')] # links are relative, transform them into full URLs; for example: # from: ../../packages////easybuild-.tar.gz#md5= # to: https://pypi.python.org/packages////easybuild-.tar.gz#md5= res = [re.sub('.*/packages/', 'https://pypi.python.org/packages/', x) for x in res] return res ","def pypi_source_urls(pkg_name): """""" Fetch list of source URLs (incl. source filename) for specified Python package from PyPI, using 'simple' PyPI API. """""" # example: https://pypi.python.org/simple/easybuild # see also: # - https://www.python.org/dev/peps/pep-0503/ # - https://wiki.python.org/moin/PyPISimple simple_url = 'https://pypi.python.org/simple/%s' % re.sub(r'[-_.]+', '-', pkg_name.lower()) tmpdir = tempfile.mkdtemp() urls_html = os.path.join(tmpdir, '%s_urls.html' % pkg_name) if download_file(os.path.basename(urls_html), simple_url, urls_html) is None: _log.debug(""Failed to download %s to determine available PyPI URLs for %s"", simple_url, pkg_name) res = [] else: urls_txt = read_file(urls_html) # strip out data-yanked attributes before parsing HTML # see https://github.com/easybuilders/easybuild-framework/issues/3301 urls_txt = re.sub('^.*data-yanked.*$', '', urls_txt, flags=re.M) parsed_html = ElementTree.ElementTree(ElementTree.fromstring(urls_txt)) if hasattr(parsed_html, 'iter'): res = [a.attrib['href'] for a in parsed_html.iter('a')] else: res = [a.attrib['href'] for a in parsed_html.getiterator('a')] # links are relative, transform them into full URLs; for example: # from: ../../packages////easybuild-.tar.gz#md5= # to: https://pypi.python.org/packages////easybuild-.tar.gz#md5= res = [re.sub('.*/packages/', 'https://pypi.python.org/packages/', x) for x in res] return res " 12712,"def test_vintage_simple_success(rule_runner: RuleRunner) -> None: rule_runner.write_files( { ""coursier_resolve.lockfile"": JUNIT4_RESOLVED_LOCKFILE.to_json().decode(""utf-8""), ""BUILD"": dedent( """"""\ jvm_artifact( name = 'junit_junit', group = 'junit', artifact = 'junit', version = '4.13.2', ) coursier_lockfile( name = 'lockfile', requirements = [':junit_junit'], sources = [ ""coursier_resolve.lockfile"", ], ) junit_tests( name='example-test', dependencies= [':lockfile'], ) """""" ), ""SimpleTest.java"": dedent( """""" package org.pantsbuild.example; import junit.framework.TestCase; public class SimpleTest extends TestCase { public void testHello(){ assertTrue(""Hello!"" == ""Hello!""); } } """""" ), } ) tgt = rule_runner.get_target( address=Address( spec_path="""", target_name=""example-test"", relative_file_path=""SimpleTest.java"" ) ) test_result = 
rule_runner.request( TestResult, [ JavaTestFieldSet.create(tgt), ], ) assert test_result.exit_code == 0 assert re.search(r""Finished:\s+testHello"", test_result.stdout) is not None assert re.search(r""1 tests successful"", test_result.stdout) is not None assert re.search(r""1 tests found"", test_result.stdout) is not None ","def test_vintage_simple_success(rule_runner: RuleRunner) -> None: rule_runner.write_files( { ""coursier_resolve.lockfile"": JUNIT4_RESOLVED_LOCKFILE.to_json().decode(""utf-8""), ""BUILD"": dedent( """"""\ jvm_artifact( name = 'junit_junit', group = 'junit', artifact = 'junit', version = '4.13.2', ) coursier_lockfile( name = 'lockfile', requirements = [':junit_junit'], sources = [ ""coursier_resolve.lockfile"", ], ) junit_tests( name='example-test', dependencies= [':lockfile'], ) """""" ), ""SimpleTest.java"": dedent( """""" package org.pantsbuild.example; import junit.framework.TestCase; public class SimpleTest extends TestCase { public void testHello(){ assertTrue(""Hello!"" == ""Hello!""); } } """""" ), } ) tgt = rule_runner.get_target( Address("""", target_name=""example-test"", relative_file_path=""SimpleTest.java"") ) test_result = rule_runner.request( TestResult, [ JavaTestFieldSet.create(tgt), ], ) assert test_result.exit_code == 0 assert re.search(r""Finished:\s+testHello"", test_result.stdout) is not None assert re.search(r""1 tests successful"", test_result.stdout) is not None assert re.search(r""1 tests found"", test_result.stdout) is not None " 23585,"def read_epw(filename=None, coerce_year=None): ''' Read an EPW file in to a pandas dataframe. Note that values contained in the metadata dictionary are unchanged from the EPW file. EPW files are commonly used by building simulation professionals and are widely available on the web. For example via: https://energyplus.net/weather , http://climate.onebuilding.org or http://www.ladybug.tools/epwmap/ Parameters ---------- filename : None or string, default None If None, attempts to use a Tkinter file browser. A string can be a relative file path, absolute file path, or url. coerce_year : None or int, default None If supplied, the year of the data will be set to this value. This can be a useful feature because EPW data is composed of data from different years. Warning: EPW files always have 365*24 = 8760 data rows; be careful with the use of leap years. Returns ------- Tuple of the form (data, metadata). data : DataFrame A pandas dataframe with the columns described in the table below. For more detailed descriptions of each component, please consult the EnergyPlus Auxiliary Programs documentation available at: https://energyplus.net/documentation. metadata : dict The site metadata available in the file. Notes ----- The returned structures have the following fields. 
=============== ====== =================== key format description =============== ====== =================== loc String default identifier, not used city String site loccation state-prov String state, province or region (if available) country String site country code data_type String type of original data source WMO_code String WMO identifier latitude Float site latitude longitude Float site longitude TZ Float UTC offset altitude Float site elevation =============== ====== =================== ============================= ====================================================================================================================================================== EPWData field description ============================= ====================================================================================================================================================== index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included) year month day hour minute data_source_unct Data source and uncertainty flags. See [1], chapter 2.13 t_drybulb Dry bulb temperature at the time indicated, deg C t_dewpoint Dew-point temperature at the time indicated, deg C rel_hum Relatitudeive humidity at the time indicated, percent atmospheric_pressure Station pressure at the time indicated, Pa etr Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 etrn Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 ghi_infrared Horizontal infrared radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 ghi Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 dni Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2 dhi Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 global_hor_illum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx direct_normal_illum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx diffuse_horizontal_illum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx zenith_luminance Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2 wind_direction Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm) wind_speed Wind speed at the time indicated, meter/second total_sky_cover Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky opaque_sky_cover Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky visibility Horizontal visibility at the time indicated, km ceiling_height Height of cloud base above local terrain (7777=unlimited), meter present_weather_observation Indicator for remaining fields: If 0, then the observed weather codes are taken from the following field. If 9, then “missing” weather is assumed. Since the primary use of these fields (Present Weather Observation and Present Weather Codes) is for rain/wet surfaces, a missing observation field or a missing weather code implies no rain. 
present_weather_codes Present weather code, see [1], chapter 2.9.1.28 precipitable_water Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm aerosol_otpical_depth The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless snow_depth Snow depth in centimeters on the day indicated, (999 = missing data) days_since_last_snowfall Number of days since last snowfall (maximum value of 88, where 88 = 88 or greater days; 99 = missing data) albedo The ratio of reflected solar irradiance to global horizontal irradiance, unitless liquid_precipitation_depth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter liquid_precipitation_quantity The period of accumulation for the liquid precipitation depth field, hour ============================= ====================================================================================================================================================== References ---------- [1] EnergyPlus documentation, Auxiliary Programs https://energyplus.net/documentation. ''' if filename is None: try: filename = _interactive_load() except ImportError: raise ImportError('Interactive load failed. Tkinter not supported ' 'on this system. Try installing X-Quartz and ' 'reloading') if filename.startswith('http'): # Attempts to download online EPW file # See comments above for possible online sources request = Request(filename, headers={'User-Agent': ( 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 ' 'Safari/537.36')}) response = urlopen(request) csvdata = io.StringIO(response.read().decode(errors='ignore')) else: # Assume it's accessible via the file system csvdata = open(filename, 'r') # Read line with metadata firstline = csvdata.readline() head = ['loc','city', 'state-prov', 'country', 'data_type','WMO_code', 'latitude', 'longitude', 'TZ','altitude'] meta = dict(zip(head, firstline.rstrip('\n').split("",""))) meta['altitude'] = float(meta['altitude']) meta['latitude'] = float(meta['latitude']) meta['longitude'] = float(meta['longitude']) meta['TZ'] = float(meta['TZ']) colnames = ['year', 'month', 'day', 'hour', 'minute', 'data_source_unct', 't_drybulb', 't_dewpoint', 'rel_hum', 'atmospheric_pressure', 'etr', 'etrn', 'ghi_infrared', 'ghi', 'dni', 'dhi', 'global_hor_illum', 'direct_normal_illum', 'diffuse_horizontal_illum', 'zenith_luminance', 'wind_direction', 'wind_speed', 'total_sky_cover', 'opaque_sky_cover', 'visibility', 'ceiling_height', 'present_weather_observation', 'present_weather_codes', 'precipitable_water', 'aerosol_otpical_depth', 'snow_depth', 'days_since_last_snowfall', 'albedo', 'liquid_precipitation_depth', 'liquid_precipitation_quantity'] # We only have to skip 6 rows instead of 7 because we have already used # the realine call above. data = pd.read_csv(csvdata, skiprows=6, header = 0, names=colnames) # Shift one hour back because EPW's usage of hour 24 # and dateutil's inability to handle that. 
data[""hour""] = data[""hour""].apply(lambda x: x - 1) # Change to single year if requested if coerce_year is not None: data[""year""] = coerce_year # Update index with correct date information data = data.set_index(pd.to_datetime(data[['year','month','day','hour']])) # Localize time series data = data.tz_localize(int(meta['TZ'] * 3600)) return data, meta ","def read_epw(filename=None, coerce_year=None): ''' Read an EPW file in to a pandas dataframe. Note that values contained in the metadata dictionary are unchanged from the EPW file. EPW files are commonly used by building simulation professionals and are widely available on the web. For example via: https://energyplus.net/weather , http://climate.onebuilding.org or http://www.ladybug.tools/epwmap/ Parameters ---------- filename : None or string, default None If None, attempts to use a Tkinter file browser. A string can be a relative file path, absolute file path, or url. coerce_year : None or int, default None If supplied, the year of the data will be set to this value. This can be a useful feature because EPW data is composed of data from different years. Warning: EPW files always have 365*24 = 8760 data rows; be careful with the use of leap years. Returns ------- Tuple of the form (data, metadata). data : DataFrame A pandas dataframe with the columns described in the table below. For more detailed descriptions of each component, please consult the EnergyPlus Auxiliary Programs documentation available at: https://energyplus.net/documentation. metadata : dict The site metadata available in the file. Notes ----- The returned structures have the following fields. =============== ====== =================== key format description =============== ====== =================== loc String default identifier, not used city String site loccation state-prov String state, province or region (if available) country String site country code data_type String type of original data source WMO_code String WMO identifier latitude Float site latitude longitude Float site longitude TZ Float UTC offset altitude Float site elevation =============== ====== =================== ============================= ====================================================================================================================================================== EPWData field description ============================= ====================================================================================================================================================== index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included) year month day hour minute data_source_unct Data source and uncertainty flags. 
See [1], chapter 2.13 t_drybulb Dry bulb temperature at the time indicated, deg C t_dewpoint Dew-point temperature at the time indicated, deg C rel_hum Relatitudeive humidity at the time indicated, percent atmospheric_pressure Station pressure at the time indicated, Pa etr Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 etrn Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 ghi_infrared Horizontal infrared radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 ghi Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 dni Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2 dhi Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 global_hor_illum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx direct_normal_illum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx diffuse_horizontal_illum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx zenith_luminance Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2 wind_direction Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm) wind_speed Wind speed at the time indicated, meter/second total_sky_cover Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky opaque_sky_cover Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky visibility Horizontal visibility at the time indicated, km ceiling_height Height of cloud base above local terrain (7777=unlimited), meter present_weather_observation Indicator for remaining fields: If 0, then the observed weather codes are taken from the following field. If 9, then “missing” weather is assumed. Since the primary use of these fields (Present Weather Observation and Present Weather Codes) is for rain/wet surfaces, a missing observation field or a missing weather code implies no rain. present_weather_codes Present weather code, see [1], chapter 2.9.1.28 precipitable_water Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm aerosol_otpical_depth The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless snow_depth Snow depth in centimeters on the day indicated, (999 = missing data) days_since_last_snowfall Number of days since last snowfall (maximum value of 88, where 88 = 88 or greater days; 99 = missing data) albedo The ratio of reflected solar irradiance to global horizontal irradiance, unitless liquid_precipitation_depth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter liquid_precipitation_quantity The period of accumulation for the liquid precipitation depth field, hour ============================= ====================================================================================================================================================== References ---------- [1] EnergyPlus documentation, Auxiliary Programs https://energyplus.net/documentation. ''' if filename is None: try: filename = _interactive_load() except ImportError: raise ImportError('Interactive load failed. Tkinter not supported ' 'on this system. 
Try installing X-Quartz and ' 'reloading') if filename.startswith('http'): # Attempts to download online EPW file # See comments above for possible online sources request = Request(filename, headers={'User-Agent': ( 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 ' 'Safari/537.36')}) response = urlopen(request) csvdata = io.StringIO(response.read().decode(errors='ignore')) else: # Assume it's accessible via the file system csvdata = open(filename, 'r') # Read line with metadata firstline = csvdata.readline() head = ['loc','city', 'state-prov', 'country', 'data_type','WMO_code', 'latitude', 'longitude', 'TZ','altitude'] meta = dict(zip(head, firstline.rstrip('\n').split("",""))) meta['altitude'] = float(meta['altitude']) meta['latitude'] = float(meta['latitude']) meta['longitude'] = float(meta['longitude']) meta['TZ'] = float(meta['TZ']) colnames = ['year', 'month', 'day', 'hour', 'minute', 'data_source_unct', 't_drybulb', 't_dewpoint', 'rel_hum', 'atmospheric_pressure', 'etr', 'etrn', 'ghi_infrared', 'ghi', 'dni', 'dhi', 'global_hor_illum', 'direct_normal_illum', 'diffuse_horizontal_illum', 'zenith_luminance', 'wind_direction', 'wind_speed', 'total_sky_cover', 'opaque_sky_cover', 'visibility', 'ceiling_height', 'present_weather_observation', 'present_weather_codes', 'precipitable_water', 'aerosol_otpical_depth', 'snow_depth', 'days_since_last_snowfall', 'albedo', 'liquid_precipitation_depth', 'liquid_precipitation_quantity'] # We only have to skip 6 rows instead of 7 because we have already used # the realine call above. data = pd.read_csv(csvdata, skiprows=6, header=0, names=colnames) # Shift one hour back because EPW's usage of hour 24 # and dateutil's inability to handle that. data[""hour""] = data[""hour""].apply(lambda x: x - 1) # Change to single year if requested if coerce_year is not None: data[""year""] = coerce_year # Update index with correct date information data = data.set_index(pd.to_datetime(data[['year','month','day','hour']])) # Localize time series data = data.tz_localize(int(meta['TZ'] * 3600)) return data, meta " 30523,"def txt_file_to_indicator_list(file_path, auto_detect, default_type): with open(file_path, ""r"") as fp: file_data = fp.read() indicator_list = [] only_indicator_list = file_data.split() for indicator in only_indicator_list: # drop punctuation if len(indicator) > 0: if indicator[-1] in "".,?:;\\)}]/!\n\t"": indicator = indicator[:-1] if indicator[0] in "".,({["": indicator = indicator[0:] indicator_type = detect_type(indicator) # indicator not recognized if indicator_type is None: continue if not auto_detect: indicator_type = default_type indicator_list.append({ 'type': indicator_type, 'value': indicator }) return indicator_list ","def txt_file_to_indicator_list(file_path, auto_detect, default_type): with open(file_path, ""r"") as fp: file_data = fp.read() indicator_list = [] raw_splitted_data = file_data.split() for indicator in only_indicator_list: # drop punctuation if len(indicator) > 0: if indicator[-1] in "".,?:;\\)}]/!\n\t"": indicator = indicator[:-1] if indicator[0] in "".,({["": indicator = indicator[0:] indicator_type = detect_type(indicator) # indicator not recognized if indicator_type is None: continue if not auto_detect: indicator_type = default_type indicator_list.append({ 'type': indicator_type, 'value': indicator }) return indicator_list " 31216,"def mantis_get_all_issues_command(client, args): """""" Returns list of all issues for given args Args: client (Client): Mantis 
client. args (dict): page filters. Returns: list of Mantis issues """""" if args is not None: params = args resp = client.get_issues(params=params).get('issues') issues = [create_output_result(issue) for issue in resp] readable_output = tableToMarkdown(""Mantis Issue Details"", issues, headers=TABLE_HEADERS) results = CommandResults( readable_output=readable_output, outputs_prefix=""Mantis.issue"", outputs_key_field=TABLE_HEADERS, outputs=issues ) return_results(results) ","def mantis_get_all_issues_command(client, args): """""" Returns list of all issues for given args Args: client (Client): Mantis client. args (dict): page filters. Returns: list of Mantis issues """""" if args is not None: params = args resp = client.get_issues(params=params).get('issues') issues = [create_output_result(issue) for issue in resp] readable_output = tableToMarkdown(""Mantis Issue Details"", issues, headers=TABLE_HEADERS) results = CommandResults( readable_output=readable_output, outputs_prefix=""Mantis.issue"", outputs_key_field=TABLE_HEADERS, outputs=issues ) return results " 1583,"def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=False, n_jobs=None): """"""Computes the (weighted) graph of k-Neighbors for points in X Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) or BallTree Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, default=None Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, and 'distance' will return the distances between neighbors according to the given metric. metric : str, default='minkowski' The distance metric used to calculate the k-Neighbors for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the p param equal to 2.) p : int, default=2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params : dict, default=None additional keyword arguments for the metric function. include_self : bool or 'auto', default=None Whether or not to mark each sample as the first nearest neighbor to itself. If 'auto', then True is used for mode='connectivity' and False for mode='distance'. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. Returns ------- A : sparse graph in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. 
Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) See also -------- radius_neighbors_graph """""" if not isinstance(X, KNeighborsMixin): X = NearestNeighbors(n_neighbors, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X._fit_X, include_self, mode) return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) ","def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=False, n_jobs=None): """"""Computes the (weighted) graph of k-Neighbors for points in X Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) or BallTree Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, default=None Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, and 'distance' will return the distances between neighbors according to the given metric. metric : str, default='minkowski' The distance metric used to calculate the k-Neighbors for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the p param equal to 2.) p : int, default=2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params : dict, default=None additional keyword arguments for the metric function. include_self : bool or 'auto', default=False Whether or not to mark each sample as the first nearest neighbor to itself. If 'auto', then True is used for mode='connectivity' and False for mode='distance'. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. Returns ------- A : sparse graph in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) See also -------- radius_neighbors_graph """""" if not isinstance(X, KNeighborsMixin): X = NearestNeighbors(n_neighbors, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X._fit_X, include_self, mode) return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) " 57793,"def is_there_private_packs_to_upload(public_index_json, private_index_path): """""" Checks if there are private packs that should be uploaded. The check compares the private index with the public one to verify if Content commit hash of each private pack in those files (private and public index files) are equal. If there is one private pack that has a different content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT be skipped. 
Args: public_index_json (dict) : The public index file. private_index_path : Path to where the private index is located. Returns: (bool) True is there is at least one private pack that should be upload. False otherwise (i.e there are no private packs that should upload) """""" logging.debug(""Checking if there are private packs to upload"") with open(os.path.join(private_index_path, f""{GCPConfig.INDEX_NAME}.json"")) as private_index_file: private_index_json = json.load(private_index_file) if was_private_pack_updated(private_index_json, public_index_json): logging.debug(f""There is at least one private pack that was updated, upload should not be skipped"") return True return False ","def is_there_private_packs_to_upload(public_index_json, private_index_path): """""" Checks whether there were changes in private packs from the last upload. The check compares the `content commit hash` field in the public index with the value stored in the private index. If there is at least one private pack that has been updated/released, the upload should be performed and not skipped. Args: public_index_json (dict) : The public index file. private_index_path : Path to where the private index is located. Returns: (bool) True is there is at least one private pack that should be upload. False otherwise (i.e there are no private packs that should upload) """""" logging.debug(""Checking if there are private packs to upload"") with open(os.path.join(private_index_path, f""{GCPConfig.INDEX_NAME}.json"")) as private_index_file: private_index_json = json.load(private_index_file) if was_private_pack_updated(private_index_json, public_index_json): logging.debug(f""There is at least one private pack that was updated, upload should not be skipped"") return True return False " 747,"def get_data_independent_estimation_quantities(design_matrix, regularization_matrix=None): Q = compute_unscaled_posterior_precision(design_matrix, regularization_matrix) unscaled_posterior_covariance = covariance_from_precision(Q) # TODO: evaluate whether using the explicit inverse leads to numerical instability pseudoInv = np.einsum('...ij, ...kj->...ik', unscaled_posterior_covariance, design_matrix) # pseudoInv = np.linalg.solve(S, np.swapaxes(design_matrix, -1, -2)) degrees_of_freedom = compute_degrees_of_freedom(design_matrix, pseudoInv) return unscaled_posterior_covariance, pseudoInv, degrees_of_freedom ","def get_data_independent_estimation_quantities(design_matrix, regularization_matrix=None): qq = compute_unscaled_posterior_precision(design_matrix, regularization_matrix) unscaled_posterior_covariance = covariance_from_precision(Q) # TODO: evaluate whether using the explicit inverse leads to numerical instability pseudoInv = np.einsum('...ij, ...kj->...ik', unscaled_posterior_covariance, design_matrix) # pseudoInv = np.linalg.solve(S, np.swapaxes(design_matrix, -1, -2)) degrees_of_freedom = compute_degrees_of_freedom(design_matrix, pseudoInv) return unscaled_posterior_covariance, pseudoInv, degrees_of_freedom " 39422,"def load_saturn_ring_alpha(): """"""Load a saturn_ring_alpha source. Returns ------- pyvista.PolyData Sun dataset with texture. 
Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> pl.add_background_image(examples.download_stars_png()) >>> pl.add_mesh(examples.load_saturn_ring_alpha()) >>> pl.show() """""" # https://tamaskis.github.io/files/Visualizing_Celestial_Bodies_in_3D.pdf # Saturn's rings range from 7000.0 km to 80000.0 km from the surface of the planet inner = 60268.0 + 7000.0 outer = 60268.0 + 80000.0 disc = pyvista.Disc(inner=inner, outer=outer, c_res=50) disc.active_t_coords = np.zeros((disc.points.shape[0], 2)) disc.active_t_coords[:, 0] = np.sqrt(disc.points[:, 0]**2 + disc.points[:, 1]**2) / outer disc.active_t_coords[:, 1] = 0.0 atmosphere_png = examples.download_saturn_ring_alpha_png() atmosphere_tex = pyvista.read_texture(atmosphere_png) disc.textures[""atmosphere""] = atmosphere_tex return disc ","def load_saturn_ring_alpha(): """"""Load a saturn_ring_alpha source. Returns ------- pyvista.PolyData Sun dataset with texture. Examples -------- >>> import pyvista >>> from pyvista import examples >>> pl = pyvista.Plotter() >>> pl.add_background_image(examples.download_stars_png()) >>> pl.add_mesh(examples.load_saturn_ring_alpha()) >>> pl.show() """""" # https://tamaskis.github.io/files/Visualizing_Celestial_Bodies_in_3D.pdf # Saturn's rings range from 7000.0 km to 80000.0 km from the surface of the planet inner = 60268.0 + 7000.0 outer = 60268.0 + 80000.0 disc = pyvista.Disc(inner=inner, outer=outer, c_res=50) disc.active_t_coords = np.zeros((disc.points.shape[0], 2)) disc.active_t_coords[:, 0] = np.sqrt(disc.points[:, 0]**2 + disc.points[:, 1]**2) / outer disc.active_t_coords[:, 1] = 0.0 atmosphere_png = examples.download_saturn_ring_alpha_png() atmosphere_tex = pyvista.read_texture(atmosphere_png) disc.textures[""atmosphere""] = atmosphere_tex return disc " 9852,"def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict( cluster_name=dict(type='str', required=True), evc_mode=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['cluster_name', 'evc_mode']] ] ) state = module.params['state'] cluster_name = module.params['cluster_name'] evc_mode = module.params['evc_mode'] content = connect_to_api(module, False) results = dict(changed=False, result=dict()) cluster = find_cluster_by_name(content, cluster_name) evcm = cluster.EvcManager() evc_state = evcm.evcState current_evc_mode = evc_state.currentEVCModeKey supported_evc_modes = evc_state.supportedEVCMode if state == 'present' and current_evc_mode != evc_mode: try: if not module.check_mode: evc_task = evcm.ConfigureEvcMode_Task(evc_mode) wait_for_task(evc_task) results['changed'] = True results['result'] = ""EVC Mode for '%s' has been enabled."" % (evc_mode) except TaskError as invalid_argument: module.fail_json(msg=""Failed to update EVC mode: %s"" % to_native(invalid_argument)) elif state == 'present' and current_evc_mode == evc_mode: results['changed'] = False results['result'] = ""EVC Mode for '%s' is already enabled."" % (evc_mode) elif state == 'absent' and not current_evc_mode: results['changed'] = False results['result'] = ""EVC Mode is already disabled."" elif state == 'absent': try: if not module.check_mode: evc_disable_task = evcm.DisableEvcMode_Task() wait_for_task(evc_disable_task) results['changed'] = True results['result'] = ""EVC Mode has been disabled."" except TaskError as invalid_argument:
module.fail_json(msg=""Failed to disable EVC mode: %s"" % to_native(invalid_argument)) module.exit_json(**results) ","def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict( cluster_name=dict(type='str', required=True), evc_mode=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['cluster_name', 'evc_mode']] ] ) state = module.params['state'] cluster_name = module.params['cluster_name'] evc_mode = module.params['evc_mode'] content = connect_to_api(module, False) results = dict(changed=False, result=dict()) cluster = find_cluster_by_name(content, cluster_name) evcm = cluster.EvcManager() evc_state = evcm.evcState current_evc_mode = evc_state.currentEVCModeKey supported_evc_modes = evc_state.supportedEVCMode if state == 'present' and current_evc_mode != evc_mode: try: if not module.check_mode: evc_task = evcm.ConfigureEvcMode_Task(evc_mode) wait_for_task(evc_task) results['changed'] = changed results['result'] = ""EVC Mode for '%s' has been enabled."" % (evc_mode) except TaskError as invalid_argument: module.fail_json(msg=""Failed to update EVC mode: %s"" % to_native(invalid_argument)) elif state == 'present' and current_evc_mode == evc_mode: results['changed'] = False results['result'] = ""EVC Mode for '%s' is already enabled."" % (evc_mode) elif state == 'absent' and not current_evc_mode: results['changed'] = False results['result'] = ""EVC Mode is already disabled."" elif state == 'absent': try: if not module.check_mode: evc_disable_task = evcm.DisableEvcMode_Task() wait_for_task(evc_disable_task) results['changed'] = True results['result'] = ""EVC Mode has been disabled."" except TaskError as invalid_argument: module.fail_json(msg=""Failed to disable EVC mode: %s"" % to_native(invalid_argument)) module.exit_json(**results) " 946,"def _tsolve(eq, sym, **flags): """""" Helper for ``_solve`` that solves a transcendental equation with respect to the given symbol. Various equations containing powers and logarithms, can be solved. There is currently no guarantee that all solutions will be returned or that a real solution will be favored over a complex one. Either a list of potential solutions will be returned or None will be returned (in the case that no method was known to get a solution for the equation). All other errors (like the inability to cast an expression as a Poly) are unhandled. 
Examples ======== >>> from sympy import log >>> from sympy.solvers.solvers import _tsolve as tsolve >>> from sympy.abc import x >>> tsolve(3**(2*x + 5) - 4, x) [-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)] >>> tsolve(log(x) + 2*x, x) [LambertW(2)/2] """""" if 'tsolve_saw' not in flags: flags['tsolve_saw'] = [] if eq in flags['tsolve_saw']: return None else: flags['tsolve_saw'].append(eq) rhs, lhs = _invert(eq, sym) if lhs == sym: return [rhs] try: if lhs.is_Add: # it's time to try factoring; powdenest is used # to try get powers in standard form for better factoring f = factor(powdenest(lhs - rhs)) if f.is_Mul: return _solve(f, sym, **flags) if rhs: f = logcombine(lhs, force=flags.get('force', True)) if f.count(log) != lhs.count(log): if isinstance(f, log): return _solve(f.args[0] - exp(rhs), sym, **flags) return _tsolve(f - rhs, sym, **flags) elif lhs.is_Pow: if lhs.exp.is_Integer: if lhs - rhs != eq: return _solve(lhs - rhs, sym, **flags) if sym not in lhs.exp.free_symbols: return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags) # _tsolve calls this with Dummy before passing the actual number in. if any(t.is_Dummy for t in rhs.free_symbols): raise NotImplementedError # _tsolve will call here again... # a ** g(x) == 0 if not rhs: # f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at # the same place sol_base = _solve(lhs.base, sym, **flags) return [s for s in sol_base if lhs.exp.subs(sym, s) != 0] # a ** g(x) == b if not lhs.base.has(sym): if lhs.base == 0: return _solve(lhs.exp, sym, **flags) if rhs != 0 else [] # Gets most solutions... if lhs.base == rhs.as_base_exp()[0]: # handles case when bases are equal sol = _solve(lhs.exp - rhs.as_base_exp()[1], sym, **flags) else: # handles cases when bases are not equal and exp # may or may not be equal sol = _solve(exp(log(lhs.base)*lhs.exp)-exp(log(rhs)), sym, **flags) # Check for duplicate solutions def equal(expr1, expr2): _ = Dummy() eq = checksol(expr1 - _, _, expr2) if eq is None: if nsimplify(expr1) != nsimplify(expr2): return False # they might be coincidentally the same # so check more rigorously eq = expr1.equals(expr2) return eq # Guess a rational exponent e_rat = nsimplify(log(abs(rhs))/log(abs(lhs.base))) e_rat = simplify(posify(e_rat)[0]) n, d = fraction(e_rat) if expand(lhs.base**n - rhs**d) == 0: sol = [s for s in sol if not equal(lhs.exp.subs(sym, s), e_rat)] sol.extend(_solve(lhs.exp - e_rat, sym, **flags)) return list(ordered(set(sol))) # f(x) ** g(x) == c else: sol = [] logform = lhs.exp*log(lhs.base) - log(rhs) if logform != lhs - rhs: try: sol.extend(_solve(logform, sym, **flags)) except NotImplementedError: pass # Collect possible solutions and check with substitution later. 
check = [] if rhs == 1: # f(x) ** g(x) = 1 -- g(x)=0 or f(x)=+-1 check.extend(_solve(lhs.exp, sym, **flags)) check.extend(_solve(lhs.base - 1, sym, **flags)) check.extend(_solve(lhs.base + 1, sym, **flags)) elif rhs.is_Rational: for d in (i for i in divisors(abs(rhs.p)) if i != 1): e, t = integer_log(rhs.p, d) if not t: continue # rhs.p != d**b for s in divisors(abs(rhs.q)): if s**e== rhs.q: r = Rational(d, s) check.extend(_solve(lhs.base - r, sym, **flags)) check.extend(_solve(lhs.base + r, sym, **flags)) check.extend(_solve(lhs.exp - e, sym, **flags)) elif rhs.is_irrational: b_l, e_l = lhs.base.as_base_exp() n, d = (e_l*lhs.exp).as_numer_denom() b, e = sqrtdenest(rhs).as_base_exp() check = [sqrtdenest(i) for i in (_solve(lhs.base - b, sym, **flags))] check.extend([sqrtdenest(i) for i in (_solve(lhs.exp - e, sym, **flags))]) if e_l*d != 1: check.extend(_solve(b_l**n - rhs**(e_l*d), sym, **flags)) for s in check: ok = checksol(eq, sym, s) if ok is None: ok = eq.subs(sym, s).equals(0) if ok: sol.append(s) return list(ordered(set(sol))) elif lhs.is_Function and len(lhs.args) == 1: if lhs.func in multi_inverses: # sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3)) soln = [] for i in multi_inverses[type(lhs)](rhs): soln.extend(_solve(lhs.args[0] - i, sym, **flags)) return list(ordered(soln)) elif lhs.func == LambertW: return _solve(lhs.args[0] - rhs*exp(rhs), sym, **flags) rewrite = lhs.rewrite(exp) if rewrite != lhs: return _solve(rewrite - rhs, sym, **flags) except NotImplementedError: pass # maybe it is a lambert pattern if flags.pop('bivariate', True): # lambert forms may need some help being recognized, e.g. changing # 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1 # to 2**(3*x) + (x*log(2) + 1)**3 logs = eq.atoms(log) spow = {i.exp for j in logs for i in j.atoms(Pow) if i.base == sym} spow = min(spow) if spow else 1 if spow > 1 and eq.subs(sym**spow, Dummy()).has_free(sym): spow = 1 # there was a free-symbol or non-pow generator with sym if spow != 1: u = Dummy('bivariate-cov') p = sym**spow ueq = eq.subs(p, u) sol = solve(ueq, u, **flags) inv = solve(p - u, sym) rv = [] for i in inv: rv.extend([i.subs(u, s) for s in sol]) return rv g = _filtered_gens(eq.as_poly(), sym) up_or_log = set() for gi in g: if isinstance(gi, (exp, log)) or (gi.is_Pow and gi.base == S.Exp1): up_or_log.add(gi) elif gi.is_Pow: gisimp = powdenest(expand_power_exp(gi)) if gisimp.is_Pow and sym in gisimp.exp.free_symbols: up_or_log.add(gi) eq_down = expand_log(expand_power_exp(eq)).subs( dict(list(zip(up_or_log, [0]*len(up_or_log))))) eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down)) rhs, lhs = _invert(eq, sym) if lhs.has(sym): try: poly = lhs.as_poly() g = _filtered_gens(poly, sym) _eq = lhs - rhs sols = _solve_lambert(_eq, sym, g) # use a simplified form if it satisfies eq # and has fewer operations for n, s in enumerate(sols): ns = nsimplify(s) if ns != s and ns.count_ops() <= s.count_ops(): ok = checksol(_eq, sym, ns) if ok is None: ok = _eq.subs(sym, ns).equals(0) if ok: sols[n] = ns return sols except NotImplementedError: # maybe it's a convoluted function if len(g) == 2: try: gpu = bivariate_type(lhs - rhs, *g) if gpu is None: raise NotImplementedError g, p, u = gpu flags['bivariate'] = False inversion = _tsolve(g - u, sym, **flags) if inversion: sol = _solve(p, u, **flags) return list(ordered({i.subs(u, s) for i in inversion for s in sol})) except NotImplementedError: pass else: pass if flags.pop('force', True): flags['force'] = False pos, reps = posify(lhs - rhs) if rhs == 
S.ComplexInfinity: return [] for u, s in reps.items(): if s == sym: break else: u = sym if pos.has(u): try: soln = _solve(pos, u, **flags) return list(ordered([s.subs(reps) for s in soln])) except NotImplementedError: pass else: pass # here for coverage return # here for coverage ","def _tsolve(eq, sym, **flags): """""" Helper for ``_solve`` that solves a transcendental equation with respect to the given symbol. Various equations containing powers and logarithms, can be solved. There is currently no guarantee that all solutions will be returned or that a real solution will be favored over a complex one. Either a list of potential solutions will be returned or None will be returned (in the case that no method was known to get a solution for the equation). All other errors (like the inability to cast an expression as a Poly) are unhandled. Examples ======== >>> from sympy import log >>> from sympy.solvers.solvers import _tsolve as tsolve >>> from sympy.abc import x >>> tsolve(3**(2*x + 5) - 4, x) [-5/2 + log(2)/log(3), (-5*log(3)/2 + log(2) + I*pi)/log(3)] >>> tsolve(log(x) + 2*x, x) [LambertW(2)/2] """""" if 'tsolve_saw' not in flags: flags['tsolve_saw'] = [] if eq in flags['tsolve_saw']: return None else: flags['tsolve_saw'].append(eq) rhs, lhs = _invert(eq, sym) if lhs == sym: return [rhs] try: if lhs.is_Add: # it's time to try factoring; powdenest is used # to try get powers in standard form for better factoring f = factor(powdenest(lhs - rhs)) if f.is_Mul: return _solve(f, sym, **flags) if rhs: f = logcombine(lhs, force=flags.get('force', True)) if f.count(log) != lhs.count(log): if isinstance(f, log): return _solve(f.args[0] - exp(rhs), sym, **flags) return _tsolve(f - rhs, sym, **flags) elif lhs.is_Pow: if lhs.exp.is_Integer: if lhs - rhs != eq: return _solve(lhs - rhs, sym, **flags) if sym not in lhs.exp.free_symbols: return _solve(lhs.base - rhs**(1/lhs.exp), sym, **flags) # _tsolve calls this with Dummy before passing the actual number in. if any(t.is_Dummy for t in rhs.free_symbols): raise NotImplementedError # _tsolve will call here again... # a ** g(x) == 0 if not rhs: # f(x)**g(x) only has solutions where f(x) == 0 and g(x) != 0 at # the same place sol_base = _solve(lhs.base, sym, **flags) return [s for s in sol_base if lhs.exp.subs(sym, s) != 0] # a ** g(x) == b if not lhs.base.has(sym): if lhs.base == 0: return _solve(lhs.exp, sym, **flags) if rhs != 0 else [] # Gets most solutions... 
if lhs.base == rhs.as_base_exp()[0]: # handles case when bases are equal sol = _solve(lhs.exp - rhs.as_base_exp()[1], sym, **flags) else: # handles cases when bases are not equal and exp # may or may not be equal sol = _solve(exp(log(lhs.base)*lhs.exp)-exp(log(rhs)), sym, **flags) # Check for duplicate solutions def equal(expr1, expr2): _ = Dummy() eq = checksol(expr1 - _, _, expr2) if eq is None: if nsimplify(expr1) != nsimplify(expr2): return False # they might be coincidentally the same # so check more rigorously eq = expr1.equals(expr2) return eq # Guess a rational exponent e_rat = nsimplify(log(abs(rhs))/log(abs(lhs.base))) e_rat = simplify(posify(e_rat)[0]) n, d = fraction(e_rat) if expand(lhs.base**n - rhs**d) == 0: sol = [s for s in sol if not equal(lhs.exp.subs(sym, s), e_rat)] sol.extend(_solve(lhs.exp - e_rat, sym, **flags)) return list(ordered(set(sol))) # f(x) ** g(x) == c else: sol = [] logform = lhs.exp*log(lhs.base) - log(rhs) if logform != lhs - rhs: try: sol.extend(_solve(logform, sym, **flags)) except NotImplementedError: pass # Collect possible solutions and check with substitution later. check = [] if rhs == 1: # f(x) ** g(x) = 1 -- g(x)=0 or f(x)=+-1 check.extend(_solve(lhs.exp, sym, **flags)) check.extend(_solve(lhs.base - 1, sym, **flags)) check.extend(_solve(lhs.base + 1, sym, **flags)) elif rhs.is_Rational: for d in (i for i in divisors(abs(rhs.p)) if i != 1): e, t = integer_log(rhs.p, d) if not t: continue # rhs.p != d**b for s in divisors(abs(rhs.q)): if s**e== rhs.q: r = Rational(d, s) check.extend(_solve(lhs.base - r, sym, **flags)) check.extend(_solve(lhs.base + r, sym, **flags)) check.extend(_solve(lhs.exp - e, sym, **flags)) elif rhs.is_irrational: b_l, e_l = lhs.base.as_base_exp() n, d = (e_l*lhs.exp).as_numer_denom() b, e = sqrtdenest(rhs).as_base_exp() check = [sqrtdenest(i) for i in (_solve(lhs.base - b, sym, **flags))] check.extend([sqrtdenest(i) for i in (_solve(lhs.exp - e, sym, **flags))]) if e_l*d != 1: check.extend(_solve(b_l**n - rhs**(e_l*d), sym, **flags)) for s in check: ok = checksol(eq, sym, s) if ok is None: ok = eq.subs(sym, s).equals(0) if ok: sol.append(s) return list(ordered(set(sol))) elif lhs.is_Function and len(lhs.args) == 1: if lhs.func in multi_inverses: # sin(x) = 1/3 -> x - asin(1/3) & x - (pi - asin(1/3)) soln = [] for i in multi_inverses[type(lhs)](rhs): soln.extend(_solve(lhs.args[0] - i, sym, **flags)) return list(ordered(soln)) elif lhs.func == LambertW: return _solve(lhs.args[0] - rhs*exp(rhs), sym, **flags) rewrite = lhs.rewrite(exp) if rewrite != lhs: return _solve(rewrite - rhs, sym, **flags) except NotImplementedError: pass # maybe it is a lambert pattern if flags.pop('bivariate', True): # lambert forms may need some help being recognized, e.g. 
changing # 2**(3*x) + x**3*log(2)**3 + 3*x**2*log(2)**2 + 3*x*log(2) + 1 # to 2**(3*x) + (x*log(2) + 1)**3 logs = eq.atoms(log) spow = {i.exp.as_coeff_Mul()[0] for j in logs for i in j.atoms(Pow) if i.base == sym} spow = min(spow) if spow else 1 if spow > 1 and eq.subs(sym**spow, Dummy()).has_free(sym): spow = 1 # there was a free-symbol or non-pow generator with sym if spow != 1: u = Dummy('bivariate-cov') p = sym**spow ueq = eq.subs(p, u) sol = solve(ueq, u, **flags) inv = solve(p - u, sym) rv = [] for i in inv: rv.extend([i.subs(u, s) for s in sol]) return rv g = _filtered_gens(eq.as_poly(), sym) up_or_log = set() for gi in g: if isinstance(gi, (exp, log)) or (gi.is_Pow and gi.base == S.Exp1): up_or_log.add(gi) elif gi.is_Pow: gisimp = powdenest(expand_power_exp(gi)) if gisimp.is_Pow and sym in gisimp.exp.free_symbols: up_or_log.add(gi) eq_down = expand_log(expand_power_exp(eq)).subs( dict(list(zip(up_or_log, [0]*len(up_or_log))))) eq = expand_power_exp(factor(eq_down, deep=True) + (eq - eq_down)) rhs, lhs = _invert(eq, sym) if lhs.has(sym): try: poly = lhs.as_poly() g = _filtered_gens(poly, sym) _eq = lhs - rhs sols = _solve_lambert(_eq, sym, g) # use a simplified form if it satisfies eq # and has fewer operations for n, s in enumerate(sols): ns = nsimplify(s) if ns != s and ns.count_ops() <= s.count_ops(): ok = checksol(_eq, sym, ns) if ok is None: ok = _eq.subs(sym, ns).equals(0) if ok: sols[n] = ns return sols except NotImplementedError: # maybe it's a convoluted function if len(g) == 2: try: gpu = bivariate_type(lhs - rhs, *g) if gpu is None: raise NotImplementedError g, p, u = gpu flags['bivariate'] = False inversion = _tsolve(g - u, sym, **flags) if inversion: sol = _solve(p, u, **flags) return list(ordered({i.subs(u, s) for i in inversion for s in sol})) except NotImplementedError: pass else: pass if flags.pop('force', True): flags['force'] = False pos, reps = posify(lhs - rhs) if rhs == S.ComplexInfinity: return [] for u, s in reps.items(): if s == sym: break else: u = sym if pos.has(u): try: soln = _solve(pos, u, **flags) return list(ordered([s.subs(reps) for s in soln])) except NotImplementedError: pass else: pass # here for coverage return # here for coverage " 14534,"def player(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version): # general imports from time import sleep import logging from glob import glob from time import time, strftime, localtime # networking import zmq import zmq_tools import numpy as np # zmq ipc setup zmq_ctx = zmq.Context() ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url) notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=(""notify"",)) # log setup logging.getLogger(""OpenGL"").setLevel(logging.ERROR) logger = logging.getLogger() logger.handlers = [] logger.setLevel(logging.NOTSET) logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url)) # create logger for the context of this function logger = logging.getLogger(__name__) try: from background_helper import IPC_Logging_Task_Proxy IPC_Logging_Task_Proxy.push_url = ipc_push_url from tasklib.background.patches import IPCLoggingPatch IPCLoggingPatch.ipc_push_url = ipc_push_url # imports from file_methods import Persistent_Dict, next_export_sub_dir # display import glfw # check versions for our own depedencies as they are fast-changing from pyglui import __version__ as pyglui_version from pyglui import ui, cygl from pyglui.cygl.utils import Named_Texture, RGBA import gl_utils # capture from video_capture import File_Source # helpers/utils from version_utils 
import VersionFormat from methods import normalize, denormalize, delta_t, get_system_info import player_methods as pm from csv_utils import write_key_value_file # Plug-ins from plugin import Plugin, Plugin_List, import_runtime_plugins from plugin_manager import Plugin_Manager from vis_circle import Vis_Circle from vis_cross import Vis_Cross from vis_polyline import Vis_Polyline from vis_light_points import Vis_Light_Points from vis_watermark import Vis_Watermark from vis_fixation import Vis_Fixation # from vis_scan_path import Vis_Scan_Path from vis_eye_video_overlay import Vis_Eye_Video_Overlay from seek_control import Seek_Control from offline_surface_tracker import Offline_Surface_Tracker # from marker_auto_trim_marks import Marker_Auto_Trim_Marks from fixation_detector import Offline_Fixation_Detector from eye_movement_detector import Offline_Eye_Movement_Detector from log_display import Log_Display from annotations import Annotation_Player from raw_data_exporter import Raw_Data_Exporter from log_history import Log_History from pupil_producers import Pupil_From_Recording, Offline_Pupil_Detection from gaze_producer.gaze_from_recording import GazeFromRecording from gaze_producer.gaze_from_offline_calibration import ( GazeFromOfflineCalibration, ) from system_graphs import System_Graphs from system_timelines import System_Timelines from blink_detection import Offline_Blink_Detection from audio_playback import Audio_Playback from video_export.plugins.imotions_exporter import iMotions_Exporter from video_export.plugins.eye_video_exporter import Eye_Video_Exporter from video_export.plugins.world_video_exporter import World_Video_Exporter from video_capture import File_Source assert VersionFormat(pyglui_version) >= VersionFormat( ""1.23"" ), ""pyglui out of date, please upgrade to newest version"" runtime_plugins = import_runtime_plugins(os.path.join(user_dir, ""plugins"")) system_plugins = [ Log_Display, Seek_Control, Plugin_Manager, System_Graphs, System_Timelines, Audio_Playback, ] user_plugins = [ Vis_Circle, Vis_Fixation, Vis_Polyline, Vis_Light_Points, Vis_Cross, Vis_Watermark, Vis_Eye_Video_Overlay, # Vis_Scan_Path, Offline_Fixation_Detector, Offline_Eye_Movement_Detector, Offline_Blink_Detection, Offline_Surface_Tracker, Raw_Data_Exporter, Annotation_Player, Log_History, Pupil_From_Recording, Offline_Pupil_Detection, GazeFromRecording, GazeFromOfflineCalibration, World_Video_Exporter, iMotions_Exporter, Eye_Video_Exporter, ] + runtime_plugins plugins = system_plugins + user_plugins # Callback functions def on_resize(window, w, h): nonlocal window_size nonlocal hdpi_factor if w == 0 or h == 0: return hdpi_factor = glfw.getHDPIFactor(window) g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor window_size = w, h g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h g_pool.gui.update_window(*window_size) g_pool.gui.collect_menus() for p in g_pool.plugins: p.on_window_resize(window, *g_pool.camera_render_size) def on_window_key(window, key, scancode, action, mods): g_pool.gui.update_key(key, scancode, action, mods) def on_window_char(window, char): g_pool.gui.update_char(char) def on_window_mouse_button(window, button, action, mods): g_pool.gui.update_button(button, action, mods) def on_pos(window, x, y): x, y = x * hdpi_factor, y * hdpi_factor g_pool.gui.update_mouse(x, y) pos = x, y pos = normalize(pos, g_pool.camera_render_size) # Position in img pixels pos = denormalize(pos, g_pool.capture.frame_size) for p in g_pool.plugins: p.on_pos(pos) def on_scroll(window, 
x, y): g_pool.gui.update_scroll(x, y * scroll_factor) def on_drop(window, count, paths): for x in range(count): new_rec_dir = paths[x].decode(""utf-8"") if pm.is_pupil_rec_dir(new_rec_dir): logger.debug(""Starting new session with '{}'"".format(new_rec_dir)) ipc_pub.notify( { ""subject"": ""player_drop_process.should_start"", ""rec_dir"": new_rec_dir, } ) glfw.glfwSetWindowShouldClose(window, True) else: logger.error( ""'{}' is not a valid pupil recording"".format(new_rec_dir) ) tick = delta_t() def get_dt(): return next(tick) meta_info = pm.load_meta_info(rec_dir) # log info about Pupil Platform and Platform in player.log logger.info(""Application Version: {}"".format(app_version)) logger.info(""System Info: {}"".format(get_system_info())) icon_bar_width = 50 window_size = None hdpi_factor = 1.0 # create container for globally scoped vars g_pool = Global_Container() g_pool.app = ""player"" g_pool.zmq_ctx = zmq_ctx g_pool.ipc_pub = ipc_pub g_pool.ipc_pub_url = ipc_pub_url g_pool.ipc_sub_url = ipc_sub_url g_pool.ipc_push_url = ipc_push_url g_pool.plugin_by_name = {p.__name__: p for p in plugins} g_pool.camera_render_size = None valid_ext = ("".mp4"", "".mkv"", "".avi"", "".h264"", "".mjpeg"", "".fake"") video_path = [ f for f in glob(os.path.join(rec_dir, ""world.*"")) if os.path.splitext(f)[1] in valid_ext ][0] File_Source( g_pool, timing=""external"", source_path=video_path, buffered_decoding=True, fill_gaps=True, ) # load session persistent settings session_settings = Persistent_Dict( os.path.join(user_dir, ""user_settings_player"") ) if VersionFormat(session_settings.get(""version"", ""0.0"")) != app_version: logger.info( ""Session setting are a different version of this app. I will not use those."" ) session_settings.clear() width, height = g_pool.capture.frame_size width += icon_bar_width width, height = session_settings.get(""window_size"", (width, height)) window_pos = session_settings.get(""window_position"", window_position_default) window_name = ""Pupil Player: {} - {}"".format( meta_info[""Recording Name""], os.path.split(rec_dir)[-1] ) glfw.glfwInit() main_window = glfw.glfwCreateWindow(width, height, window_name, None, None) glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1]) glfw.glfwMakeContextCurrent(main_window) cygl.utils.init() g_pool.main_window = main_window def set_scale(new_scale): g_pool.gui_user_scale = new_scale window_size = ( g_pool.camera_render_size[0] + int(icon_bar_width * g_pool.gui_user_scale * hdpi_factor), glfw.glfwGetFramebufferSize(main_window)[1], ) logger.warning(icon_bar_width * g_pool.gui_user_scale * hdpi_factor) glfw.glfwSetWindowSize(main_window, *window_size) # load pupil_positions, gaze_positions g_pool.binocular = meta_info.get(""Eye Mode"", ""monocular"") == ""binocular"" g_pool.version = app_version g_pool.timestamps = g_pool.capture.timestamps g_pool.get_timestamp = lambda: 0.0 g_pool.user_dir = user_dir g_pool.rec_dir = rec_dir g_pool.meta_info = meta_info g_pool.min_data_confidence = session_settings.get(""min_data_confidence"", 0.6) g_pool.min_calibration_confidence = session_settings.get( ""min_calibration_confidence"", 0.8 ) # populated by producers g_pool.pupil_positions = pm.Bisector() g_pool.pupil_positions_by_id = (pm.Bisector(), pm.Bisector()) g_pool.gaze_positions = pm.Bisector() g_pool.fixations = pm.Affiliator() g_pool.eye_movement_segments = pm.Affiliator() def set_data_confidence(new_confidence): g_pool.min_data_confidence = new_confidence notification = {""subject"": ""min_data_confidence_changed""} 
notification[""_notify_time_""] = time() + 0.8 g_pool.ipc_pub.notify(notification) def do_export(_): left_idx = g_pool.seek_control.trim_left right_idx = g_pool.seek_control.trim_right export_range = left_idx, right_idx + 1 # exclusive range.stop export_dir = os.path.join(g_pool.rec_dir, ""exports"") export_dir = next_export_sub_dir(export_dir) os.makedirs(export_dir) logger.info('Created export dir at ""{}""'.format(export_dir)) export_info = { ""Player Software Version"": str(g_pool.version), ""Data Format Version"": meta_info[""Data Format Version""], ""Export Date"": strftime(""%d.%m.%Y"", localtime()), ""Export Time"": strftime(""%H:%M:%S"", localtime()), ""Frame Index Range:"": g_pool.seek_control.get_frame_index_trim_range_string(), ""Relative Time Range"": g_pool.seek_control.get_rel_time_trim_range_string(), ""Absolute Time Range"": g_pool.seek_control.get_abs_time_trim_range_string(), } with open(os.path.join(export_dir, ""export_info.csv""), ""w"") as csv: write_key_value_file(csv, export_info) notification = { ""subject"": ""should_export"", ""range"": export_range, ""export_dir"": export_dir, } g_pool.ipc_pub.notify(notification) def reset_restart(): logger.warning(""Resetting all settings and restarting Player."") glfw.glfwSetWindowShouldClose(main_window, True) ipc_pub.notify({""subject"": ""clear_settings_process.should_start""}) ipc_pub.notify( { ""subject"": ""player_process.should_start"", ""rec_dir"": rec_dir, ""delay"": 2.0, } ) def toggle_general_settings(collapsed): # this is the menu toggle logic. # Only one menu can be open. # If no menu is open the menubar should collapse. g_pool.menubar.collapsed = collapsed for m in g_pool.menubar.elements: m.collapsed = True general_settings.collapsed = collapsed g_pool.gui = ui.UI() g_pool.gui_user_scale = session_settings.get(""gui_scale"", 1.0) g_pool.menubar = ui.Scrolling_Menu( ""Settings"", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos=""left"" ) g_pool.iconbar = ui.Scrolling_Menu( ""Icons"", pos=(-icon_bar_width, 0), size=(0, 0), header_pos=""hidden"" ) g_pool.timelines = ui.Container((0, 0), (0, 0), (0, 0)) g_pool.timelines.horizontal_constraint = g_pool.menubar g_pool.user_timelines = ui.Timeline_Menu( ""User Timelines"", pos=(0.0, -150.0), size=(0.0, 0.0), header_pos=""headline"" ) g_pool.user_timelines.color = RGBA(a=0.0) g_pool.user_timelines.collapsed = True # add container that constaints itself to the seekbar height vert_constr = ui.Container((0, 0), (0, -50.0), (0, 0)) vert_constr.append(g_pool.user_timelines) g_pool.timelines.append(vert_constr) def set_window_size(): f_width, f_height = g_pool.capture.frame_size f_width += int(icon_bar_width * g_pool.gui.scale) glfw.glfwSetWindowSize(main_window, f_width, f_height) general_settings = ui.Growing_Menu(""General"", header_pos=""headline"") general_settings.append(ui.Button(""Reset window size"", set_window_size)) general_settings.append( ui.Selector( ""gui_user_scale"", g_pool, setter=set_scale, selection=[0.8, 0.9, 1.0, 1.1, 1.2] + list(np.arange(1.5, 5.1, 0.5)), label=""Interface Size"", ) ) general_settings.append( ui.Info_Text(""Player Version: {}"".format(g_pool.version)) ) general_settings.append( ui.Info_Text( ""Capture Version: {}"".format(meta_info[""Capture Software Version""]) ) ) general_settings.append( ui.Info_Text( ""Data Format Version: {}"".format(meta_info[""Data Format Version""]) ) ) general_settings.append( ui.Info_Text( ""High level data, e.g. 
fixations, or visualizations only consider gaze data that has an equal or higher confidence than the minimum data confidence."" ) ) general_settings.append( ui.Slider( ""min_data_confidence"", g_pool, setter=set_data_confidence, step=0.05, min=0.0, max=1.0, label=""Minimum data confidence"", ) ) general_settings.append( ui.Button(""Restart with default settings"", reset_restart) ) g_pool.menubar.append(general_settings) icon = ui.Icon( ""collapsed"", general_settings, label=chr(0xE8B8), on_val=False, off_val=True, setter=toggle_general_settings, label_font=""pupil_icons"", ) icon.tooltip = ""General Settings"" g_pool.iconbar.append(icon) user_plugin_separator = ui.Separator() user_plugin_separator.order = 0.35 g_pool.iconbar.append(user_plugin_separator) g_pool.quickbar = ui.Stretching_Menu(""Quick Bar"", (0, 100), (100, -100)) g_pool.export_button = ui.Thumb( ""export"", label=chr(0xE2C5), getter=lambda: False, setter=do_export, hotkey=""e"", label_font=""pupil_icons"", ) g_pool.quickbar.extend([g_pool.export_button]) g_pool.gui.append(g_pool.menubar) g_pool.gui.append(g_pool.timelines) g_pool.gui.append(g_pool.iconbar) g_pool.gui.append(g_pool.quickbar) # we always load these plugins default_plugins = [ (""Plugin_Manager"", {}), (""Seek_Control"", {}), (""Log_Display"", {}), (""Raw_Data_Exporter"", {}), (""Vis_Polyline"", {}), (""Vis_Circle"", {}), (""System_Graphs"", {}), (""System_Timelines"", {}), (""World_Video_Exporter"", {}), (""Pupil_From_Recording"", {}), (""GazeFromRecording"", {}), (""Audio_Playback"", {}), ] g_pool.plugins = Plugin_List( g_pool, session_settings.get(""loaded_plugins"", default_plugins) ) # Manually add g_pool.capture to the plugin list g_pool.plugins._plugins.append(g_pool.capture) g_pool.plugins._plugins.sort(key=lambda p: p.order) g_pool.capture.init_ui() general_settings.insert( -1, ui.Text_Input( ""rel_time_trim_section"", getter=g_pool.seek_control.get_rel_time_trim_range_string, setter=g_pool.seek_control.set_rel_time_trim_range_string, label=""Relative time range to export"", ), ) general_settings.insert( -1, ui.Text_Input( ""frame_idx_trim_section"", getter=g_pool.seek_control.get_frame_index_trim_range_string, setter=g_pool.seek_control.set_frame_index_trim_range_string, label=""Frame index range to export"", ), ) # Register callbacks main_window glfw.glfwSetFramebufferSizeCallback(main_window, on_resize) glfw.glfwSetKeyCallback(main_window, on_window_key) glfw.glfwSetCharCallback(main_window, on_window_char) glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button) glfw.glfwSetCursorPosCallback(main_window, on_pos) glfw.glfwSetScrollCallback(main_window, on_scroll) glfw.glfwSetDropCallback(main_window, on_drop) toggle_general_settings(True) g_pool.gui.configuration = session_settings.get(""ui_config"", {}) # gl_state settings gl_utils.basic_gl_setup() g_pool.image_tex = Named_Texture() # trigger on_resize on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window)) def handle_notifications(n): subject = n[""subject""] if subject == ""start_plugin"": g_pool.plugins.add( g_pool.plugin_by_name[n[""name""]], args=n.get(""args"", {}) ) elif subject.startswith(""meta.should_doc""): ipc_pub.notify( {""subject"": ""meta.doc"", ""actor"": g_pool.app, ""doc"": player.__doc__} ) for p in g_pool.plugins: if ( p.on_notify.__doc__ and p.__class__.on_notify != Plugin.on_notify ): ipc_pub.notify( { ""subject"": ""meta.doc"", ""actor"": p.class_name, ""doc"": p.on_notify.__doc__, } ) while not glfw.glfwWindowShouldClose(main_window): # fetch newest 
notifications new_notifications = [] while notify_sub.new_data: t, n = notify_sub.recv() new_notifications.append(n) # notify each plugin if there are new notifications: for n in new_notifications: handle_notifications(n) for p in g_pool.plugins: p.on_notify(n) events = {} # report time between now and the last loop interation events[""dt""] = get_dt() # pupil and gaze positions are added by their respective producer plugins events[""pupil""] = [] events[""gaze""] = [] # allow each Plugin to do its work. for p in g_pool.plugins: p.recent_events(events) # check if a plugin need to be destroyed g_pool.plugins.clean() glfw.glfwMakeContextCurrent(main_window) # render visual feedback from loaded plugins if gl_utils.is_window_visible(main_window): gl_utils.glViewport(0, 0, *g_pool.camera_render_size) g_pool.capture.gl_display() for p in g_pool.plugins: p.gl_display() gl_utils.glViewport(0, 0, *window_size) try: clipboard = glfw.glfwGetClipboardString(main_window).decode() except AttributeError: # clipbaord is None, might happen on startup clipboard = """" g_pool.gui.update_clipboard(clipboard) user_input = g_pool.gui.update() if user_input.clipboard and user_input.clipboard != clipboard: # only write to clipboard if content changed glfw.glfwSetClipboardString( main_window, user_input.clipboard.encode() ) for b in user_input.buttons: button, action, mods = b x, y = glfw.glfwGetCursorPos(main_window) pos = x * hdpi_factor, y * hdpi_factor pos = normalize(pos, g_pool.camera_render_size) pos = denormalize(pos, g_pool.capture.frame_size) for p in g_pool.plugins: p.on_click(pos, button, action) for key, scancode, action, mods in user_input.keys: for p in g_pool.plugins: p.on_key(key, scancode, action, mods) for char_ in user_input.chars: for p in g_pool.plugins: p.on_char(char_) # present frames at appropriate speed g_pool.seek_control.wait(events[""frame""].timestamp) glfw.glfwSwapBuffers(main_window) glfw.glfwPollEvents() session_settings[""loaded_plugins""] = g_pool.plugins.get_initializers() session_settings[""min_data_confidence""] = g_pool.min_data_confidence session_settings[ ""min_calibration_confidence"" ] = g_pool.min_calibration_confidence session_settings[""gui_scale""] = g_pool.gui_user_scale session_settings[""ui_config""] = g_pool.gui.configuration session_settings[""window_position""] = glfw.glfwGetWindowPos(main_window) session_settings[""version""] = str(g_pool.version) session_window_size = glfw.glfwGetWindowSize(main_window) if 0 not in session_window_size: session_settings[""window_size""] = session_window_size session_settings.close() # de-init all running plugins for p in g_pool.plugins: p.alive = False g_pool.plugins.clean() g_pool.gui.terminate() glfw.glfwDestroyWindow(main_window) except: import traceback trace = traceback.format_exc() logger.error(""Process Player crashed with trace:\n{}"".format(trace)) finally: logger.info(""Process shutting down."") ipc_pub.notify({""subject"": ""player_process.stopped""}) sleep(1.0) ","def player(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir, app_version): # general imports from time import sleep import logging from glob import glob from time import time, strftime, localtime # networking import zmq import zmq_tools import numpy as np # zmq ipc setup zmq_ctx = zmq.Context() ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url) notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=(""notify"",)) # log setup logging.getLogger(""OpenGL"").setLevel(logging.ERROR) logger = logging.getLogger() logger.handlers = [] 
logger.setLevel(logging.NOTSET) logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url)) # create logger for the context of this function logger = logging.getLogger(__name__) try: from background_helper import IPC_Logging_Task_Proxy IPC_Logging_Task_Proxy.push_url = ipc_push_url from tasklib.background.patches import IPCLoggingPatch IPCLoggingPatch.ipc_push_url = ipc_push_url # imports from file_methods import Persistent_Dict, next_export_sub_dir # display import glfw # check versions for our own depedencies as they are fast-changing from pyglui import __version__ as pyglui_version from pyglui import ui, cygl from pyglui.cygl.utils import Named_Texture, RGBA import gl_utils # capture from video_capture import File_Source # helpers/utils from version_utils import VersionFormat from methods import normalize, denormalize, delta_t, get_system_info import player_methods as pm from csv_utils import write_key_value_file # Plug-ins from plugin import Plugin, Plugin_List, import_runtime_plugins from plugin_manager import Plugin_Manager from vis_circle import Vis_Circle from vis_cross import Vis_Cross from vis_polyline import Vis_Polyline from vis_light_points import Vis_Light_Points from vis_watermark import Vis_Watermark from vis_fixation import Vis_Fixation # from vis_scan_path import Vis_Scan_Path from vis_eye_video_overlay import Vis_Eye_Video_Overlay from seek_control import Seek_Control from offline_surface_tracker import Offline_Surface_Tracker # from marker_auto_trim_marks import Marker_Auto_Trim_Marks from fixation_detector import Offline_Fixation_Detector from eye_movement_detector import Offline_Eye_Movement_Detector from log_display import Log_Display from annotations import Annotation_Player from raw_data_exporter import Raw_Data_Exporter from log_history import Log_History from pupil_producers import Pupil_From_Recording, Offline_Pupil_Detection from gaze_producer.gaze_from_recording import GazeFromRecording from gaze_producer.gaze_from_offline_calibration import ( GazeFromOfflineCalibration, ) from system_graphs import System_Graphs from system_timelines import System_Timelines from blink_detection import Offline_Blink_Detection from audio_playback import Audio_Playback from video_export.plugins.imotions_exporter import iMotions_Exporter from video_export.plugins.eye_video_exporter import Eye_Video_Exporter from video_export.plugins.world_video_exporter import World_Video_Exporter from video_capture import File_Source assert VersionFormat(pyglui_version) >= VersionFormat( ""1.23"" ), ""pyglui out of date, please upgrade to newest version"" runtime_plugins = import_runtime_plugins(os.path.join(user_dir, ""plugins"")) system_plugins = [ Log_Display, Seek_Control, Plugin_Manager, System_Graphs, System_Timelines, Audio_Playback, ] user_plugins = [ Vis_Circle, Vis_Fixation, Vis_Polyline, Vis_Light_Points, Vis_Cross, Vis_Watermark, Vis_Eye_Video_Overlay, # Vis_Scan_Path, Offline_Fixation_Detector, Offline_Eye_Movement_Detector, Offline_Blink_Detection, Offline_Surface_Tracker, Raw_Data_Exporter, Annotation_Player, Log_History, Pupil_From_Recording, Offline_Pupil_Detection, GazeFromRecording, GazeFromOfflineCalibration, World_Video_Exporter, iMotions_Exporter, Eye_Video_Exporter, ] + runtime_plugins plugins = system_plugins + user_plugins # Callback functions def on_resize(window, w, h): nonlocal window_size nonlocal hdpi_factor if w == 0 or h == 0: return hdpi_factor = glfw.getHDPIFactor(window) g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor window_size = w, h 
g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h g_pool.gui.update_window(*window_size) g_pool.gui.collect_menus() for p in g_pool.plugins: p.on_window_resize(window, *g_pool.camera_render_size) def on_window_key(window, key, scancode, action, mods): g_pool.gui.update_key(key, scancode, action, mods) def on_window_char(window, char): g_pool.gui.update_char(char) def on_window_mouse_button(window, button, action, mods): g_pool.gui.update_button(button, action, mods) def on_pos(window, x, y): x, y = x * hdpi_factor, y * hdpi_factor g_pool.gui.update_mouse(x, y) pos = x, y pos = normalize(pos, g_pool.camera_render_size) # Position in img pixels pos = denormalize(pos, g_pool.capture.frame_size) for p in g_pool.plugins: p.on_pos(pos) def on_scroll(window, x, y): g_pool.gui.update_scroll(x, y * scroll_factor) def on_drop(window, count, paths): for x in range(count): new_rec_dir = paths[x].decode(""utf-8"") if pm.is_pupil_rec_dir(new_rec_dir): logger.debug(""Starting new session with '{}'"".format(new_rec_dir)) ipc_pub.notify( { ""subject"": ""player_drop_process.should_start"", ""rec_dir"": new_rec_dir, } ) glfw.glfwSetWindowShouldClose(window, True) else: logger.error( ""'{}' is not a valid pupil recording"".format(new_rec_dir) ) tick = delta_t() def get_dt(): return next(tick) meta_info = pm.load_meta_info(rec_dir) # log info about Pupil Platform and Platform in player.log logger.info(""Application Version: {}"".format(app_version)) logger.info(""System Info: {}"".format(get_system_info())) icon_bar_width = 50 window_size = None hdpi_factor = 1.0 # create container for globally scoped vars g_pool = Global_Container() g_pool.app = ""player"" g_pool.zmq_ctx = zmq_ctx g_pool.ipc_pub = ipc_pub g_pool.ipc_pub_url = ipc_pub_url g_pool.ipc_sub_url = ipc_sub_url g_pool.ipc_push_url = ipc_push_url g_pool.plugin_by_name = {p.__name__: p for p in plugins} g_pool.camera_render_size = None valid_ext = ("".mp4"", "".mkv"", "".avi"", "".h264"", "".mjpeg"", "".fake"") video_path = [ f for f in glob(os.path.join(rec_dir, ""world.*"")) if os.path.splitext(f)[1] in valid_ext ][0] File_Source( g_pool, timing=""external"", source_path=video_path, buffered_decoding=True, fill_gaps=True, ) # load session persistent settings session_settings = Persistent_Dict( os.path.join(user_dir, ""user_settings_player"") ) if VersionFormat(session_settings.get(""version"", ""0.0"")) != app_version: logger.info( ""Session setting are a different version of this app. 
I will not use those."" ) session_settings.clear() width, height = g_pool.capture.frame_size width += icon_bar_width width, height = session_settings.get(""window_size"", (width, height)) window_pos = session_settings.get(""window_position"", window_position_default) window_name = ""Pupil Player: {} - {}"".format( meta_info[""Recording Name""], os.path.split(rec_dir)[-1] ) glfw.glfwInit() main_window = glfw.glfwCreateWindow(width, height, window_name, None, None) glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1]) glfw.glfwMakeContextCurrent(main_window) cygl.utils.init() g_pool.main_window = main_window def set_scale(new_scale): g_pool.gui_user_scale = new_scale window_size = ( g_pool.camera_render_size[0] + int(icon_bar_width * g_pool.gui_user_scale * hdpi_factor), glfw.glfwGetFramebufferSize(main_window)[1], ) logger.warning(icon_bar_width * g_pool.gui_user_scale * hdpi_factor) glfw.glfwSetWindowSize(main_window, *window_size) # load pupil_positions, gaze_positions g_pool.binocular = meta_info.get(""Eye Mode"", ""monocular"") == ""binocular"" g_pool.version = app_version g_pool.timestamps = g_pool.capture.timestamps g_pool.get_timestamp = lambda: 0.0 g_pool.user_dir = user_dir g_pool.rec_dir = rec_dir g_pool.meta_info = meta_info g_pool.min_data_confidence = session_settings.get(""min_data_confidence"", 0.6) g_pool.min_calibration_confidence = session_settings.get( ""min_calibration_confidence"", 0.8 ) # populated by producers g_pool.pupil_positions = pm.Bisector() g_pool.pupil_positions_by_id = (pm.Bisector(), pm.Bisector()) g_pool.gaze_positions = pm.Bisector() g_pool.fixations = pm.Affiliator() g_pool.eye_movements = pm.Affiliator() def set_data_confidence(new_confidence): g_pool.min_data_confidence = new_confidence notification = {""subject"": ""min_data_confidence_changed""} notification[""_notify_time_""] = time() + 0.8 g_pool.ipc_pub.notify(notification) def do_export(_): left_idx = g_pool.seek_control.trim_left right_idx = g_pool.seek_control.trim_right export_range = left_idx, right_idx + 1 # exclusive range.stop export_dir = os.path.join(g_pool.rec_dir, ""exports"") export_dir = next_export_sub_dir(export_dir) os.makedirs(export_dir) logger.info('Created export dir at ""{}""'.format(export_dir)) export_info = { ""Player Software Version"": str(g_pool.version), ""Data Format Version"": meta_info[""Data Format Version""], ""Export Date"": strftime(""%d.%m.%Y"", localtime()), ""Export Time"": strftime(""%H:%M:%S"", localtime()), ""Frame Index Range:"": g_pool.seek_control.get_frame_index_trim_range_string(), ""Relative Time Range"": g_pool.seek_control.get_rel_time_trim_range_string(), ""Absolute Time Range"": g_pool.seek_control.get_abs_time_trim_range_string(), } with open(os.path.join(export_dir, ""export_info.csv""), ""w"") as csv: write_key_value_file(csv, export_info) notification = { ""subject"": ""should_export"", ""range"": export_range, ""export_dir"": export_dir, } g_pool.ipc_pub.notify(notification) def reset_restart(): logger.warning(""Resetting all settings and restarting Player."") glfw.glfwSetWindowShouldClose(main_window, True) ipc_pub.notify({""subject"": ""clear_settings_process.should_start""}) ipc_pub.notify( { ""subject"": ""player_process.should_start"", ""rec_dir"": rec_dir, ""delay"": 2.0, } ) def toggle_general_settings(collapsed): # this is the menu toggle logic. # Only one menu can be open. # If no menu is open the menubar should collapse. 
g_pool.menubar.collapsed = collapsed for m in g_pool.menubar.elements: m.collapsed = True general_settings.collapsed = collapsed g_pool.gui = ui.UI() g_pool.gui_user_scale = session_settings.get(""gui_scale"", 1.0) g_pool.menubar = ui.Scrolling_Menu( ""Settings"", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos=""left"" ) g_pool.iconbar = ui.Scrolling_Menu( ""Icons"", pos=(-icon_bar_width, 0), size=(0, 0), header_pos=""hidden"" ) g_pool.timelines = ui.Container((0, 0), (0, 0), (0, 0)) g_pool.timelines.horizontal_constraint = g_pool.menubar g_pool.user_timelines = ui.Timeline_Menu( ""User Timelines"", pos=(0.0, -150.0), size=(0.0, 0.0), header_pos=""headline"" ) g_pool.user_timelines.color = RGBA(a=0.0) g_pool.user_timelines.collapsed = True # add container that constaints itself to the seekbar height vert_constr = ui.Container((0, 0), (0, -50.0), (0, 0)) vert_constr.append(g_pool.user_timelines) g_pool.timelines.append(vert_constr) def set_window_size(): f_width, f_height = g_pool.capture.frame_size f_width += int(icon_bar_width * g_pool.gui.scale) glfw.glfwSetWindowSize(main_window, f_width, f_height) general_settings = ui.Growing_Menu(""General"", header_pos=""headline"") general_settings.append(ui.Button(""Reset window size"", set_window_size)) general_settings.append( ui.Selector( ""gui_user_scale"", g_pool, setter=set_scale, selection=[0.8, 0.9, 1.0, 1.1, 1.2] + list(np.arange(1.5, 5.1, 0.5)), label=""Interface Size"", ) ) general_settings.append( ui.Info_Text(""Player Version: {}"".format(g_pool.version)) ) general_settings.append( ui.Info_Text( ""Capture Version: {}"".format(meta_info[""Capture Software Version""]) ) ) general_settings.append( ui.Info_Text( ""Data Format Version: {}"".format(meta_info[""Data Format Version""]) ) ) general_settings.append( ui.Info_Text( ""High level data, e.g. 
fixations, or visualizations only consider gaze data that has an equal or higher confidence than the minimum data confidence."" ) ) general_settings.append( ui.Slider( ""min_data_confidence"", g_pool, setter=set_data_confidence, step=0.05, min=0.0, max=1.0, label=""Minimum data confidence"", ) ) general_settings.append( ui.Button(""Restart with default settings"", reset_restart) ) g_pool.menubar.append(general_settings) icon = ui.Icon( ""collapsed"", general_settings, label=chr(0xE8B8), on_val=False, off_val=True, setter=toggle_general_settings, label_font=""pupil_icons"", ) icon.tooltip = ""General Settings"" g_pool.iconbar.append(icon) user_plugin_separator = ui.Separator() user_plugin_separator.order = 0.35 g_pool.iconbar.append(user_plugin_separator) g_pool.quickbar = ui.Stretching_Menu(""Quick Bar"", (0, 100), (100, -100)) g_pool.export_button = ui.Thumb( ""export"", label=chr(0xE2C5), getter=lambda: False, setter=do_export, hotkey=""e"", label_font=""pupil_icons"", ) g_pool.quickbar.extend([g_pool.export_button]) g_pool.gui.append(g_pool.menubar) g_pool.gui.append(g_pool.timelines) g_pool.gui.append(g_pool.iconbar) g_pool.gui.append(g_pool.quickbar) # we always load these plugins default_plugins = [ (""Plugin_Manager"", {}), (""Seek_Control"", {}), (""Log_Display"", {}), (""Raw_Data_Exporter"", {}), (""Vis_Polyline"", {}), (""Vis_Circle"", {}), (""System_Graphs"", {}), (""System_Timelines"", {}), (""World_Video_Exporter"", {}), (""Pupil_From_Recording"", {}), (""GazeFromRecording"", {}), (""Audio_Playback"", {}), ] g_pool.plugins = Plugin_List( g_pool, session_settings.get(""loaded_plugins"", default_plugins) ) # Manually add g_pool.capture to the plugin list g_pool.plugins._plugins.append(g_pool.capture) g_pool.plugins._plugins.sort(key=lambda p: p.order) g_pool.capture.init_ui() general_settings.insert( -1, ui.Text_Input( ""rel_time_trim_section"", getter=g_pool.seek_control.get_rel_time_trim_range_string, setter=g_pool.seek_control.set_rel_time_trim_range_string, label=""Relative time range to export"", ), ) general_settings.insert( -1, ui.Text_Input( ""frame_idx_trim_section"", getter=g_pool.seek_control.get_frame_index_trim_range_string, setter=g_pool.seek_control.set_frame_index_trim_range_string, label=""Frame index range to export"", ), ) # Register callbacks main_window glfw.glfwSetFramebufferSizeCallback(main_window, on_resize) glfw.glfwSetKeyCallback(main_window, on_window_key) glfw.glfwSetCharCallback(main_window, on_window_char) glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button) glfw.glfwSetCursorPosCallback(main_window, on_pos) glfw.glfwSetScrollCallback(main_window, on_scroll) glfw.glfwSetDropCallback(main_window, on_drop) toggle_general_settings(True) g_pool.gui.configuration = session_settings.get(""ui_config"", {}) # gl_state settings gl_utils.basic_gl_setup() g_pool.image_tex = Named_Texture() # trigger on_resize on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window)) def handle_notifications(n): subject = n[""subject""] if subject == ""start_plugin"": g_pool.plugins.add( g_pool.plugin_by_name[n[""name""]], args=n.get(""args"", {}) ) elif subject.startswith(""meta.should_doc""): ipc_pub.notify( {""subject"": ""meta.doc"", ""actor"": g_pool.app, ""doc"": player.__doc__} ) for p in g_pool.plugins: if ( p.on_notify.__doc__ and p.__class__.on_notify != Plugin.on_notify ): ipc_pub.notify( { ""subject"": ""meta.doc"", ""actor"": p.class_name, ""doc"": p.on_notify.__doc__, } ) while not glfw.glfwWindowShouldClose(main_window): # fetch newest 
notifications new_notifications = [] while notify_sub.new_data: t, n = notify_sub.recv() new_notifications.append(n) # notify each plugin if there are new notifications: for n in new_notifications: handle_notifications(n) for p in g_pool.plugins: p.on_notify(n) events = {} # report time between now and the last loop interation events[""dt""] = get_dt() # pupil and gaze positions are added by their respective producer plugins events[""pupil""] = [] events[""gaze""] = [] # allow each Plugin to do its work. for p in g_pool.plugins: p.recent_events(events) # check if a plugin need to be destroyed g_pool.plugins.clean() glfw.glfwMakeContextCurrent(main_window) # render visual feedback from loaded plugins if gl_utils.is_window_visible(main_window): gl_utils.glViewport(0, 0, *g_pool.camera_render_size) g_pool.capture.gl_display() for p in g_pool.plugins: p.gl_display() gl_utils.glViewport(0, 0, *window_size) try: clipboard = glfw.glfwGetClipboardString(main_window).decode() except AttributeError: # clipbaord is None, might happen on startup clipboard = """" g_pool.gui.update_clipboard(clipboard) user_input = g_pool.gui.update() if user_input.clipboard and user_input.clipboard != clipboard: # only write to clipboard if content changed glfw.glfwSetClipboardString( main_window, user_input.clipboard.encode() ) for b in user_input.buttons: button, action, mods = b x, y = glfw.glfwGetCursorPos(main_window) pos = x * hdpi_factor, y * hdpi_factor pos = normalize(pos, g_pool.camera_render_size) pos = denormalize(pos, g_pool.capture.frame_size) for p in g_pool.plugins: p.on_click(pos, button, action) for key, scancode, action, mods in user_input.keys: for p in g_pool.plugins: p.on_key(key, scancode, action, mods) for char_ in user_input.chars: for p in g_pool.plugins: p.on_char(char_) # present frames at appropriate speed g_pool.seek_control.wait(events[""frame""].timestamp) glfw.glfwSwapBuffers(main_window) glfw.glfwPollEvents() session_settings[""loaded_plugins""] = g_pool.plugins.get_initializers() session_settings[""min_data_confidence""] = g_pool.min_data_confidence session_settings[ ""min_calibration_confidence"" ] = g_pool.min_calibration_confidence session_settings[""gui_scale""] = g_pool.gui_user_scale session_settings[""ui_config""] = g_pool.gui.configuration session_settings[""window_position""] = glfw.glfwGetWindowPos(main_window) session_settings[""version""] = str(g_pool.version) session_window_size = glfw.glfwGetWindowSize(main_window) if 0 not in session_window_size: session_settings[""window_size""] = session_window_size session_settings.close() # de-init all running plugins for p in g_pool.plugins: p.alive = False g_pool.plugins.clean() g_pool.gui.terminate() glfw.glfwDestroyWindow(main_window) except: import traceback trace = traceback.format_exc() logger.error(""Process Player crashed with trace:\n{}"".format(trace)) finally: logger.info(""Process shutting down."") ipc_pub.notify({""subject"": ""player_process.stopped""}) sleep(1.0) " 32024,"def revoke_user_session_command(client: MsGraphClient, args: Dict): user = args.get('user') client.revoke_user_session(user) human_readable = f'User: ""{user}"" session has been revoked successfully' return human_readable, None, None ","def revoke_user_session_command(client: MsGraphClient, args: Dict): user = args.get('user') client.revoke_user_session(user) human_readable = f'User: ""{user}"" sessions have been revoked successfully.' 
return human_readable, None, None " 40660,"def get_tb_writer(tensorboard_log_dir: Optional[str]) -> Optional[TBWriter]: try: if tensorboard_log_dir is not None: tb_writer = TBWriter(tensorboard_log_dir) else: tb_writer = None except ImportError: log.error('Failed to import SummaryWriter from torch.utils.tensorboard.Failed to initialize Tensorboard ' 'logger. Install appropriate Pytorch version to use this logger or remove tensorboard_log_dir ' 'parameter from the train parameters list in the configuration file.') tb_writer = None return tb_writer ","def get_tb_writer(tensorboard_log_dir: Optional[str]) -> Optional[TBWriter]: try: if tensorboard_log_dir is not None: tb_writer = TBWriter(tensorboard_log_dir) else: tb_writer = None except ImportError: log.error('Failed to import SummaryWriter from torch.utils.tensorboard. Failed to initialize Tensorboard ' 'logger. Install appropriate Pytorch version to use this logger or remove tensorboard_log_dir ' 'parameter from the train parameters list in the configuration file.') tb_writer = None return tb_writer " 26921,"def chain(*tasks: Union[BaseOperator, ""XComArg"", Sequence[BaseOperator], Sequence[""XComArg""]]): r"""""" Given a number of tasks, builds a dependency chain. Support mix airflow.models.BaseOperator, List[airflow.models.BaseOperator], XComArg, and List[airflow.models.XComArg]. If you want to chain between two List[airflow.models.BaseOperator] or List[airflow.models.XComArg], you have to make sure they have same length. .. code-block:: python chain(t1, [t2, t3], [t4, t5], t6) is equivalent to:: / -> t2 -> t4 \ t1 -> t6 \ -> t3 -> t5 / .. code-block:: python t1.set_downstream(t2) t1.set_downstream(t3) t2.set_downstream(t4) t3.set_downstream(t5) t4.set_downstream(t6) t5.set_downstream(t6) :param tasks: List of tasks, List[airflow.models.BaseOperator], XComArg, or List[airflow.models.XComArg] to set dependencies :type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator, List[airflow.models.XComArg], or XComArg """""" from airflow.models.xcom_arg import XComArg for index, up_task in enumerate(tasks[:-1]): down_task = tasks[index + 1] if isinstance(up_task, (BaseOperator, XComArg)): up_task.set_downstream(down_task) continue if isinstance(down_task, (BaseOperator, XComArg)): down_task.set_upstream(up_task) continue if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence): raise TypeError( ""Chain not supported between instances of {up_type} and {down_type}"".format( up_type=type(up_task), down_type=type(down_task) ) ) up_task_list = up_task down_task_list = down_task if len(up_task_list) != len(down_task_list): raise AirflowException( f""Chain not supported different length Iterable "" f""but get {len(up_task_list)} and {len(down_task_list)}"" ) for up_t, down_t in zip(up_task_list, down_task_list): up_t.set_downstream(down_t) ","def chain(*tasks: Union[BaseOperator, ""XComArg"", Sequence[BaseOperator], Sequence[""XComArg""]]): r"""""" Given a number of tasks, builds a dependency chain. Support mix airflow.models.BaseOperator, List[airflow.models.BaseOperator], XComArg, and List[airflow.models.XComArg]. If you want to chain between two List[airflow.models.BaseOperator] or List[airflow.models.XComArg], you have to make sure they have same length. .. code-block:: python chain(t1, [t2, t3], [t4, t5], t6) is equivalent to:: / -> t2 -> t4 \ t1 -> t6 \ -> t3 -> t5 / .. 
code-block:: python t1.set_downstream(t2) t1.set_downstream(t3) t2.set_downstream(t4) t3.set_downstream(t5) t4.set_downstream(t6) t5.set_downstream(t6) :param tasks: List of tasks or XComArgs to set dependencies :type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator, List[airflow.models.XComArg], or XComArg """""" from airflow.models.xcom_arg import XComArg for index, up_task in enumerate(tasks[:-1]): down_task = tasks[index + 1] if isinstance(up_task, (BaseOperator, XComArg)): up_task.set_downstream(down_task) continue if isinstance(down_task, (BaseOperator, XComArg)): down_task.set_upstream(up_task) continue if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence): raise TypeError( ""Chain not supported between instances of {up_type} and {down_type}"".format( up_type=type(up_task), down_type=type(down_task) ) ) up_task_list = up_task down_task_list = down_task if len(up_task_list) != len(down_task_list): raise AirflowException( f""Chain not supported different length Iterable "" f""but get {len(up_task_list)} and {len(down_task_list)}"" ) for up_t, down_t in zip(up_task_list, down_task_list): up_t.set_downstream(down_t) " 59303,"def _ks_assemble(asm, mode=CS_MODE_ARM): global ks, ks_thumb from keystone import Ks, KS_ARCH_ARM, KS_MODE_ARM, KS_MODE_THUMB if ks is None: ks = Ks(KS_ARCH_ARM, KS_MODE_ARM) if ks_thumb is None: ks_thumb = Ks(KS_ARCH_ARM, KS_MODE_THUMB) if CS_MODE_ARM == mode: ords = ks.asm(asm)[0] elif CS_MODE_THUMB == mode: ords = ks_thumb.asm(asm)[0] else: raise Exception(f""bad processor mode for assembly: {mode}"") if not ords: raise Exception(f""bad assembly: {asm}"") return binascii.hexlify(bytearray(ords)) ","def _ks_assemble(asm: str, mode=CS_MODE_ARM) -> bytes: """"""Assemble the given string using Keystone using the specified CPU mode."""""" # Explicitly uses late importing so that Keystone will only be imported if this is called. # This lets us avoid requiring installation of Keystone for running tests. global ks, ks_thumb from keystone import Ks, KS_ARCH_ARM, KS_MODE_ARM, KS_MODE_THUMB if ks is None: ks = Ks(KS_ARCH_ARM, KS_MODE_ARM) if ks_thumb is None: ks_thumb = Ks(KS_ARCH_ARM, KS_MODE_THUMB) if CS_MODE_ARM == mode: ords = ks.asm(asm)[0] elif CS_MODE_THUMB == mode: ords = ks_thumb.asm(asm)[0] else: raise Exception(f""bad processor mode for assembly: {mode}"") if not ords: raise Exception(f""bad assembly: {asm}"") return binascii.hexlify(bytearray(ords)) " 48389,"def cryptography_compare_public_keys(key1, key2): '''Tests whether two public keys are the same. Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers(). 
''' if CRYPTOGRAPHY_HAS_ED25519: a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey) b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey) if a or b: if not a or not b: return False a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) return a == b if CRYPTOGRAPHY_HAS_ED448: a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey) b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey) if a or b: if not a or not b: return False a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) return a == b return key1.public_numbers() == key2.public_numbers() ","def cryptography_compare_public_keys(key1, key2): '''Tests whether two public keys are the same. Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers(). ''' if CRYPTOGRAPHY_HAS_ED25519: a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey) b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey) if a or b: if not a or not b: return False a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) return a == b if CRYPTOGRAPHY_HAS_ED448: a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey) b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey) if not a or not b: return False else: a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) return a == b if not a or not b: return False a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw) return a == b return key1.public_numbers() == key2.public_numbers() " 27253,"def test_table_drop_consistency(): t = ibis.table( [('a', 'int64'), ('b', 'string'), ('c', 'timestamp')], name='t' ) e1 = t.projection([""a"", ""c""]) e2 = t.drop([""b""]) e3 = t.drop(""b"") assert e1.schema() == e2.schema() assert e1.schema() == e3.schema() assert not(e1.schema() == t.schema()) assert not(""b"" in e1.columns) assert ""a"" in e1.columns assert ""c"" in e2.columns ","def test_table_drop_consistency(): t = ibis.table( [('a', 'int64'), ('b', 'string'), ('c', 'timestamp')], name='t' ) e1 = t.projection([""a"", ""c""]) e2 = t.drop([""b""]) e3 = t.drop(""b"") assert e1.schema() == e2.schema() assert e1.schema() == e3.schema() assert not(e1.schema() == t.schema()) assert ""b"" not in e1.columns assert ""a"" in e1.columns assert ""c"" in e2.columns " 36536,"def create(*, isolated=True): """""" create() -> Interpreter Initialize a new (idle) Python interpreter. """""" id = _interpreters.create() return Interpreter(id) ","def create(*, isolated=True): """""" create() -> Interpreter Initialize a new (idle) Python interpreter. 
"""""" id = _interpreters.create(isolated=isolated) return Interpreter(id) " 57913,"def redlock_get_scan_status(): """""" Get DevOps Scan Status """""" scan_id = demisto.args().get('scan_id', None) response = req('GET', f'iac/v2/scans/{scan_id}/status', param_data={}, data={}) if ( not response or 'data' not in response ): demisto.results('No results found') else: result = response['data'] readable_output = { ""ID"": result.get('id'), ""Status"": result.get('attributes')['status'] } md = tableToMarkdown(""Scan Status:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': result}, 'HumanReadable': md }) ","def redlock_get_scan_status(): """""" Returns the status of the asynchronous IaC scan job that has the specified scan ID. See Also: https://prisma.pan.dev/api/cloud/cspm/iac-scan/#operation/getAsyncScanStatus """""" scan_id = demisto.args().get('scan_id', None) response = req('GET', f'iac/v2/scans/{scan_id}/status', param_data={}, data={}) if ( not response or 'data' not in response ): demisto.results('No results found') else: result = response['data'] readable_output = { ""ID"": result.get('id'), ""Status"": result.get('attributes')['status'] } md = tableToMarkdown(""Scan Status:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': result}, 'HumanReadable': md }) " 34568,"def _request_export_info() -> Tuple[Text, Text, Text]: """"""Request file path and export stories & nlu data to that path"""""" # export training data and quit questions = questionary.form( export_stories=questionary.text( message=""Export stories to (if file exists, this "" ""will append the stories)"", default=PATHS[""stories""], validate=io_utils.file_type_validator( ["".md"", "".yml""], ""Please provide a valid export path for the stories, e.g. 'stories.md'."", ), ), export_nlu=questionary.text( message=""Export NLU data to (if file exists, this will "" ""merge learned data with previous training examples)"", default=PATHS[""nlu""], validate=io_utils.file_type_validator( ["".md"", "".json"", "".yml""], ""Please provide a valid export path for the NLU data, e.g. 'nlu.md'."", ), ), export_domain=questionary.text( message=""Export domain file to (if file exists, this "" ""will be overwritten)"", default=PATHS[""domain""], validate=io_utils.file_type_validator( ["".yml"", "".yaml""], ""Please provide a valid export path for the domain file, e.g. 'domain.yml'."", ), ), ) answers = questions.ask() if not answers: raise Abort() return answers[""export_stories""], answers[""export_nlu""], answers[""export_domain""] ","def _request_export_info() -> Tuple[Text, Text, Text]: """"""Request file path and export stories & nlu data to that path"""""" # export training data and quit questions = questionary.form( export_stories=questionary.text( message=""Export stories to (if file exists, this "" ""will append the stories)"", default=PATHS[""stories""], validate=io_utils.file_type_validator( ["".md"", "".yml""], ""Please provide a valid export path for the stories, e.g. 'stories.md'."", ), ), export_nlu=questionary.text( message=""Export NLU data to (if file exists, this will "" ""merge learned data with previous training examples)"", default=PATHS[""nlu""], validate=io_utils.file_type_validator( ["".md"", "".json"", "".yml""], ""Please provide a valid export path for the NLU data, e.g. 
'nlu.yml'."", ), ), export_domain=questionary.text( message=""Export domain file to (if file exists, this "" ""will be overwritten)"", default=PATHS[""domain""], validate=io_utils.file_type_validator( ["".yml"", "".yaml""], ""Please provide a valid export path for the domain file, e.g. 'domain.yml'."", ), ), ) answers = questions.ask() if not answers: raise Abort() return answers[""export_stories""], answers[""export_nlu""], answers[""export_domain""] " 31021,"def panorama_route_lookup(dest_ip: str, virtual_router=None): """""" Given the provided ip address, looks up the outgoing interface and zone on the firewall. """""" if not VSYS: raise Exception(""The 'panorama-route-lookup' command is only relevant for a Firewall instance."") response = panorama_get_routes(virtual_router) if 'entry' not in response['response']['result']: raise Exception(""No routes returned from the Firewall."") else: routes = response['response']['result']['entry'] ip_addr = ipaddress.ip_address(dest_ip) current_match = None matched_route = None for route in routes: subnet_raw = route['destination'] subnet = ipaddress.ip_network(subnet_raw) # If the given IP address is in the subnet if ip_addr in subnet: # IF we haven't matched yet if not current_match: current_match = subnet matched_route = route # If this is a greater subnet elif subnet.prefixlen > current_match.prefixlen: current_match = subnet matched_route = route if matched_route: return matched_route else: raise Exception(""Route not found."") ","def panorama_route_lookup(dest_ip: str, virtual_router=None): """""" Given the provided ip address, looks up the outgoing interface and zone on the firewall. """""" if not VSYS: raise DemistoException(""The 'panorama-route-lookup' command is only relevant for a Firewall instance."") response = panorama_get_routes(virtual_router) if 'entry' not in response['response']['result']: raise Exception(""No routes returned from the Firewall."") else: routes = response['response']['result']['entry'] ip_addr = ipaddress.ip_address(dest_ip) current_match = None matched_route = None for route in routes: subnet_raw = route['destination'] subnet = ipaddress.ip_network(subnet_raw) # If the given IP address is in the subnet if ip_addr in subnet: # IF we haven't matched yet if not current_match: current_match = subnet matched_route = route # If this is a greater subnet elif subnet.prefixlen > current_match.prefixlen: current_match = subnet matched_route = route if matched_route: return matched_route else: raise Exception(""Route not found."") " 21867,"def _check_create(event: ""EventBase"") -> None: """"""Implementation of the auth rules for m.room.create events Args: event: The event to be checked Raises: AuthError if the event does not pass the auth rules """""" assert event.type == EventTypes.Create # 1.1 If it has any previous events, reject. if event.prev_event_ids(): raise AuthError(403, ""Create event has prev events"") # 1.2 If the domain of the room_id does not match the domain of the sender, # reject. 
sender_domain = get_domain_from_id(event.sender) room_id_domain = get_domain_from_id(event.room_id) if room_id_domain != sender_domain: raise AuthError(403, ""Creation event's room_id domain does not match sender's"") # 1.3 If content.room_version is present and is not a recognised version, reject room_version_prop = event.content.get(""room_version"", ""1"") if room_version_prop not in KNOWN_ROOM_VERSIONS: raise AuthError( 403, ""room appears to have unsupported version %s"" % (room_version_prop,), ) # 1.4 If content has no creator field, reject. if EventContentFields.ROOM_CREATOR not in event.content: raise AuthError(403, ""Create event lacks a 'creator' property"") ","def _check_create(event: ""EventBase"") -> None: """"""Implementation of the auth rules for m.room.create events Args: event: The `m.room.create` event to be checked Raises: AuthError if the event does not pass the auth rules """""" assert event.type == EventTypes.Create # 1.1 If it has any previous events, reject. if event.prev_event_ids(): raise AuthError(403, ""Create event has prev events"") # 1.2 If the domain of the room_id does not match the domain of the sender, # reject. sender_domain = get_domain_from_id(event.sender) room_id_domain = get_domain_from_id(event.room_id) if room_id_domain != sender_domain: raise AuthError(403, ""Creation event's room_id domain does not match sender's"") # 1.3 If content.room_version is present and is not a recognised version, reject room_version_prop = event.content.get(""room_version"", ""1"") if room_version_prop not in KNOWN_ROOM_VERSIONS: raise AuthError( 403, ""room appears to have unsupported version %s"" % (room_version_prop,), ) # 1.4 If content has no creator field, reject. if EventContentFields.ROOM_CREATOR not in event.content: raise AuthError(403, ""Create event lacks a 'creator' property"") " 59340,"def power_get(host, port, index): assert port is None index = int(index) assert 1 <= index <= NUMBER_OF_OUTLETS _snmp = _Snmp(host, 'public') output_status_oid = ""{}.2.0.{}"".format(OID, index) value = _snmp.get(output_status_oid) if(value == 1): # On return True if(value == 0): # Off return False if(value == 3): # Pending on - treat as on return True if(value == 2): # Pending off - treat as off return False raise ExecutionError(""failed to get SNMP value"") ","def power_get(host, port, index): assert port is None index = int(index) assert 1 <= index <= NUMBER_OF_OUTLETS _snmp = _Snmp(host, 'public') output_status_oid = ""{}.2.0.{}"".format(OID, index) value = _snmp.get(output_status_oid) if value in (1, 3): # on or pending on return True if value in (0, 2): # off or pending off return False raise ExecutionError(""failed to get SNMP value"") " 30960,"def check_if_user_exists_by_samaccountname(default_base_dn, page_size, samaccountname): query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))' entries = search_with_paging( query, default_base_dn, attributes=[""samaccountname""], size_limit=DEFAULT_LIMIT, page_size=page_size ) if entries.get('flat'): return True return False ","def check_if_user_exists_by_samaccountname(default_base_dn, page_size, samaccountname): query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))' entries = search_with_paging( query, default_base_dn, attributes=[""samaccountname""], size_limit=DEFAULT_LIMIT, page_size=1 ) if entries.get('flat'): return True return False " 46754,"def _add_subdirs(regex_string, variant, datatype, entity_definitions, modality_datatypes): """"""Add appropriate 
subdirectories as required by entities present."""""" label = ""([a-z,A-Z,0-9]*?)"" regex_dirs = ""/"" for dir_entity in DIR_ENTITIES: if dir_entity in variant[""entities""].keys(): shorthand = entity_definitions[dir_entity][""entity""] if variant[""entities""][dir_entity] == ""required"": regex_subdir = f""{shorthand}-(?P<{dir_entity}>{label})/"" else: regex_subdir = f""(|{shorthand}-(?P<{dir_entity}>{label})/)"" regex_dirs = f""{regex_dirs}{regex_subdir}"" if datatype in modality_datatypes: regex_dirs = f""{regex_dirs}{datatype}/"" regex_string = f""{regex_dirs}{regex_string}"" return regex_string ","def _add_subdirs(regex_string, variant, datatype, entity_definitions, modality_datatypes): """"""Add appropriate subdirectories as required by entities present."""""" label = ""([a-zA-Z0-9]*?)"" regex_dirs = ""/"" for dir_entity in DIR_ENTITIES: if dir_entity in variant[""entities""].keys(): shorthand = entity_definitions[dir_entity][""entity""] if variant[""entities""][dir_entity] == ""required"": regex_subdir = f""{shorthand}-(?P<{dir_entity}>{label})/"" else: regex_subdir = f""(|{shorthand}-(?P<{dir_entity}>{label})/)"" regex_dirs = f""{regex_dirs}{regex_subdir}"" if datatype in modality_datatypes: regex_dirs = f""{regex_dirs}{datatype}/"" regex_string = f""{regex_dirs}{regex_string}"" return regex_string " 35109,"def suggest_index_map( buffer: Buffer, indices: List[PrimExpr], loops: List[For], predicate: PrimExpr, ) -> Optional[IndexMap]: """"""Provided the access pattern to a buffer, suggest one of the possible layout transformation to minimize the locality of the access pattern. Parameters ---------- buffer : Buffer The buffer to be transformed. indices : List[PrimExpr] The access pattern to the buffer. loops : List[For] The loops above the buffer. predicate : PrimExpr The predicate of the access. Returns ------- index_map : Optional[IndexMap] The suggested index map. None if no transformation is suggested. """""" return _ffi_api.SuggestIndexMap( # type: ignore # pylint: disable=no-member buffer, indices, loops, predicate, ) ","def suggest_index_map( buffer: Buffer, indices: List[PrimExpr], loops: List[For], predicate: PrimExpr, ) -> Optional[IndexMap]: """"""Provided the access pattern to a buffer, suggest one of the possible layout transformation to maximize the locality of the access pattern. Parameters ---------- buffer : Buffer The buffer to be transformed. indices : List[PrimExpr] The access pattern to the buffer. loops : List[For] The loops above the buffer. predicate : PrimExpr The predicate of the access. Returns ------- index_map : Optional[IndexMap] The suggested index map. None if no transformation is suggested. """""" return _ffi_api.SuggestIndexMap( # type: ignore # pylint: disable=no-member buffer, indices, loops, predicate, ) " 43910,"def var_param_shift(tape, argnum, shift=np.pi / 2, gradient_recipes=None, f0=None): r""""""Generate the parameter-shift tapes and postprocessing methods required to compute the gradient of a gate parameter with respect to a variance value. Args: tape (.QuantumTape): quantum tape to differentiate argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. shift (float): The shift value to use for the two-term parameter-shift formula. Only valid if the operation in question supports the two-term parameter-shift rule (that is, it has two distinct eigenvalues) and ``gradient_recipes`` is ``None``. 
gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes for the parameter-shift method. One gradient recipe must be provided per trainable parameter. f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided, and the gradient recipe contains an unshifted term, this value is used, saving a quantum evaluation. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the results of the evaluated tapes. """""" argnum = argnum or tape.trainable_params # Determine the locations of any variance measurements in the measurement queue. var_mask = [m.return_type is qml.operation.Variance for m in tape.measurements] var_idx = np.where(var_mask)[0] # Get , the expectation value of the tape with unshifted parameters. expval_tape = tape.copy(copy_operations=True) # Convert all variance measurements on the tape into expectation values for i in var_idx: obs = expval_tape._measurements[i].obs expval_tape._measurements[i] = qml.measure.MeasurementProcess( qml.operation.Expectation, obs=obs ) gradient_tapes = [expval_tape] # evaluate the analytic derivative of pdA_tapes, pdA_fn = expval_param_shift(expval_tape, argnum, shift, gradient_recipes, f0) gradient_tapes.extend(pdA_tapes) # Store the number of first derivative tapes, so that we know # the number of results to post-process later. tape_boundary = len(pdA_tapes) + 1 # If there are non-involutory observables A present, we must compute d/dp. # Get the indices in the measurement queue of all non-involutory # observables. non_involutory = [] for i in var_idx: obs_name = tape.observables[i].name if isinstance(obs_name, list): # Observable is a tensor product, we must investigate all constituent observables. if any(name in NONINVOLUTORY_OBS for name in obs_name): non_involutory.append(i) elif obs_name in NONINVOLUTORY_OBS: non_involutory.append(i) # For involutory observables (A^2 = I) we have d/dp = 0. involutory = set(var_idx) - set(non_involutory) if non_involutory: expval_sq_tape = tape.copy(copy_operations=True) for i in non_involutory: # We need to calculate d/dp; to do so, we replace the # involutory observables A in the queue with A^2. obs = _square_observable(expval_sq_tape._measurements[i].obs) expval_sq_tape._measurements[i] = qml.measure.MeasurementProcess( qml.operation.Expectation, obs=obs ) # Non-involutory observables are present; the partial derivative of # may be non-zero. Here, we calculate the analytic derivatives of the # observables. pdA2_tapes, pdA2_fn = expval_param_shift( expval_sq_tape, argnum, shift, gradient_recipes, f0 ) gradient_tapes.extend(pdA2_tapes) def processing_fn(results): # We need to expand the dimensions of the variance mask, # and convert it to be the same type as the results. mask = qml.math.convert_like(qml.math.reshape(var_mask, [-1, 1]), results[0]) f0 = qml.math.expand_dims(results[0], -1) pdA = pdA_fn(results[1:tape_boundary]) pdA2 = 0 if non_involutory: # compute the second derivative of non-involutory observables pdA2 = pdA2_fn(results[tape_boundary:]) if involutory: # if involutory observables are present, ensure they have zero gradient. # # For the pdA2_tapes, we have replaced non-involutory # observables with their square (A -> A^2). However, # involutory observables have been left as-is (A), and have # not been replaced by their square (A^2 = I). As a result, # components of the gradient vector will not be correct. 
We # need to replace the gradient value with 0 (the known, # correct gradient for involutory variables). m = [tape.observables[i].name not in NONINVOLUTORY_OBS for i in var_idx] m = qml.math.convert_like(m, pdA2) pdA2 = qml.math.where(qml.math.reshape(m, [-1, 1]), 0, pdA2) # return d(var(A))/dp = d/dp -2 * * d/dp for the variances (mask==True) # d/dp for plain expectations (mask==False) return qml.math.where(mask, pdA2 - 2 * f0 * pdA, pdA) return gradient_tapes, processing_fn ","def var_param_shift(tape, argnum, shift=np.pi / 2, gradient_recipes=None, f0=None): r""""""Generate the parameter-shift tapes and postprocessing methods required to compute the gradient of a gate parameter with respect to a variance value. Args: tape (.QuantumTape): quantum tape to differentiate This gradient transform can be applied directly to :class:`~.QNode` objects: argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. shift (float): The shift value to use for the two-term parameter-shift formula. Only valid if the operation in question supports the two-term parameter-shift rule (that is, it has two distinct eigenvalues) and ``gradient_recipes`` is ``None``. gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes for the parameter-shift method. One gradient recipe must be provided per trainable parameter. f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided, and the gradient recipe contains an unshifted term, this value is used, saving a quantum evaluation. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the results of the evaluated tapes. """""" argnum = argnum or tape.trainable_params # Determine the locations of any variance measurements in the measurement queue. var_mask = [m.return_type is qml.operation.Variance for m in tape.measurements] var_idx = np.where(var_mask)[0] # Get , the expectation value of the tape with unshifted parameters. expval_tape = tape.copy(copy_operations=True) # Convert all variance measurements on the tape into expectation values for i in var_idx: obs = expval_tape._measurements[i].obs expval_tape._measurements[i] = qml.measure.MeasurementProcess( qml.operation.Expectation, obs=obs ) gradient_tapes = [expval_tape] # evaluate the analytic derivative of pdA_tapes, pdA_fn = expval_param_shift(expval_tape, argnum, shift, gradient_recipes, f0) gradient_tapes.extend(pdA_tapes) # Store the number of first derivative tapes, so that we know # the number of results to post-process later. tape_boundary = len(pdA_tapes) + 1 # If there are non-involutory observables A present, we must compute d/dp. # Get the indices in the measurement queue of all non-involutory # observables. non_involutory = [] for i in var_idx: obs_name = tape.observables[i].name if isinstance(obs_name, list): # Observable is a tensor product, we must investigate all constituent observables. if any(name in NONINVOLUTORY_OBS for name in obs_name): non_involutory.append(i) elif obs_name in NONINVOLUTORY_OBS: non_involutory.append(i) # For involutory observables (A^2 = I) we have d/dp = 0. involutory = set(var_idx) - set(non_involutory) if non_involutory: expval_sq_tape = tape.copy(copy_operations=True) for i in non_involutory: # We need to calculate d/dp; to do so, we replace the # involutory observables A in the queue with A^2. 
obs = _square_observable(expval_sq_tape._measurements[i].obs) expval_sq_tape._measurements[i] = qml.measure.MeasurementProcess( qml.operation.Expectation, obs=obs ) # Non-involutory observables are present; the partial derivative of # may be non-zero. Here, we calculate the analytic derivatives of the # observables. pdA2_tapes, pdA2_fn = expval_param_shift( expval_sq_tape, argnum, shift, gradient_recipes, f0 ) gradient_tapes.extend(pdA2_tapes) def processing_fn(results): # We need to expand the dimensions of the variance mask, # and convert it to be the same type as the results. mask = qml.math.convert_like(qml.math.reshape(var_mask, [-1, 1]), results[0]) f0 = qml.math.expand_dims(results[0], -1) pdA = pdA_fn(results[1:tape_boundary]) pdA2 = 0 if non_involutory: # compute the second derivative of non-involutory observables pdA2 = pdA2_fn(results[tape_boundary:]) if involutory: # if involutory observables are present, ensure they have zero gradient. # # For the pdA2_tapes, we have replaced non-involutory # observables with their square (A -> A^2). However, # involutory observables have been left as-is (A), and have # not been replaced by their square (A^2 = I). As a result, # components of the gradient vector will not be correct. We # need to replace the gradient value with 0 (the known, # correct gradient for involutory variables). m = [tape.observables[i].name not in NONINVOLUTORY_OBS for i in var_idx] m = qml.math.convert_like(m, pdA2) pdA2 = qml.math.where(qml.math.reshape(m, [-1, 1]), 0, pdA2) # return d(var(A))/dp = d/dp -2 * * d/dp for the variances (mask==True) # d/dp for plain expectations (mask==False) return qml.math.where(mask, pdA2 - 2 * f0 * pdA, pdA) return gradient_tapes, processing_fn " 8755,"def etymology(word): # @@ sbp, would it be possible to have a flag for .ety to get 2nd/etc # entries? - http://swhack.com/logs/2006-07-19#T15-05-29 if len(word) == 0: return 'No word added.' if len(word) > 25: return ""Word too long: %s[…]"" % word[:10] ety = get(ETYURI % web.quote(word)) if ety.status_code != 200: return None # Let's find it start = ety.text.find(""word__defination"") start = ety.text.find(""

"", start) stop = ety.text.find(""

"", start) sentence = ety.text[start + 3:stop] # Clean up sentence = unescape(sentence) sentence = sub('<[^<]+?>', '', sentence) maxlength = 275 if len(sentence) > maxlength: sentence = sentence[:maxlength] words = sentence[:-5].split(' ') words.pop() sentence = ' '.join(words) + ' […]' sentence = '""' + sentence.replace('""', ""'"") + '""' return sentence + ' - ' + (ETYURI % web.quote(word)) ","def etymology(word): # @@ sbp, would it be possible to have a flag for .ety to get 2nd/etc # entries? - http://swhack.com/logs/2006-07-19#T15-05-29 if len(word) == 0: raise ValueError('No word to look for.') if len(word) > 25: return ""Word too long: %s[…]"" % word[:10] ety = get(ETYURI % web.quote(word)) if ety.status_code != 200: return None # Let's find it start = ety.text.find(""word__defination"") start = ety.text.find(""

"", start) stop = ety.text.find(""

"", start) sentence = ety.text[start + 3:stop] # Clean up sentence = unescape(sentence) sentence = sub('<[^<]+?>', '', sentence) maxlength = 275 if len(sentence) > maxlength: sentence = sentence[:maxlength] words = sentence[:-5].split(' ') words.pop() sentence = ' '.join(words) + ' […]' sentence = '""' + sentence.replace('""', ""'"") + '""' return sentence + ' - ' + (ETYURI % web.quote(word)) " 3857,"def floyd_warshall_numpy(G, nodelist=None, weight=""weight""): """"""Find all-pairs shortest path lengths using Floyd's algorithm. This algorithm for finding shortest paths takes advantage of matrix representations of a graph and works well for dense graphs where all-pairs shortest path is desired. The results are returns in a numpy array with each column and row representing a node and entries providing the distance along the shortest path between that row's node and column's node. If no path exists the distance is Inf. Parameters ---------- G : NetworkX graph nodelist : list, optional (default= the order of G.nodes) The rows and columns are ordered by the nodes in nodelist. If nodelist is None then the ordering is produced by G.nodes. Nodelist should include all nodes in G. weight: string, optional (default= 'weight') Edge data key corresponding to the edge weight. Returns ------- distance : NumPy matrix A matrix of shortest path distances between nodes. If there is no path between to nodes the corresponding matrix entry will be Inf. Notes ----- Floyd's algorithm is appropriate for finding shortest paths in dense graphs or graphs with negative weights when Dijkstra's algorithm fails. This algorithm can still fail if there are negative cycles. It has running time $O(n^3)$ with running space of $O(n^2)$. Raises ------ NetworkXError If nodelist does not contain all nodes in G. """""" import numpy as np if nodelist is not None: if not (len(nodelist) == len(G) == len(set(nodelist))): msg = ( ""nodelist must contain every node in G with no repeats."" ""If you wanted a subgraph of G use G.subgraph(nodelist)"" ) raise nx.NetworkXError(msg) # To handle cases when an edge has weight=0, we must make sure that # nonedges are not given the value 0 as well. A = nx.to_numpy_array( G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf ) n, m = A.shape np.fill_diagonal(A, 0) # diagonal elements should be zero for i in range(n): # The second term has the same shape as A due to broadcasting A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis]) return A ","def floyd_warshall_numpy(G, nodelist=None, weight=""weight""): """"""Find all-pairs shortest path lengths using Floyd's algorithm. This algorithm for finding shortest paths takes advantage of matrix representations of a graph and works well for dense graphs where all-pairs shortest path is desired. The results are returns in a numpy array with each column and row representing a node and entries providing the distance along the shortest path between that row's node and column's node. If no path exists the distance is Inf. Parameters ---------- G : NetworkX graph nodelist : list, optional (default=G.nodes) The rows and columns are ordered by the nodes in nodelist. If nodelist is None then the ordering is produced by G.nodes. Nodelist should include all nodes in G. weight: string, optional (default= 'weight') Edge data key corresponding to the edge weight. Returns ------- distance : NumPy matrix A matrix of shortest path distances between nodes. If there is no path between to nodes the corresponding matrix entry will be Inf. 
Notes ----- Floyd's algorithm is appropriate for finding shortest paths in dense graphs or graphs with negative weights when Dijkstra's algorithm fails. This algorithm can still fail if there are negative cycles. It has running time $O(n^3)$ with running space of $O(n^2)$. Raises ------ NetworkXError If nodelist does not contain all nodes in G. """""" import numpy as np if nodelist is not None: if not (len(nodelist) == len(G) == len(set(nodelist))): msg = ( ""nodelist must contain every node in G with no repeats."" ""If you wanted a subgraph of G use G.subgraph(nodelist)"" ) raise nx.NetworkXError(msg) # To handle cases when an edge has weight=0, we must make sure that # nonedges are not given the value 0 as well. A = nx.to_numpy_array( G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf ) n, m = A.shape np.fill_diagonal(A, 0) # diagonal elements should be zero for i in range(n): # The second term has the same shape as A due to broadcasting A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis]) return A " 31773,"def fetch_alarms(client: Client, limit: int, fetch_time: str, alarm_status_filter: str, alarm_rule_name_filter: str): alarm_incidents = [] last_run = demisto.getLastRun() alarm_last_run = last_run.get('AlarmLastRun') next_run = dateparser.parse(fetch_time).strftime(""%Y-%m-%dT%H:%M:%S"") alarms_list_args = {'count': limit} if alarm_last_run: alarms_list_args['created_after'] = alarm_last_run elif next_run: alarms_list_args['created_after'] = next_run # filter alerts if alarm_status_filter: alarms_list_args['alarm_status'] = alarm_status_filter # type: ignore if alarm_rule_name_filter: alarms_list_args['alarm_rule_name'] = alarm_rule_name_filter # type: ignore alarms, _ = client.alarms_list_request(**alarms_list_args) for alarm in alarms: alarm['incidentType'] = 'Alarm' incident = { 'name': f'Alarm #{str(alarm.get(""alarmId""))} {alarm.get(""alarmRuleName"")}', 'occurred': f'{alarm.get(""dateInserted"")}Z', 'rawJSON': json.dumps(alarm) } alarm_incidents.append(incident) if alarms: last_run['AlarmLastRun'] = alarms[0].get('dateInserted') demisto.setLastRun(last_run) return alarm_incidents ","def fetch_alarms(client: Client, limit: int, fetch_time: str, alarm_status_filter: str, alarm_rule_name_filter: str): alarm_incidents = [] last_run = demisto.getLastRun() alarm_last_run = last_run.get('AlarmLastRun') next_run = dateparser.parse(fetch_time).strftime(""%Y-%m-%dT%H:%M:%S"") alarms_list_args = {'count': limit} if alarm_last_run: alarms_list_args['created_after'] = alarm_last_run elif next_run: alarms_list_args['created_after'] = next_run # filter alerts if alarm_status_filter: alarms_list_args['alarm_status'] = alarm_status_filter # type: ignore if alarm_rule_name_filter: alarms_list_args['alarm_rule_name'] = alarm_rule_name_filter # type: ignore alarms, _ = client.alarms_list_request(**alarms_list_args) for alarm in alarms: alarm['incidentType'] = 'Alarm' incident = { 'name': f'Alarm #{alarm.get(""alarmId"")} {alarm.get(""alarmRuleName"")}', 'occurred': f'{alarm.get(""dateInserted"")}Z', 'rawJSON': json.dumps(alarm) } alarm_incidents.append(incident) if alarms: last_run['AlarmLastRun'] = alarms[0].get('dateInserted') demisto.setLastRun(last_run) return alarm_incidents " 7540,"def test_qtable_quantity_int_conversion(): """"""Ensure the behavior when converting ``int`` ``Column`` to ``Quantity``, the dtype will be ``float``. 
See https://github.com/astropy/astropy/issues/10964 for the rationale """""" tab = QTable(dict(time=[1, 2, 3])) tab['length'] = [9, 8, 7] with pytest.warns(UserWarning, match=""dtype is converted to float""): tab['length'].unit = u.m assert np.issubdtype(tab['length'].dtype, np.float) # same for dimensionless unit tab['col2'] = [6, 5, 4] with pytest.warns(UserWarning, match=""dtype is converted to float""): tab['col2'].unit = u.dimensionless_unscaled assert np.issubdtype(tab['col2'].dtype, np.float) # An implied behavior is that when QTable reads a file with a Column of int data with units, # it gets converted to float as well. # See: https://github.com/astropy/astropy/pull/10950#issuecomment-718117133 # can't use in-memory IO, e.g., io.BytesIO - fits IO can't handle it. filename = 'test_qtable_quantity_int_conversion.fits' try: Table([Column(np.arange(3), unit=u.m, name='length')]).write(filename, overwrite=True) tab = table.QTable.read(filename) assert np.issubdtype(tab['length'].dtype, np.float) finally: if os.path.isfile(filename): os.remove(filename) # Ensure warnings only happen in column update, but not column add # - case adding a column tab = QTable(dict(time=[1, 2, 3])) tab['length'] = Column([9, 8, 7], unit=u.m) # - case initial creation tab = QTable([[1, 2, 3]], names=['time'], units=[u.m]) ","def test_qtable_quantity_int_conversion(): """"""Ensure the behavior when converting ``int`` ``Column`` to ``Quantity``, the dtype will be ``float``. See https://github.com/astropy/astropy/issues/10964 for the rationale """""" tab = QTable(dict(time=[1, 2, 3])) tab['length'] = [9, 8, 7] with pytest.warns(AstropyUserWarning, match=""dtype is converted to float""): tab['length'].unit = u.m assert np.issubdtype(tab['length'].dtype, np.float) # same for dimensionless unit tab['col2'] = [6, 5, 4] with pytest.warns(UserWarning, match=""dtype is converted to float""): tab['col2'].unit = u.dimensionless_unscaled assert np.issubdtype(tab['col2'].dtype, np.float) # An implied behavior is that when QTable reads a file with a Column of int data with units, # it gets converted to float as well. # See: https://github.com/astropy/astropy/pull/10950#issuecomment-718117133 # can't use in-memory IO, e.g., io.BytesIO - fits IO can't handle it. filename = 'test_qtable_quantity_int_conversion.fits' try: Table([Column(np.arange(3), unit=u.m, name='length')]).write(filename, overwrite=True) tab = table.QTable.read(filename) assert np.issubdtype(tab['length'].dtype, np.float) finally: if os.path.isfile(filename): os.remove(filename) # Ensure warnings only happen in column update, but not column add # - case adding a column tab = QTable(dict(time=[1, 2, 3])) tab['length'] = Column([9, 8, 7], unit=u.m) # - case initial creation tab = QTable([[1, 2, 3]], names=['time'], units=[u.m]) " 28777,"def loop(*, seconds=0, minutes=0, hours=0, count=None, time=None, reconnect=True, loop=None): """"""A decorator that schedules a task in the background for you with optional reconnect logic. The decorator returns a :class:`Loop`. Parameters ------------ seconds: Union[:class:`int`, :class:`float`] The number of seconds between every iteration. minutes: Union[:class:`int`, :class:`float`] The number of minutes between every iteration. hours: Union[:class:`int`, :class:`float`] The number of hours between every iteration. time: Union[:class:`datetime.time`, Sequence[:class:`datetime.time`]] The exact times to run this loop at. Either a non-empty list or a single value of :class:`datetime.time` should be passed. 
This cannot be used in conjunction with the relative time parameters. .. versionadded:: 1.7.0 .. note:: Duplicate times will be ignored, and only run once. count: Optional[:class:`int`] The number of loops to do, ``None`` if it should be an infinite loop. reconnect: :class:`bool` Whether to handle errors and restart the task using an exponential back-off algorithm similar to the one used in :meth:`discord.Client.connect`. loop: :class:`asyncio.AbstractEventLoop` The loop to use to register the task, if not given defaults to :func:`asyncio.get_event_loop`. Raises -------- ValueError An invalid value was given. TypeError The function was not a coroutine, or ``times`` parameter was passed in conjunction with relative time parameters. """""" def decorator(func): kwargs = { 'seconds': seconds, 'minutes': minutes, 'hours': hours, 'count': count, 'time': time, 'reconnect': reconnect, 'loop': loop } return Loop(func, **kwargs) return decorator ","def loop(*, seconds=0, minutes=0, hours=0, count=None, time=None, reconnect=True, loop=None): """"""A decorator that schedules a task in the background for you with optional reconnect logic. The decorator returns a :class:`Loop`. Parameters ------------ seconds: Union[:class:`int`, :class:`float`] The number of seconds between every iteration. minutes: Union[:class:`int`, :class:`float`] The number of minutes between every iteration. hours: Union[:class:`int`, :class:`float`] The number of hours between every iteration. time: Union[:class:`datetime.time`, Sequence[:class:`datetime.time`]] The exact times to run this loop at. Either a non-empty list or a single value of :class:`datetime.time` should be passed. This cannot be used in conjunction with the relative time parameters. .. versionadded:: 1.7 .. note:: Duplicate times will be ignored, and only run once. count: Optional[:class:`int`] The number of loops to do, ``None`` if it should be an infinite loop. reconnect: :class:`bool` Whether to handle errors and restart the task using an exponential back-off algorithm similar to the one used in :meth:`discord.Client.connect`. loop: :class:`asyncio.AbstractEventLoop` The loop to use to register the task, if not given defaults to :func:`asyncio.get_event_loop`. Raises -------- ValueError An invalid value was given. TypeError The function was not a coroutine, or ``times`` parameter was passed in conjunction with relative time parameters. """""" def decorator(func): kwargs = { 'seconds': seconds, 'minutes': minutes, 'hours': hours, 'count': count, 'time': time, 'reconnect': reconnect, 'loop': loop } return Loop(func, **kwargs) return decorator " 43666,"def excitations(n_electrons, n_spin_orbitals, delta_sz=0): r""""""Generates single and double excitations from a Hartree-Fock reference state. Single and double excitations can be generated by acting with the excitation operators :math:`\hat T_1` and :math:`\hat T_2` on the Hartree-Fock (HF) reference state: .. math: && \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in \mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF} \rangle \\ && \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in \mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle, where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied, referred to as virtual (virt), *spin-orbitals* and :math:`\hat c` and :math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively. | .. 
figure:: ../../_static/qchem/sd_excitations.png :align: center :width: 70% | Args: n_electrons (int): Number of electrons. If an active space is defined, 'n_electrons' is the number of active electrons. n_spin_orbitals (int): Number of spin-orbitals. If an active space is defined, 'n_spin_orbitals' is the number of active spin-orbitals. delta_sz (int): Specifies the selection rules ``sz[p] - sz[r] = delta_sz`` and ``sz[p] + sz[p] - sz[r] - sz[s] = delta_sz`` for the spin-projection ``sz`` of the orbitals involved in the single and double excitations, respectively. ``delta_sz`` can take the values :math:`0`, :math:`\pm 1` and :math:`\pm 2`. Returns: tuple(list, list): lists with the indices of the spin-orbitals involved in the single and double excitations **Example** >>> n_electrons = 2 >>> n_spin_orbitals = 4 >>> singles, doubles = excitations(n_electrons, n_spin_orbitals) >>> print(singles) [[0, 2], [1, 3]] >>> print(doubles) [[0, 1, 2, 3]] """""" if not n_electrons > 0: raise ValueError( ""The number of active electrons has to be greater than 0 \n"" ""Got n_electrons = {}"".format(n_electrons) ) if n_spin_orbitals <= n_electrons: raise ValueError( ""The number of active spin-orbitals ({}) "" ""has to be greater than the number of active electrons ({})."".format( n_spin_orbitals, n_electrons ) ) if delta_sz not in (0, 1, -1, 2, -2): raise ValueError( ""Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."".format(delta_sz) ) # define the single-particle state spin quantum number 'sz' sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_spin_orbitals)]) singles = [ [r, p] for r in range(n_electrons) for p in range(n_electrons, n_spin_orbitals) if sz[p] - sz[r] == delta_sz ] doubles = [ [s, r, q, p] for s in range(n_electrons - 1) for r in range(s + 1, n_electrons) for q in range(n_electrons, n_spin_orbitals - 1) for p in range(q + 1, n_spin_orbitals) if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz ] return singles, doubles ","def excitations(n_electrons, n_spin_orbitals, delta_sz=0): r""""""Generates single and double excitations from a Hartree-Fock reference state. Single and double excitations can be generated by acting with the excitation operators :math:`\hat T_1` and :math:`\hat T_2` on the Hartree-Fock (HF) reference state: .. math: && \hat{T}_1 \vert \mathrm{HF} \rangle = \sum_{r \in \mathrm{occ} \\ p \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF} \rangle \\ && \hat{T}_2 \vert \mathrm{HF} \rangle = \sum_{r>s \in \mathrm{occ} \\ p>q \in \mathrm{virt}} \hat{c}_p^\dagger \hat{c}_q^\dagger \hat{c}_r \hat{c}_s \vert \mathrm{HF} \rangle, where the indices :math:`r, s` and :math:`p, q` run over the occupied (occ) and unoccupied, referred to as virtual (virt), *spin-orbitals* and :math:`\hat c` and :math:`\hat c^\dagger` are the electron annihilation and creation operators, respectively. | .. figure:: ../../_static/qchem/sd_excitations.png :align: center :width: 70% | Args: n_electrons (int): Number of electrons. If an active space is defined, 'n_electrons' is the number of active electrons. n_spin_orbitals (int): Number of spin-orbitals. If an active space is defined, this is the number of active spin-orbitals. delta_sz (int): Specifies the selection rules ``sz[p] - sz[r] = delta_sz`` and ``sz[p] + sz[p] - sz[r] - sz[s] = delta_sz`` for the spin-projection ``sz`` of the orbitals involved in the single and double excitations, respectively. ``delta_sz`` can take the values :math:`0`, :math:`\pm 1` and :math:`\pm 2`. 
Returns: tuple(list, list): lists with the indices of the spin-orbitals involved in the single and double excitations **Example** >>> n_electrons = 2 >>> n_spin_orbitals = 4 >>> singles, doubles = excitations(n_electrons, n_spin_orbitals) >>> print(singles) [[0, 2], [1, 3]] >>> print(doubles) [[0, 1, 2, 3]] """""" if not n_electrons > 0: raise ValueError( ""The number of active electrons has to be greater than 0 \n"" ""Got n_electrons = {}"".format(n_electrons) ) if n_spin_orbitals <= n_electrons: raise ValueError( ""The number of active spin-orbitals ({}) "" ""has to be greater than the number of active electrons ({})."".format( n_spin_orbitals, n_electrons ) ) if delta_sz not in (0, 1, -1, 2, -2): raise ValueError( ""Expected values for 'delta_sz' are 0, +/- 1 and +/- 2 but got ({})."".format(delta_sz) ) # define the single-particle state spin quantum number 'sz' sz = np.array([0.5 if (i % 2 == 0) else -0.5 for i in range(n_spin_orbitals)]) singles = [ [r, p] for r in range(n_electrons) for p in range(n_electrons, n_spin_orbitals) if sz[p] - sz[r] == delta_sz ] doubles = [ [s, r, q, p] for s in range(n_electrons - 1) for r in range(s + 1, n_electrons) for q in range(n_electrons, n_spin_orbitals - 1) for p in range(q + 1, n_spin_orbitals) if (sz[p] + sz[q] - sz[r] - sz[s]) == delta_sz ] return singles, doubles " 52471,"def test_json_to_doc(doc): new_doc = Doc.from_json(doc.vocab, doc.to_json()) new_tokens = [token for token in new_doc] assert new_doc.text == doc.text assert len(new_tokens) == len([token for token in doc]) assert new_doc.text == doc.text == ""c d e "" assert len(new_tokens) == 3 assert new_tokens[0].pos_ == ""VERB"" assert new_tokens[0].tag_ == ""VBP"" assert new_tokens[0].dep_ == ""ROOT"" assert len(new_doc.ents) == 1 assert new_doc.ents[0].start == 1 # character offset! assert new_doc.ents[0].end == 2 # character offset! assert new_doc.ents[0].label_ == ""ORG"" ","def test_json_to_doc(doc): new_doc = Doc.from_json(doc.vocab, doc.to_json()) new_tokens = [token for token in new_doc] assert new_doc.text == doc.text == ""c d e "" assert len(new_tokens) == len([token for token in doc]) == 3 assert new_tokens[0].pos_ == ""VERB"" assert new_tokens[0].tag_ == ""VBP"" assert new_tokens[0].dep_ == ""ROOT"" assert len(new_doc.ents) == 1 assert new_doc.ents[0].start == 1 # character offset! assert new_doc.ents[0].end == 2 # character offset! 
assert new_doc.ents[0].label_ == ""ORG"" " 31127,"def url_to_clickable(url): """""" Make the given url clickable when in markdown format by concatenating itself, with the proper brackets :type url: ``Union[List[str], str]`` :param url: the url of interest or a list of urls :return: markdown format for clickable url :rtype: ``str`` """""" if url and isinstance(url, list): return ['[{}]({})'.format(item, item) for item in url] return '[{}]({})'.format(url, url) ","def create_url_link(url): """""" Make the given url clickable when in markdown format by concatenating itself, with the proper brackets :type url: ``Union[List[str], str]`` :param url: the url of interest or a list of urls :return: markdown format for clickable url :rtype: ``str`` """""" if url and isinstance(url, list): return ['[{}]({})'.format(item, item) for item in url] return '[{}]({})'.format(url, url) " 21223,"def execute(bench_path): frappe_version = Version(get_current_version('frappe')) if frappe_version.major < 14 or os.name != ""posix"": # Returning False means patch has been skipped return False pre_patch_dir = os.getcwd() old_directory = Path(bench_path, ""archived_sites"") new_directory = Path(bench_path, ""archived"", ""sites"") if not old_directory.exists(): return False if old_directory.is_symlink(): return True os.chdir(bench_path) if not os.path.exists(new_directory): os.makedirs(new_directory) for archived_site_path in old_directory.glob(""*""): shutil.move(str(archived_site_path), str(new_directory)) click.secho(f""Archived sites are now stored under {new_directory}"") if not os.listdir(old_directory): os.rmdir(old_directory) os.symlink(new_directory, old_directory) click.secho(f""Symlink {old_directory} that points to {new_directory}"") os.chdir(pre_patch_dir) ","def execute(bench_path): frappe_version = Version(get_current_version('frappe')) if frappe_version.major < 14 or os.name != ""posix"": # Returning False means patch has been skipped return False pre_patch_dir = os.getcwd() old_directory = Path(bench_path, ""archived_sites"") new_directory = Path(bench_path, ""archived"", ""sites"") if not old_directory.exists(): return False if old_directory.is_symlink(): return True os.chdir(bench_path) if not os.path.exists(new_directory): os.makedirs(new_directory) for archived_site_path in old_directory.glob(""*""): archived_site_path.rename(new_directory) click.secho(f""Archived sites are now stored under {new_directory}"") if not os.listdir(old_directory): os.rmdir(old_directory) os.symlink(new_directory, old_directory) click.secho(f""Symlink {old_directory} that points to {new_directory}"") os.chdir(pre_patch_dir) " 16106,"def duplicate_modbus_validator(config: list) -> list: """"""Control modbus connection for duplicates."""""" hosts: set[str] = set() names: set[str] = set() errors = [] for index, hub in enumerate(config): name = hub.get(CONF_NAME, DEFAULT_HUB) host = hub[CONF_PORT] if hub[CONF_TYPE] == SERIAL else hub[CONF_HOST] if host in hosts: err = f""Modbus {name}  contains duplicate host/port {host}, not loaded!"" _LOGGER.warning(err) errors.append(index) elif name in names: err = f""Modbus {name}  is duplicate, second entry not loaded!"" _LOGGER.warning(err) errors.append(index) else: hosts.add(host) names.add(name) for i in reversed(errors): del config[i] return config ","def duplicate_modbus_validator(config: list) -> list: """"""Control modbus connection for duplicates."""""" hosts: set[str] = set() names: set[str] = set() errors = [] for index, hub in enumerate(config): name = hub.get(CONF_NAME, 
DEFAULT_HUB) host = hub[CONF_PORT] if hub[CONF_TYPE] == SERIAL else hub[CONF_HOST] if host in hosts: err = f""Modbus {name} contains duplicate host/port {host}, not loaded!"" _LOGGER.warning(err) errors.append(index) elif name in names: err = f""Modbus {name}  is duplicate, second entry not loaded!"" _LOGGER.warning(err) errors.append(index) else: hosts.add(host) names.add(name) for i in reversed(errors): del config[i] return config " 51526,"def main(): # get the directory with the PEP sources in_dir = Path(__file__).parent # get the output directory for target HTML files out_dir = Path(__file__).parent / ""build"" # get list of source peps peps = list(in_dir.glob('pep-*.txt')) peps.extend(in_dir.glob('pep-*.rst')) # sort peps by creation time (from ""Created:"" string in pep source) peps_with_dt = sorted((pep_creation(path), path) for path in peps) # generate rss items for 10 most recent peps items = [] for dt, full_path in peps_with_dt[-10:]: try: pep_num = int(full_path.stem.split(""-"")[-1]) except ValueError: continue title = first_line_starting_with(full_path, ""Title:"") author = first_line_starting_with(full_path, ""Author:"") if ""@"" in author or "" at "" in author: parsed_authors = email.utils.getaddresses([author]) # ideal would be to pass as a list of dicts with names and emails to # item.author, but FeedGen's RSS output doesn't pass W3C # validation (as of 12/06/2021) joined_authors = "", "".join(f""{name} ({email_address})"" for name, email_address in parsed_authors) else: joined_authors = author url = f""https://peps.python.org/pep-{pep_num:0>4}"" item = entry.FeedEntry() item.title(f""PEP {pep_num}: {title}"") item.link(href=url) item.description(pep_abstract(full_path)) item.guid(url, permalink=True) item.published(dt.replace(tzinfo=datetime.timezone.utc)) # ensure datetime has a timezone item.author(email=joined_authors) items.append(item) # The rss envelope desc = """""" Newest Python Enhancement Proposals (PEPs) - Information on new language features, and some meta-information like release procedure and schedules. 
"""""" # Setup feed generator fg = feed.FeedGenerator() fg.language(""en"") fg.generator("""") fg.docs(""https://cyber.harvard.edu/rss/rss.html"") # Add metadata fg.title(""Newest Python PEPs"") fg.link(href=""https://peps.python.org"") fg.link(href=""https://peps.python.org/peps.rss"", rel=""self"") fg.description("" "".join(desc.split())) fg.lastBuildDate(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)) # Add PEP information (ordered by newest first) for item in items: fg.add_entry(item) out_dir.mkdir(exist_ok=True) out_dir.joinpath(""peps.rss"").write_bytes(fg.rss_str(pretty=True)) ","def main(): # get the directory with the PEP sources in_dir = Path(__file__).parent # get the output directory for target HTML files out_dir = Path(__file__).parent / ""build"" # get list of source peps peps = list(in_dir.glob('pep-[0-9][0-9][0-9][0-9].txt')) peps.extend(in_dir.glob('pep-[0-9][0-9][0-9][0-9].rst')) # sort peps by creation time (from ""Created:"" string in pep source) peps_with_dt = sorted((pep_creation(path), path) for path in peps) # generate rss items for 10 most recent peps items = [] for dt, full_path in peps_with_dt[-10:]: try: pep_num = int(full_path.stem.split(""-"")[-1]) except ValueError: continue title = first_line_starting_with(full_path, ""Title:"") author = first_line_starting_with(full_path, ""Author:"") if ""@"" in author or "" at "" in author: parsed_authors = email.utils.getaddresses([author]) # ideal would be to pass as a list of dicts with names and emails to # item.author, but FeedGen's RSS output doesn't pass W3C # validation (as of 12/06/2021) joined_authors = "", "".join(f""{name} ({email_address})"" for name, email_address in parsed_authors) else: joined_authors = author url = f""https://peps.python.org/pep-{pep_num:0>4}"" item = entry.FeedEntry() item.title(f""PEP {pep_num}: {title}"") item.link(href=url) item.description(pep_abstract(full_path)) item.guid(url, permalink=True) item.published(dt.replace(tzinfo=datetime.timezone.utc)) # ensure datetime has a timezone item.author(email=joined_authors) items.append(item) # The rss envelope desc = """""" Newest Python Enhancement Proposals (PEPs) - Information on new language features, and some meta-information like release procedure and schedules. """""" # Setup feed generator fg = feed.FeedGenerator() fg.language(""en"") fg.generator("""") fg.docs(""https://cyber.harvard.edu/rss/rss.html"") # Add metadata fg.title(""Newest Python PEPs"") fg.link(href=""https://peps.python.org"") fg.link(href=""https://peps.python.org/peps.rss"", rel=""self"") fg.description("" "".join(desc.split())) fg.lastBuildDate(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)) # Add PEP information (ordered by newest first) for item in items: fg.add_entry(item) out_dir.mkdir(exist_ok=True) out_dir.joinpath(""peps.rss"").write_bytes(fg.rss_str(pretty=True)) " 34897,"def tile(data, reps): """"""Repeats the whole array multiple times. Parameters ---------- data : relay.Expr The input data to the operator. reps : tuple of int The number of times repeating the tensor a. .. note:: Each dim size of reps must be a positive integer. If reps has length d, the result will have dimension of max(d, a.ndim); If a.ndim < d, a is promoted to be d-dimensional by prepending new axes. If a.ndim ? d, reps is promoted to a.ndim by pre-pending 1's to it. Returns ------- ret : relay.Expr The computed result. """""" return _make.tile(data, reps) ","def tile(data, reps): """"""Repeats the whole array multiple times. 
Parameters ---------- data : relay.Expr The input data to the operator. reps : tuple of int The number of times repeating the tensor a. .. note:: Each dim size of reps must be a positive integer. If reps has length d, the result will have dimension of max(d, a.ndim); If a.ndim < d, a is promoted to be d-dimensional by prepending new axes. If a.ndim ? d, reps is promoted to data.ndim by prepending 1's to reps. Returns ------- ret : relay.Expr The computed result. """""" return _make.tile(data, reps) " 30907,"def poll_field(args: Dict[str, Any]) -> Tuple[str, dict, dict]: field = args.get('field') regex = args.get('regex', None) regex = re.compile(regex) if regex else None incident = demisto.incidents()[0] data = { 'field': field, 'exists': False } if field in incident: data['exists'] = check_field(incident.get(field), regex) else: custom_fields = incident.get('CustomFields', {}) if field in custom_fields: data['exists'] = check_field(custom_fields.get(field), regex) context = { 'PollingCheckField(val.field == obj.field)': data } human_readable = ""The field exists"" if data['exists'] else ""The field does not exist"" return human_readable, context, data ","def poll_field(args: Dict[str, Any]) -> Tuple[str, dict, dict]: field = args.get('field') regex = args.get('regex') regex = re.compile(regex) if regex else None incident = demisto.incidents()[0] data = { 'field': field, 'exists': False } if field in incident: data['exists'] = check_field(incident.get(field), regex) else: custom_fields = incident.get('CustomFields', {}) if field in custom_fields: data['exists'] = check_field(custom_fields.get(field), regex) context = { 'PollingCheckField(val.field == obj.field)': data } human_readable = ""The field exists"" if data['exists'] else ""The field does not exist"" return human_readable, context, data " 39938,"def get_external_ip_from_centralized_source(log: Logger = IP_DETECTION_LOGGER) -> Union[str, None]: """"""Use hardcoded URL to determine the external IP address of this host."""""" endpoint = 'https://ifconfig.me/' ip = __request(url=endpoint) if ip: log.info(f'Fetched external IP address from centralized source ({endpoint}).') return ip ","def get_external_ip_from_centralized_source(log: Logger = IP_DETECTION_LOGGER) -> Union[str, None]: """"""Use hardcoded URL to determine the external IP address of this host."""""" endpoint = 'https://ifconfig.me/' ip = __request(url=endpoint) if ip: log.info(f'Fetched external IP address, {ip}, from centralized source ({endpoint}).') return ip " 31299,"def get_indicators_command(client, args: dict) -> CommandResults: """""" Gets indicator from opencti to readable output Args: client: OpenCTI Client object args: demisto.args() Returns: readable_output, raw_response """""" indicator_type = argToList(args.get(""indicator_types"")) limit = int(args.get('limit', 50)) limit = 200 if limit > 200 else limit last_run_id, indicators_list = get_indicators(client, indicator_type, limit=limit) if indicators_list: indicators = [{'type': indicator['type'], 'value': indicator['value'], 'id': indicator['rawJSON']['id'], 'createdBy': indicator['rawJSON'].get('createdBy').get('id') if indicator['rawJSON'].get('createdBy') else None, 'score': indicator['rawJSON']['x_opencti_score'], 'description': indicator['rawJSON']['x_opencti_description'], 'labels': [label.get('value') for label in indicator['rawJSON'].get('objectLabel')], 'marking': [mark.get('definition') for mark in indicator['rawJSON'].get('objectMarking')] } for indicator in indicators_list] readable_output = 
tableToMarkdown('Indicators from OpenCTI', indicators, headers=[""type"", ""value"", ""id""], removeNull=True) return CommandResults( outputs_prefix='OpenCTI.Indicators', outputs_key_field='id', outputs=indicators, readable_output=readable_output, raw_response=indicators_list ) else: return CommandResults(readable_output='No indicators') ","def get_indicators_command(client, args: dict) -> CommandResults: """""" Gets indicator from opencti to readable output Args: client: OpenCTI Client object args: demisto.args() Returns: readable_output, raw_response """""" indicator_type = argToList(args.get(""indicator_types"")) limit = int(args.get('limit', 50)) limit = 200 if limit > 200 else limit _, indicators_list = get_indicators(client, indicator_type, limit=limit) if indicators_list: indicators = [{'type': indicator['type'], 'value': indicator['value'], 'id': indicator['rawJSON']['id'], 'createdBy': indicator['rawJSON'].get('createdBy').get('id') if indicator['rawJSON'].get('createdBy') else None, 'score': indicator['rawJSON']['x_opencti_score'], 'description': indicator['rawJSON']['x_opencti_description'], 'labels': [label.get('value') for label in indicator['rawJSON'].get('objectLabel')], 'marking': [mark.get('definition') for mark in indicator['rawJSON'].get('objectMarking')] } for indicator in indicators_list] readable_output = tableToMarkdown('Indicators from OpenCTI', indicators, headers=[""type"", ""value"", ""id""], removeNull=True) return CommandResults( outputs_prefix='OpenCTI.Indicators', outputs_key_field='id', outputs=indicators, readable_output=readable_output, raw_response=indicators_list ) else: return CommandResults(readable_output='No indicators') " 36814,"def object_equivalence(obj1, obj2, prop_scores={}, threshold=70, **weight_dict): """"""This method returns a true/false value if two objects are semantically equivalent. Internally, it calls the object_similarity function and compares it against the given threshold value. Args: obj1: A stix2 object instance obj2: A stix2 object instance prop_scores: A dictionary that can hold individual property scores, weights, contributing score, matching score and sum of weights. threshold: A numerical value between 0 and 100 to determine the minimum score to result in successfully calling both objects equivalent. This value can be tuned. weight_dict: A dictionary that can be used to override settings in the semantic equivalence process Returns: bool: True if the result of the object similarity is greater than or equal to the threshold value. False otherwise. Warning: Object types need to have property weights defined for the similarity process. Otherwise, those objects will not influence the final score. The WEIGHTS dictionary under `stix2.equivalence.object` can give you an idea on how to add new entries and pass them via the `weight_dict` argument. Similarly, the values or methods can be fine tuned for a particular use case. Note: Default weight_dict: .. include:: ../../object_default_sem_eq_weights.rst Note: This implementation follows the Semantic Equivalence Committee Note. see `the Committee Note `__. """""" similarity_result = object_similarity(obj1, obj2, prop_scores, **weight_dict) if similarity_result >= threshold: return True return False ","def object_equivalence(obj1, obj2, prop_scores={}, threshold=70, **weight_dict): """"""This method returns a true/false value if two objects are semantically equivalent. Internally, it calls the object_similarity function and compares it against the given threshold value. 
Args: obj1: A stix2 object instance obj2: A stix2 object instance prop_scores: A dictionary that can hold individual property scores, weights, contributing score, matching score and sum of weights. threshold: A numerical value between 0 and 100 to determine the minimum score to result in successfully calling both objects equivalent. This value can be tuned. weight_dict: A dictionary that can be used to override settings in the similarity process Returns: bool: True if the result of the object similarity is greater than or equal to the threshold value. False otherwise. Warning: Object types need to have property weights defined for the similarity process. Otherwise, those objects will not influence the final score. The WEIGHTS dictionary under `stix2.equivalence.object` can give you an idea on how to add new entries and pass them via the `weight_dict` argument. Similarly, the values or methods can be fine tuned for a particular use case. Note: Default weight_dict: .. include:: ../../object_default_sem_eq_weights.rst Note: This implementation follows the Semantic Equivalence Committee Note. see `the Committee Note `__. """""" similarity_result = object_similarity(obj1, obj2, prop_scores, **weight_dict) if similarity_result >= threshold: return True return False " 50810,"def setup(hass, config): """"""Set up the Splunk component."""""" conf = config[DOMAIN] host = conf.get(CONF_HOST) port = conf.get(CONF_PORT) token = conf.get(CONF_TOKEN) use_ssl = conf[CONF_SSL] verify_ssl = conf.get(CONF_VERIFY_SSL) name = conf.get(CONF_NAME) entity_filter = conf[CONF_FILTER] hec = http_event_collector(token, host, ""json"", name, port, use_ssl) hec.SSL_verify = verify_ssl if not hec.check_connectivity(): _LOGGER.exception(""Cannot connect to Splunk"") def splunk_event_listener(event): """"""Listen for new messages on the bus and sends them to Splunk."""""" state = event.data.get(""new_state"") if state is None or not entity_filter(state.entity_id): return try: _state = state_helper.state_as_number(state) except ValueError: _state = state.state payload = { ""time"": event.time_fired.timestamp(), ""host"": name, ""event"": { ""domain"": state.domain, ""entity_id"": state.object_id, ""attributes"": dict(state.attributes), ""value"": _state, }, } hec.batchEvent(payload) def splunk_event_flush(event): hec.flushBatch() hass.bus.listen(EVENT_STATE_CHANGED, splunk_event_listener) hass.bus.listen(EVENT_TIME_CHANGED, splunk_event_flush) hass.bus.listen(EVENT_HOMEASSISTANT_STOP, splunk_event_flush) return True ","def setup(hass, config): """"""Set up the Splunk component."""""" conf = config[DOMAIN] host = conf.get(CONF_HOST) port = conf.get(CONF_PORT) token = conf.get(CONF_TOKEN) use_ssl = conf[CONF_SSL] verify_ssl = conf.get(CONF_VERIFY_SSL) name = conf.get(CONF_NAME) entity_filter = conf[CONF_FILTER] hec = http_event_collector(token, host, ""json"", name, port, use_ssl) hec.SSL_verify = verify_ssl if not hec.check_connectivity(): _LOGGER.exception(""Error while trying to connect to Splunk"") return False def splunk_event_listener(event): """"""Listen for new messages on the bus and sends them to Splunk."""""" state = event.data.get(""new_state"") if state is None or not entity_filter(state.entity_id): return try: _state = state_helper.state_as_number(state) except ValueError: _state = state.state payload = { ""time"": event.time_fired.timestamp(), ""host"": name, ""event"": { ""domain"": state.domain, ""entity_id"": state.object_id, ""attributes"": dict(state.attributes), ""value"": _state, }, } 
hec.batchEvent(payload) def splunk_event_flush(event): hec.flushBatch() hass.bus.listen(EVENT_STATE_CHANGED, splunk_event_listener) hass.bus.listen(EVENT_TIME_CHANGED, splunk_event_flush) hass.bus.listen(EVENT_HOMEASSISTANT_STOP, splunk_event_flush) return True " 27455,"def lintify(meta, recipe_dir=None, conda_forge=False): lints = [] hints = [] major_sections = list(meta.keys()) # If the recipe_dir exists (no guarantee within this function) , we can # find the meta.yaml within it. meta_fname = os.path.join(recipe_dir or """", ""meta.yaml"") sources_section = get_section(meta, ""source"", lints) build_section = get_section(meta, ""build"", lints) requirements_section = get_section(meta, ""requirements"", lints) test_section = get_section(meta, ""test"", lints) about_section = get_section(meta, ""about"", lints) extra_section = get_section(meta, ""extra"", lints) package_section = get_section(meta, ""package"", lints) outputs_section = get_section(meta, ""outputs"", lints) recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else ""recipe"" is_staged_recipes = recipe_dirname != ""recipe"" # 0: Top level keys should be expected unexpected_sections = [] for section in major_sections: if section not in EXPECTED_SECTION_ORDER: lints.append( ""The top level meta key {} is unexpected"".format(section) ) unexpected_sections.append(section) for section in unexpected_sections: major_sections.remove(section) # 1: Top level meta.yaml keys should have a specific order. lint_section_order(major_sections, lints) # 2: The about section should have a home, license and summary. lint_about_contents(about_section, lints) # 3a: The recipe should have some maintainers. if not extra_section.get(""recipe-maintainers"", []): lints.append( ""The recipe could do with some maintainers listed in "" ""the `extra/recipe-maintainers` section."" ) # 3b: Maintainers should be a list if not ( isinstance(extra_section.get(""recipe-maintainers"", []), Sequence) and not isinstance( extra_section.get(""recipe-maintainers"", []), str_type ) ): lints.append(""Recipe maintainers should be a json list."") # 4: The recipe should have some tests. if not any(key in TEST_KEYS for key in test_section): a_test_file_exists = recipe_dir is not None and any( os.path.exists(os.path.join(recipe_dir, test_file)) for test_file in TEST_FILES ) if not a_test_file_exists: has_outputs_test = False no_test_hints = [] if outputs_section: for out in outputs_section: test_out = get_section(out, ""test"", lints) if any(key in TEST_KEYS for key in test_out): has_outputs_test = True else: no_test_hints.append( ""It looks like the '{}' output doesn't "" ""have any tests."".format(out.get(""name"", ""???"")) ) if has_outputs_test: hints.extend(no_test_hints) else: lints.append(""The recipe must have some tests."") # 5: License cannot be 'unknown.' license = about_section.get(""license"", """").lower() if ""unknown"" == license.strip(): lints.append(""The recipe license cannot be unknown."") # 6: Selectors should be in a tidy form. 
if recipe_dir is not None and os.path.exists(meta_fname): bad_selectors = [] bad_lines = [] # Good selectors look like "".*\s\s#\s[...]"" good_selectors_pat = re.compile(r""(.+?)\s{2,}#\s\[(.+)\](?(2).*)$"") with io.open(meta_fname, ""rt"") as fh: for selector_line, line_number in selector_lines(fh): if not good_selectors_pat.match(selector_line): bad_selectors.append(selector_line) bad_lines.append(line_number) if bad_selectors: lints.append( ""Selectors are suggested to take a "" ""``#[]`` form."" "" See lines {}"".format(bad_lines) ) # 7: The build section should have a build number. if build_section.get(""number"", None) is None: lints.append(""The recipe must have a `build/number` section."") # 8: The build section should be before the run section in requirements. seen_requirements = [ k for k in requirements_section if k in REQUIREMENTS_ORDER ] requirements_order_sorted = sorted( seen_requirements, key=REQUIREMENTS_ORDER.index ) if seen_requirements != requirements_order_sorted: lints.append( ""The `requirements/` sections should be defined "" ""in the following order: "" + "", "".join(REQUIREMENTS_ORDER) + ""; instead saw: "" + "", "".join(seen_requirements) + ""."" ) # 9: Files downloaded should have a hash. for source_section in sources_section: if ""url"" in source_section and not ( {""sha1"", ""sha256"", ""md5""} & set(source_section.keys()) ): lints.append( ""When defining a source/url please add a sha256, sha1 "" ""or md5 checksum (sha256 preferably)."" ) # 10: License should not include the word 'license'. license = about_section.get(""license"", """").lower() if ( ""license"" in license.lower() and ""unlicense"" not in license.lower() and ""licenseref"" not in license.lower() and ""-license"" not in license.lower() ): lints.append( ""The recipe `license` should not include the word "" '""License"".' ) # 11: There should be one empty line at the end of the file. if recipe_dir is not None and os.path.exists(meta_fname): with io.open(meta_fname, ""r"") as f: lines = f.read().split(""\n"") # Count the number of empty lines from the end of the file empty_lines = itertools.takewhile(lambda x: x == """", reversed(lines)) end_empty_lines_count = len(list(empty_lines)) if end_empty_lines_count > 1: lints.append( ""There are {} too many lines. "" ""There should be one empty line at the end of the "" ""file."".format(end_empty_lines_count - 1) ) elif end_empty_lines_count < 1: lints.append( ""There are too few lines. There should be one empty "" ""line at the end of the file."" ) # 12: License family must be valid (conda-build checks for that) try: ensure_valid_license_family(meta) except RuntimeError as e: lints.append(str(e)) # 12a: License family must be valid (conda-build checks for that) license_family = about_section.get(""license_family"", license).lower() license_file = about_section.get(""license_file"", None) if not license_file and any( f for f in NEEDED_FAMILIES if f in license_family ): lints.append(""license_file entry is missing, but is required."") # 13: Check that the recipe name is valid recipe_name = package_section.get(""name"", """").strip() if re.match(""^[a-z0-9_\-.]+$"", recipe_name) is None: lints.append( ""Recipe name has invalid characters. 
only lowercase alpha, numeric, "" ""underscores, hyphens and dots allowed"" ) # 14: Run conda-forge specific lints if conda_forge: run_conda_forge_specific(meta, recipe_dir, lints, hints) # 15: Check if we are using legacy patterns build_reqs = requirements_section.get(""build"", None) if build_reqs and (""numpy x.x"" in build_reqs): lints.append( ""Using pinned numpy packages is a deprecated pattern. Consider "" ""using the method outlined "" ""[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."" ) # 16: Subheaders should be in the allowed subheadings for section in major_sections: expected_subsections = FIELDS.get(section, []) if not expected_subsections: continue for subsection in get_section(meta, section, lints): if ( section != ""source"" and section != ""outputs"" and subsection not in expected_subsections ): lints.append( ""The {} section contained an unexpected "" ""subsection name. {} is not a valid subsection"" "" name."".format(section, subsection) ) elif section == ""source"" or section == ""outputs"": for source_subsection in subsection: if source_subsection not in expected_subsections: lints.append( ""The {} section contained an unexpected "" ""subsection name. {} is not a valid subsection"" "" name."".format(section, source_subsection) ) # 17: noarch doesn't work with selectors for runtime dependencies if build_section.get(""noarch"") is not None and os.path.exists(meta_fname): with io.open(meta_fname, ""rt"") as fh: in_runreqs = False for line in fh: line_s = line.strip() if line_s == ""host:"" or line_s == ""run:"": in_runreqs = True runreqs_spacing = line[: -len(line.lstrip())] continue if line_s.startswith(""skip:"") and is_selector_line(line): lints.append( ""`noarch` packages can't have selectors. If "" ""the selectors are necessary, please remove "" ""`noarch: {}`."".format(build_section[""noarch""]) ) break if in_runreqs: if runreqs_spacing == line[: -len(line.lstrip())]: in_runreqs = False continue if is_selector_line(line): lints.append( ""`noarch` packages can't have selectors. If "" ""the selectors are necessary, please remove "" ""`noarch: {}`."".format(build_section[""noarch""]) ) break # 19: check version if package_section.get(""version"") is not None: ver = str(package_section.get(""version"")) try: conda_build.conda_interface.VersionOrder(ver) except: lints.append( ""Package version {} doesn't match conda spec"".format(ver) ) # 20: Jinja2 variable definitions should be nice. if recipe_dir is not None and os.path.exists(meta_fname): bad_jinja = [] bad_lines = [] # Good Jinja2 variable definitions look like ""{% set .+ = .+ %}"" good_jinja_pat = re.compile(r""\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}"") with io.open(meta_fname, ""rt"") as fh: for jinja_line, line_number in jinja_lines(fh): if not good_jinja_pat.match(jinja_line): bad_jinja.append(jinja_line) bad_lines.append(line_number) if bad_jinja: lints.append( ""Jinja2 variable definitions are suggested to "" ""take a ``{{%set"" ""="" ""%}}`` form. See lines "" ""{}"".format(bad_lines) ) # 21: Legacy usage of compilers if build_reqs and (""toolchain"" in build_reqs): lints.append( ""Using toolchain directly in this manner is deprecated. 
Consider "" ""using the compilers outlined "" ""[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."" ) # 22: Single space in pinned requirements for section, requirements in requirements_section.items(): for requirement in requirements or []: req, _, _ = requirement.partition(""#"") if ""{{"" in req: continue parts = req.split() if len(parts) > 2 and parts[1] in [ ""!="", ""="", ""=="", "">"", ""<"", ""<="", "">="", ]: # check for too many spaces lints.append( ( ""``requirements: {section}: {requirement}`` should not "" ""contain a space between relational operator and the version, i.e. "" ""``{name} {pin}``"" ).format( section=section, requirement=requirement, name=parts[0], pin="""".join(parts[1:]), ) ) continue # check that there is a space if there is a pin bad_char_idx = [(parts[0].find(c), c) for c in ""><=""] bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0] if bad_char_idx: bad_char_idx.sort() i = bad_char_idx[0][0] lints.append( ( ""``requirements: {section}: {requirement}`` must "" ""contain a space between the name and the pin, i.e. "" ""``{name} {pin}``"" ).format( section=section, requirement=requirement, name=parts[0][:i], pin=parts[0][i:] + """".join(parts[1:]), ) ) continue # 23: non noarch builds shouldn't use version constraints on python and r-base check_languages = [""python"", ""r-base""] host_reqs = requirements_section.get(""host"") or [] run_reqs = requirements_section.get(""run"") or [] for language in check_languages: if build_section.get(""noarch"") is None and not outputs_section: filtered_host_reqs = [ req for req in host_reqs if req.partition("" "")[0] == str(language) ] filtered_run_reqs = [ req for req in run_reqs if req.partition("" "")[0] == str(language) ] if filtered_host_reqs and not filtered_run_reqs: lints.append( ""If {0} is a host requirement, it should be a run requirement."".format( str(language) ) ) for reqs in [filtered_host_reqs, filtered_run_reqs]: if str(language) in reqs: continue for req in reqs: constraint = req.split("" "", 1)[1] if constraint.startswith("">"") or constraint.startswith( ""<"" ): lints.append( ""Non noarch packages should have {0} requirement without any version constraints."".format( str(language) ) ) # 24: jinja2 variable references should be {{var}} if recipe_dir is not None and os.path.exists(meta_fname): bad_vars = [] bad_lines = [] with io.open(meta_fname, ""rt"") as fh: for i, line in enumerate(fh.readlines()): for m in JINJA_VAR_PAT.finditer(line): if m.group(1) is not None: var = m.group(1) if var != "" %s "" % var.strip(): bad_vars.append(m.group(1).strip()) bad_lines.append(i + 1) if bad_vars: hints.append( ""Jinja2 variable references are suggested to "" ""take a ``{{}}``"" "" form. See lines %s."" % (bad_lines,) ) # 25: require a lower bound on python version if build_section.get(""noarch"") == ""python"" and not outputs_section: for req in run_reqs: if req.startswith(""python"") and req != ""python"": break else: lints.append( ""noarch: python recipes are recommended to have a lower bound "" ""on minimum python bound. This recommendation will become "" ""requirement in a future version."" ) # hints # 1: suggest pip if ""script"" in build_section: scripts = build_section[""script""] if isinstance(scripts, str): scripts = [scripts] for script in scripts: if ""python setup.py install"" in script: hints.append( ""Whenever possible python packages should use pip. 
"" ""See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"" ) # 2: suggest python noarch (skip on feedstocks) if ( build_section.get(""noarch"") is None and build_reqs and not any([""_compiler_stub"" in b for b in build_reqs]) and (""pip"" in build_reqs) and (is_staged_recipes or not conda_forge) ): with io.open(meta_fname, ""rt"") as fh: in_runreqs = False no_arch_possible = True for line in fh: line_s = line.strip() if line_s == ""host:"" or line_s == ""run:"": in_runreqs = True runreqs_spacing = line[: -len(line.lstrip())] continue if line_s.startswith(""skip:"") and is_selector_line(line): no_arch_possible = False break if in_runreqs: if runreqs_spacing == line[: -len(line.lstrip())]: in_runreqs = False continue if is_selector_line(line): no_arch_possible = False break if no_arch_possible: hints.append( ""Whenever possible python packages should use noarch. "" ""See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"" ) # 3: suggest fixing all recipe/*.sh shellcheck findings shellcheck_enabled = False shell_scripts = [] if recipe_dir: shell_scripts = glob(os.path.join(recipe_dir, ""*.sh"")) # support feedstocks and staged-recipes forge_yaml = glob( os.path.join(recipe_dir, "".."", ""conda-forge.yml"") ) or glob( os.path.join(recipe_dir, "".."", "".."", ""conda-forge.yml""), ) if shell_scripts and forge_yaml: with open(forge_yaml[0], ""r"") as fh: code = get_yaml().load(fh) shellcheck_enabled = code.get(""shellcheck"", {}).get( ""enabled"", shellcheck_enabled ) if shellcheck_enabled and shutil.which(""shellcheck"") and shell_scripts: MAX_SHELLCHECK_LINES = 50 cmd = [ ""shellcheck"", ""--enable=all"", ""--shell=bash"", # SC2154: var is referenced but not assigned, # see https://github.com/koalaman/shellcheck/wiki/SC2154 ""--exclude=SC2154"", ] p = subprocess.Popen( cmd + shell_scripts, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env={ ""PATH"": os.getenv(""PATH"") }, # exclude other env variables to protect against token leakage ) sc_stdout, _ = p.communicate() if p.returncode == 1: # All files successfully scanned with some issues. findings = ( sc_stdout.decode(sys.stdout.encoding) .replace(""\r\n"", ""\n"") .splitlines() ) hints.append( ""Whenever possible fix all shellcheck findings ('"" + "" "".join(cmd) + "" recipe/*.sh -f diff | git apply' helps)"" ) hints.extend(findings[:50]) if len(findings) > MAX_SHELLCHECK_LINES: hints.append( ""Output restricted, there are '%s' more lines."" % (len(findings) - MAX_SHELLCHECK_LINES) ) elif p.returncode != 0: # Something went wrong. 
hints.append( ""There have been errors while scanning with shellcheck."" ) # 4: Check for SPDX import license_expression license = about_section.get(""license"", """") licensing = license_expression.Licensing() parsed_exceptions = [] try: parsed_licenses = [] parsed_licenses_with_exception = licensing.license_symbols( license.strip(), decompose=False ) for l in parsed_licenses_with_exception: if isinstance(l, license_expression.LicenseWithExceptionSymbol): parsed_licenses.append(l.license_symbol.key) parsed_exceptions.append(l.exception_symbol.key) else: parsed_licenses.append(l.key) except license_expression.ExpressionError: parsed_licenses = [license] licenseref_regex = re.compile(""^LicenseRef[a-zA-Z0-9\-.]*$"") filtered_licenses = [] for license in parsed_licenses: if not licenseref_regex.match(license): filtered_licenses.append(license) with open( os.path.join(os.path.dirname(__file__), ""licenses.txt""), ""r"" ) as f: expected_licenses = f.readlines() expected_licenses = set([l.strip() for l in expected_licenses]) with open( os.path.join(os.path.dirname(__file__), ""license_exceptions.txt""), ""r"" ) as f: expected_exceptions = f.readlines() expected_exceptions = set([l.strip() for l in expected_exceptions]) if set(filtered_licenses) - expected_licenses: hints.append( ""License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression.\n\n"" ""Documentation on acceptable licenses can be found "" ""[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."" ) if set(parsed_exceptions) - expected_exceptions: hints.append( ""License exception is not an SPDX exception.\n\n"" ""Documentation on acceptable licenses can be found "" ""[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."" ) return lints, hints ","def lintify(meta, recipe_dir=None, conda_forge=False): lints = [] hints = [] major_sections = list(meta.keys()) # If the recipe_dir exists (no guarantee within this function) , we can # find the meta.yaml within it. meta_fname = os.path.join(recipe_dir or """", ""meta.yaml"") sources_section = get_section(meta, ""source"", lints) build_section = get_section(meta, ""build"", lints) requirements_section = get_section(meta, ""requirements"", lints) test_section = get_section(meta, ""test"", lints) about_section = get_section(meta, ""about"", lints) extra_section = get_section(meta, ""extra"", lints) package_section = get_section(meta, ""package"", lints) outputs_section = get_section(meta, ""outputs"", lints) recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else ""recipe"" is_staged_recipes = recipe_dirname != ""recipe"" # 0: Top level keys should be expected unexpected_sections = [] for section in major_sections: if section not in EXPECTED_SECTION_ORDER: lints.append( ""The top level meta key {} is unexpected"".format(section) ) unexpected_sections.append(section) for section in unexpected_sections: major_sections.remove(section) # 1: Top level meta.yaml keys should have a specific order. lint_section_order(major_sections, lints) # 2: The about section should have a home, license and summary. lint_about_contents(about_section, lints) # 3a: The recipe should have some maintainers. 
if not extra_section.get(""recipe-maintainers"", []): lints.append( ""The recipe could do with some maintainers listed in "" ""the `extra/recipe-maintainers` section."" ) # 3b: Maintainers should be a list if not ( isinstance(extra_section.get(""recipe-maintainers"", []), Sequence) and not isinstance( extra_section.get(""recipe-maintainers"", []), str_type ) ): lints.append(""Recipe maintainers should be a json list."") # 4: The recipe should have some tests. if not any(key in TEST_KEYS for key in test_section): a_test_file_exists = recipe_dir is not None and any( os.path.exists(os.path.join(recipe_dir, test_file)) for test_file in TEST_FILES ) if not a_test_file_exists: has_outputs_test = False no_test_hints = [] if outputs_section: for out in outputs_section: test_out = get_section(out, ""test"", lints) if any(key in TEST_KEYS for key in test_out): has_outputs_test = True else: no_test_hints.append( ""It looks like the '{}' output doesn't "" ""have any tests."".format(out.get(""name"", ""???"")) ) if has_outputs_test: hints.extend(no_test_hints) else: lints.append(""The recipe must have some tests."") # 5: License cannot be 'unknown.' license = about_section.get(""license"", """").lower() if ""unknown"" == license.strip(): lints.append(""The recipe license cannot be unknown."") # 6: Selectors should be in a tidy form. if recipe_dir is not None and os.path.exists(meta_fname): bad_selectors = [] bad_lines = [] # Good selectors look like "".*\s\s#\s[...]"" good_selectors_pat = re.compile(r""(.+?)\s{2,}#\s\[(.+)\](?(2).*)$"") with io.open(meta_fname, ""rt"") as fh: for selector_line, line_number in selector_lines(fh): if not good_selectors_pat.match(selector_line): bad_selectors.append(selector_line) bad_lines.append(line_number) if bad_selectors: lints.append( ""Selectors are suggested to take a "" ""``#[]`` form."" "" See lines {}"".format(bad_lines) ) # 7: The build section should have a build number. if build_section.get(""number"", None) is None: lints.append(""The recipe must have a `build/number` section."") # 8: The build section should be before the run section in requirements. seen_requirements = [ k for k in requirements_section if k in REQUIREMENTS_ORDER ] requirements_order_sorted = sorted( seen_requirements, key=REQUIREMENTS_ORDER.index ) if seen_requirements != requirements_order_sorted: lints.append( ""The `requirements/` sections should be defined "" ""in the following order: "" + "", "".join(REQUIREMENTS_ORDER) + ""; instead saw: "" + "", "".join(seen_requirements) + ""."" ) # 9: Files downloaded should have a hash. for source_section in sources_section: if ""url"" in source_section and not ( {""sha1"", ""sha256"", ""md5""} & set(source_section.keys()) ): lints.append( ""When defining a source/url please add a sha256, sha1 "" ""or md5 checksum (sha256 preferably)."" ) # 10: License should not include the word 'license'. license = about_section.get(""license"", """").lower() if ( ""license"" in license.lower() and ""unlicense"" not in license.lower() and ""licenseref"" not in license.lower() and ""-license"" not in license.lower() ): lints.append( ""The recipe `license` should not include the word "" '""License"".' ) # 11: There should be one empty line at the end of the file. 
if recipe_dir is not None and os.path.exists(meta_fname): with io.open(meta_fname, ""r"") as f: lines = f.read().split(""\n"") # Count the number of empty lines from the end of the file empty_lines = itertools.takewhile(lambda x: x == """", reversed(lines)) end_empty_lines_count = len(list(empty_lines)) if end_empty_lines_count > 1: lints.append( ""There are {} too many lines. "" ""There should be one empty line at the end of the "" ""file."".format(end_empty_lines_count - 1) ) elif end_empty_lines_count < 1: lints.append( ""There are too few lines. There should be one empty "" ""line at the end of the file."" ) # 12: License family must be valid (conda-build checks for that) try: ensure_valid_license_family(meta) except RuntimeError as e: lints.append(str(e)) # 12a: License family must be valid (conda-build checks for that) license_family = about_section.get(""license_family"", license).lower() license_file = about_section.get(""license_file"", None) if not license_file and any( f for f in NEEDED_FAMILIES if f in license_family ): lints.append(""license_file entry is missing, but is required."") # 13: Check that the recipe name is valid recipe_name = package_section.get(""name"", """").strip() if re.match(""^[a-z0-9_\-.]+$"", recipe_name) is None: lints.append( ""Recipe name has invalid characters. only lowercase alpha, numeric, "" ""underscores, hyphens and dots allowed"" ) # 14: Run conda-forge specific lints if conda_forge: run_conda_forge_specific(meta, recipe_dir, lints, hints) # 15: Check if we are using legacy patterns build_reqs = requirements_section.get(""build"", None) if build_reqs and (""numpy x.x"" in build_reqs): lints.append( ""Using pinned numpy packages is a deprecated pattern. Consider "" ""using the method outlined "" ""[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."" ) # 16: Subheaders should be in the allowed subheadings for section in major_sections: expected_subsections = FIELDS.get(section, []) if not expected_subsections: continue for subsection in get_section(meta, section, lints): if ( section != ""source"" and section != ""outputs"" and subsection not in expected_subsections ): lints.append( ""The {} section contained an unexpected "" ""subsection name. {} is not a valid subsection"" "" name."".format(section, subsection) ) elif section == ""source"" or section == ""outputs"": for source_subsection in subsection: if source_subsection not in expected_subsections: lints.append( ""The {} section contained an unexpected "" ""subsection name. {} is not a valid subsection"" "" name."".format(section, source_subsection) ) # 17: noarch doesn't work with selectors for runtime dependencies if build_section.get(""noarch"") is not None and os.path.exists(meta_fname): with io.open(meta_fname, ""rt"") as fh: in_runreqs = False for line in fh: line_s = line.strip() if line_s == ""host:"" or line_s == ""run:"": in_runreqs = True runreqs_spacing = line[: -len(line.lstrip())] continue if line_s.startswith(""skip:"") and is_selector_line(line): lints.append( ""`noarch` packages can't have selectors. If "" ""the selectors are necessary, please remove "" ""`noarch: {}`."".format(build_section[""noarch""]) ) break if in_runreqs: if runreqs_spacing == line[: -len(line.lstrip())]: in_runreqs = False continue if is_selector_line(line): lints.append( ""`noarch` packages can't have selectors. 
If "" ""the selectors are necessary, please remove "" ""`noarch: {}`."".format(build_section[""noarch""]) ) break # 19: check version if package_section.get(""version"") is not None: ver = str(package_section.get(""version"")) try: conda_build.conda_interface.VersionOrder(ver) except: lints.append( ""Package version {} doesn't match conda spec"".format(ver) ) # 20: Jinja2 variable definitions should be nice. if recipe_dir is not None and os.path.exists(meta_fname): bad_jinja = [] bad_lines = [] # Good Jinja2 variable definitions look like ""{% set .+ = .+ %}"" good_jinja_pat = re.compile(r""\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}"") with io.open(meta_fname, ""rt"") as fh: for jinja_line, line_number in jinja_lines(fh): if not good_jinja_pat.match(jinja_line): bad_jinja.append(jinja_line) bad_lines.append(line_number) if bad_jinja: lints.append( ""Jinja2 variable definitions are suggested to "" ""take a ``{{%set"" ""="" ""%}}`` form. See lines "" ""{}"".format(bad_lines) ) # 21: Legacy usage of compilers if build_reqs and (""toolchain"" in build_reqs): lints.append( ""Using toolchain directly in this manner is deprecated. Consider "" ""using the compilers outlined "" ""[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."" ) # 22: Single space in pinned requirements for section, requirements in requirements_section.items(): for requirement in requirements or []: req, _, _ = requirement.partition(""#"") if ""{{"" in req: continue parts = req.split() if len(parts) > 2 and parts[1] in [ ""!="", ""="", ""=="", "">"", ""<"", ""<="", "">="", ]: # check for too many spaces lints.append( ( ""``requirements: {section}: {requirement}`` should not "" ""contain a space between relational operator and the version, i.e. "" ""``{name} {pin}``"" ).format( section=section, requirement=requirement, name=parts[0], pin="""".join(parts[1:]), ) ) continue # check that there is a space if there is a pin bad_char_idx = [(parts[0].find(c), c) for c in ""><=""] bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0] if bad_char_idx: bad_char_idx.sort() i = bad_char_idx[0][0] lints.append( ( ""``requirements: {section}: {requirement}`` must "" ""contain a space between the name and the pin, i.e. 
"" ""``{name} {pin}``"" ).format( section=section, requirement=requirement, name=parts[0][:i], pin=parts[0][i:] + """".join(parts[1:]), ) ) continue # 23: non noarch builds shouldn't use version constraints on python and r-base check_languages = [""python"", ""r-base""] host_reqs = requirements_section.get(""host"") or [] run_reqs = requirements_section.get(""run"") or [] for language in check_languages: if build_section.get(""noarch"") is None and not outputs_section: filtered_host_reqs = [ req for req in host_reqs if req.partition("" "")[0] == str(language) ] filtered_run_reqs = [ req for req in run_reqs if req.partition("" "")[0] == str(language) ] if filtered_host_reqs and not filtered_run_reqs: lints.append( ""If {0} is a host requirement, it should be a run requirement."".format( str(language) ) ) for reqs in [filtered_host_reqs, filtered_run_reqs]: if str(language) in reqs: continue for req in reqs: constraint = req.split("" "", 1)[1] if constraint.startswith("">"") or constraint.startswith( ""<"" ): lints.append( ""Non noarch packages should have {0} requirement without any version constraints."".format( str(language) ) ) # 24: jinja2 variable references should be {{var}} if recipe_dir is not None and os.path.exists(meta_fname): bad_vars = [] bad_lines = [] with io.open(meta_fname, ""rt"") as fh: for i, line in enumerate(fh.readlines()): for m in JINJA_VAR_PAT.finditer(line): if m.group(1) is not None: var = m.group(1) if var != "" %s "" % var.strip(): bad_vars.append(m.group(1).strip()) bad_lines.append(i + 1) if bad_vars: hints.append( ""Jinja2 variable references are suggested to "" ""take a ``{{}}``"" "" form. See lines %s."" % (bad_lines,) ) # 25: require a lower bound on python version if build_section.get(""noarch"") == ""python"" and not outputs_section: for req in run_reqs: if (req.strip().split()[0] == ""python"") and (req != ""python""): break else: lints.append( ""noarch: python recipes are recommended to have a lower bound "" ""on minimum python bound. This recommendation will become "" ""requirement in a future version."" ) # hints # 1: suggest pip if ""script"" in build_section: scripts = build_section[""script""] if isinstance(scripts, str): scripts = [scripts] for script in scripts: if ""python setup.py install"" in script: hints.append( ""Whenever possible python packages should use pip. "" ""See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"" ) # 2: suggest python noarch (skip on feedstocks) if ( build_section.get(""noarch"") is None and build_reqs and not any([""_compiler_stub"" in b for b in build_reqs]) and (""pip"" in build_reqs) and (is_staged_recipes or not conda_forge) ): with io.open(meta_fname, ""rt"") as fh: in_runreqs = False no_arch_possible = True for line in fh: line_s = line.strip() if line_s == ""host:"" or line_s == ""run:"": in_runreqs = True runreqs_spacing = line[: -len(line.lstrip())] continue if line_s.startswith(""skip:"") and is_selector_line(line): no_arch_possible = False break if in_runreqs: if runreqs_spacing == line[: -len(line.lstrip())]: in_runreqs = False continue if is_selector_line(line): no_arch_possible = False break if no_arch_possible: hints.append( ""Whenever possible python packages should use noarch. 
"" ""See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"" ) # 3: suggest fixing all recipe/*.sh shellcheck findings shellcheck_enabled = False shell_scripts = [] if recipe_dir: shell_scripts = glob(os.path.join(recipe_dir, ""*.sh"")) # support feedstocks and staged-recipes forge_yaml = glob( os.path.join(recipe_dir, "".."", ""conda-forge.yml"") ) or glob( os.path.join(recipe_dir, "".."", "".."", ""conda-forge.yml""), ) if shell_scripts and forge_yaml: with open(forge_yaml[0], ""r"") as fh: code = get_yaml().load(fh) shellcheck_enabled = code.get(""shellcheck"", {}).get( ""enabled"", shellcheck_enabled ) if shellcheck_enabled and shutil.which(""shellcheck"") and shell_scripts: MAX_SHELLCHECK_LINES = 50 cmd = [ ""shellcheck"", ""--enable=all"", ""--shell=bash"", # SC2154: var is referenced but not assigned, # see https://github.com/koalaman/shellcheck/wiki/SC2154 ""--exclude=SC2154"", ] p = subprocess.Popen( cmd + shell_scripts, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env={ ""PATH"": os.getenv(""PATH"") }, # exclude other env variables to protect against token leakage ) sc_stdout, _ = p.communicate() if p.returncode == 1: # All files successfully scanned with some issues. findings = ( sc_stdout.decode(sys.stdout.encoding) .replace(""\r\n"", ""\n"") .splitlines() ) hints.append( ""Whenever possible fix all shellcheck findings ('"" + "" "".join(cmd) + "" recipe/*.sh -f diff | git apply' helps)"" ) hints.extend(findings[:50]) if len(findings) > MAX_SHELLCHECK_LINES: hints.append( ""Output restricted, there are '%s' more lines."" % (len(findings) - MAX_SHELLCHECK_LINES) ) elif p.returncode != 0: # Something went wrong. hints.append( ""There have been errors while scanning with shellcheck."" ) # 4: Check for SPDX import license_expression license = about_section.get(""license"", """") licensing = license_expression.Licensing() parsed_exceptions = [] try: parsed_licenses = [] parsed_licenses_with_exception = licensing.license_symbols( license.strip(), decompose=False ) for l in parsed_licenses_with_exception: if isinstance(l, license_expression.LicenseWithExceptionSymbol): parsed_licenses.append(l.license_symbol.key) parsed_exceptions.append(l.exception_symbol.key) else: parsed_licenses.append(l.key) except license_expression.ExpressionError: parsed_licenses = [license] licenseref_regex = re.compile(""^LicenseRef[a-zA-Z0-9\-.]*$"") filtered_licenses = [] for license in parsed_licenses: if not licenseref_regex.match(license): filtered_licenses.append(license) with open( os.path.join(os.path.dirname(__file__), ""licenses.txt""), ""r"" ) as f: expected_licenses = f.readlines() expected_licenses = set([l.strip() for l in expected_licenses]) with open( os.path.join(os.path.dirname(__file__), ""license_exceptions.txt""), ""r"" ) as f: expected_exceptions = f.readlines() expected_exceptions = set([l.strip() for l in expected_exceptions]) if set(filtered_licenses) - expected_licenses: hints.append( ""License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression.\n\n"" ""Documentation on acceptable licenses can be found "" ""[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."" ) if set(parsed_exceptions) - expected_exceptions: hints.append( ""License exception is not an SPDX exception.\n\n"" ""Documentation on acceptable licenses can be found "" ""[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."" ) return lints, hints " 46917,"def 
convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path: (str) path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir: (str) path to a directory containing a correctly shaped checkpoint save_path: (str) directory to save the new model """""" hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob(""*.ckpt"")) assert ckpt_files, f""could not find any ckpt files inside the {pl_ckpt_path} directory"" if len(ckpt_files) > 1: logger.info(f""averaging {ckpt_files}"") state_dicts = [sanitize(torch.load(x, map_location=""cpu"")[""state_dict""]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f""missing keys: {missing}"" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant ","def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path: (str) path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir: (str) path to a directory containing a correctly shaped checkpoint save_path (:obj:`str`): Directory to save the new model """""" hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob(""*.ckpt"")) assert ckpt_files, f""could not find any ckpt files inside the {pl_ckpt_path} directory"" if len(ckpt_files) > 1: logger.info(f""averaging {ckpt_files}"") state_dicts = [sanitize(torch.load(x, map_location=""cpu"")[""state_dict""]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f""missing keys: {missing}"" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant " 5800,"def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'): """""" Performs the one-sample Kolmogorov-Smirnov test for goodness of fit. This test compares the underlying distribution F(x) of a sample against a given continuous distribution G(x). See Notes for a description of the available null and alternative hypotheses. Parameters ---------- x : array_like a 1-D array of observations of iid random variables. cdf : callable callable used to calculate the cdf. args : tuple, sequence, optional Distribution parameters, used with `cdf`. alternative : {'two-sided', 'less', 'greater'}, optional Defines the null and alternative hypotheses. Default is 'two-sided'. 
Please see explanations in the Notes below. mode : {'auto', 'exact', 'approx', 'asymp'}, optional Defines the distribution used for calculating the p-value. The following options are available (default is 'auto'): * 'auto' : selects one of the other options. * 'exact' : uses the exact distribution of test statistic. * 'approx' : approximates the two-sided probability with twice the one-sided probability * 'asymp': uses asymptotic distribution of test statistic Returns ------- statistic : float KS test statistic, either D, D+ or D- (depending on the value of 'alternative') pvalue : float One-tailed or two-tailed p-value. See Also -------- ks_2samp, kstest Notes ----- There are three options for the null and corresponding alternative hypothesis that can be selected using the `alternative` parameter. - `two-sided`: The null hypothesis is that the two distributions are identical, F(x)=G(x) for all x; the alternative is that they are not identical. - `less`: The null hypothesis is that F(x) >= G(x) for all x; the alternative is that F(x) < G(x) for at least one x. - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the alternative is that F(x) > G(x) for at least one x. Note that the alternative hypotheses describe the *CDFs* of the underlying distributions, not the observed values. For example, suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in x1 tend to be less than those in x2. Examples -------- >>> from scipy import stats >>> rng = np.random.default_rng(1638083107694713882823079058616272161) Suppose we wish to assess whether data are distributed according to the standard normal distribution. We choose a confidence level of 95%; that is, we will reject the null hypothesis in favor of the alternative if the p-value is less than 0.05. When testing uniformly distributed data, we would expect the null hypothesis to be rejected. >>> stats.ks_1samp(stats.uniform.rvs(size=100, random_state=rng), ... stats.norm.cdf) KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23) Indeed, the p-value is lower than our threshold of 0.05, so we reject the null hypothesis that the data are normally distributed. When testing random variates from the standard normal distribution, we expect the data to be consistent with the null hypothesis most of the time. >>> x = stats.norm.rvs(size=100, random_state=rng) >>> stats.ks_1samp(x, stats.norm.cdf) KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717) As expected, the p-value of 0.92 is not below our threshold of 0.05, so we cannot reject the null hypothesis. Suppose, however, that the location of the normally distributed random variates is shifted toward larger values. In this case, the CDF of the random variates tends to be *less* than the CDF of the standard normal. 
Therefore, we would expect the null hypothesis to be rejected with ``alternative='less'``: >>> x = x + 0.5 >>> stats.ks_1samp(x, stats.norm.cdf, alternative='less') KstestResult(statistic=0.21707976606981766, pvalue=6.435039066115298e-05) and ``alternative='two-sided'``: >>> stats.ks_1samp(x, stats.norm.cdf, alternative='two-sided') KstestResult(statistic=0.21707976606981766, pvalue=0.00012870078132230596) but not with ``alternative='greater'``: >>> stats.ks_1samp(x, stats.norm.cdf, alternative='greater') KstestResult(statistic=0.003970612076116944, pvalue=0.9941219063026167) """""" alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get( alternative.lower()[0], alternative) if alternative not in ['two-sided', 'greater', 'less']: raise ValueError(""Unexpected alternative %s"" % alternative) if np.ma.is_masked(x): x = x.compressed() N = len(x) x = np.sort(x) cdfvals = cdf(x, *args) if alternative == 'greater': Dplus = _compute_dplus(cdfvals) return KstestResult(Dplus, distributions.ksone.sf(Dplus, N)) if alternative == 'less': Dminus = _compute_dminus(cdfvals) return KstestResult(Dminus, distributions.ksone.sf(Dminus, N)) # alternative == 'two-sided': Dplus = _compute_dplus(cdfvals) Dminus = _compute_dminus(cdfvals) D = np.max([Dplus, Dminus]) if mode == 'auto': # Always select exact mode = 'exact' if mode == 'exact': prob = distributions.kstwo.sf(D, N) elif mode == 'asymp': prob = distributions.kstwobign.sf(D * np.sqrt(N)) else: # mode == 'approx' prob = 2 * distributions.ksone.sf(D, N) prob = np.clip(prob, 0, 1) return KstestResult(D, prob) ","def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'): """""" Performs the one-sample Kolmogorov-Smirnov test for goodness of fit. This test compares the underlying distribution F(x) of a sample against a given continuous distribution G(x). See Notes for a description of the available null and alternative hypotheses. Parameters ---------- x : array_like a 1-D array of observations of iid random variables. cdf : callable callable used to calculate the cdf. args : tuple, sequence, optional Distribution parameters, used with `cdf`. alternative : {'two-sided', 'less', 'greater'}, optional Defines the null and alternative hypotheses. Default is 'two-sided'. Please see explanations in the Notes below. mode : {'auto', 'exact', 'approx', 'asymp'}, optional Defines the distribution used for calculating the p-value. The following options are available (default is 'auto'): * 'auto' : selects one of the other options. * 'exact' : uses the exact distribution of test statistic. * 'approx' : approximates the two-sided probability with twice the one-sided probability * 'asymp': uses asymptotic distribution of test statistic Returns ------- statistic : float KS test statistic, either D, D+ or D- (depending on the value of 'alternative') pvalue : float One-tailed or two-tailed p-value. See Also -------- ks_2samp, kstest Notes ----- There are three options for the null and corresponding alternative hypothesis that can be selected using the `alternative` parameter. - `two-sided`: The null hypothesis is that the two distributions are identical, F(x)=G(x) for all x; the alternative is that they are not identical. - `less`: The null hypothesis is that F(x) >= G(x) for all x; the alternative is that F(x) < G(x) for at least one x. - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the alternative is that F(x) > G(x) for at least one x. 
Note that the alternative hypotheses describe the *CDFs* of the underlying distributions, not the observed values. For example, suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in x1 tend to be less than those in x2. Examples -------- >>> from scipy import stats >>> rng = np.random.default_rng() Suppose we wish to assess whether data are distributed according to the standard normal distribution. We choose a confidence level of 95%; that is, we will reject the null hypothesis in favor of the alternative if the p-value is less than 0.05. When testing uniformly distributed data, we would expect the null hypothesis to be rejected. >>> stats.ks_1samp(stats.uniform.rvs(size=100, random_state=rng), ... stats.norm.cdf) KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23) Indeed, the p-value is lower than our threshold of 0.05, so we reject the null hypothesis that the data are normally distributed. When testing random variates from the standard normal distribution, we expect the data to be consistent with the null hypothesis most of the time. >>> x = stats.norm.rvs(size=100, random_state=rng) >>> stats.ks_1samp(x, stats.norm.cdf) KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717) As expected, the p-value of 0.92 is not below our threshold of 0.05, so we cannot reject the null hypothesis. Suppose, however, that the location of the normally distributed random variates is shifted toward larger values. In this case, the CDF of the random variates tends to be *less* than the CDF of the standard normal. Therefore, we would expect the null hypothesis to be rejected with ``alternative='less'``: >>> x = x + 0.5 >>> stats.ks_1samp(x, stats.norm.cdf, alternative='less') KstestResult(statistic=0.21707976606981766, pvalue=6.435039066115298e-05) and ``alternative='two-sided'``: >>> stats.ks_1samp(x, stats.norm.cdf, alternative='two-sided') KstestResult(statistic=0.21707976606981766, pvalue=0.00012870078132230596) but not with ``alternative='greater'``: >>> stats.ks_1samp(x, stats.norm.cdf, alternative='greater') KstestResult(statistic=0.003970612076116944, pvalue=0.9941219063026167) """""" alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get( alternative.lower()[0], alternative) if alternative not in ['two-sided', 'greater', 'less']: raise ValueError(""Unexpected alternative %s"" % alternative) if np.ma.is_masked(x): x = x.compressed() N = len(x) x = np.sort(x) cdfvals = cdf(x, *args) if alternative == 'greater': Dplus = _compute_dplus(cdfvals) return KstestResult(Dplus, distributions.ksone.sf(Dplus, N)) if alternative == 'less': Dminus = _compute_dminus(cdfvals) return KstestResult(Dminus, distributions.ksone.sf(Dminus, N)) # alternative == 'two-sided': Dplus = _compute_dplus(cdfvals) Dminus = _compute_dminus(cdfvals) D = np.max([Dplus, Dminus]) if mode == 'auto': # Always select exact mode = 'exact' if mode == 'exact': prob = distributions.kstwo.sf(D, N) elif mode == 'asymp': prob = distributions.kstwobign.sf(D * np.sqrt(N)) else: # mode == 'approx' prob = 2 * distributions.ksone.sf(D, N) prob = np.clip(prob, 0, 1) return KstestResult(D, prob) " 58825,"def dotu(x, y, out=None): """"""Computes the dot product of x and y."""""" dtype = x.dtype.char if dtype in 'fd': return dot(x, y, out=out) elif dtype == 'F': func = cublas.cdotu elif dtype == 'D': func = cublas.zdotu else: raise TypeError('invalid dtype') _check_two_vectors(x, y) handle = device.get_cublas_handle() result_dtype = dtype result_ptr, result, mode = _setup_result_ptr(handle, out, 
result_dtype) func(handle, x.size, x.data.ptr, 1, y.data.ptr, 1, result_ptr) cublas.setPointerMode(handle, mode) if out is None: out = result elif out.dtype != result_dtype: out[...] = result return out ","def dotu(x, y, out=None): """"""Computes the dot product of x and y."""""" dtype = x.dtype.char if dtype in 'fd': return dot(x, y, out=out) elif dtype == 'F': func = cublas.cdotu elif dtype == 'D': func = cublas.zdotu else: raise TypeError('invalid dtype') _check_two_vectors(x, y) handle = device.get_cublas_handle() result_dtype = dtype result_ptr, result, orig_mode = _setup_result_ptr(handle, out, result_dtype) func(handle, x.size, x.data.ptr, 1, y.data.ptr, 1, result_ptr) cublas.setPointerMode(handle, orig_mode) if out is None: out = result elif out.dtype != result_dtype: out[...] = result return out " 37336,"def draw(program: Union[Waveform, ParametricPulse, Schedule], style: Optional[Dict[str, Any]] = None, backend: Optional[BaseBackend] = None, time_range_dt: Optional[Tuple[int, int]] = None, time_range_ns: Optional[Tuple[int, int]] = None, disable_channels: Optional[List[PulseChannel]] = None, show_snapshot: bool = True, show_framechange: bool = True, show_waveform_info: bool = True, show_barrier: bool = True, plotter: str = types.Plotter.Mpl2D, axis: Optional[Any] = None, filename: Optional[str] = None): """"""Generate visualization data for pulse programs. Args: program: Program to visualize. This program can be arbitrary Qiskit Pulse program, such as :py:class:~`qiskit.pulse.Waveform`, :py:class:~`qiskit.pulse.ParametricPulse`, and :py:class:~`qiskit.pulse.Schedule`. style: Stylesheet options. This can be dictionary or preset stylesheet classes. See :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxStandard`, :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxPublication`, and :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxDebugging` for details of preset stylesheets. See also the stylesheet section for details of configuration keys. backend: Backend object to play the input pulse program. If this object is provided, the input program is visualized with the details of hardware information. time_range_dt: Set horizontal axis limit in units of dt. time_range_ns: Set horizontal axis limit in units of ns. This is available only when `backend` object is set to the canvas. disable_channels: List of pulse channel instances not shown in the output image. show_snapshot: Set `True` to show snapshot instructions. show_framechange: Set `True` to show frame change instructions. The frame change indicates instructions that modulate phase or frequency of pulse channels. show_waveform_info: Set `True` to show additional information about waveforms. show_barrier: Set `True` to show barrier lines. plotter: Name of plotter API to generate an output image. See plotter section for details. axis: Arbitrary object passed to the plotter. If this object is provided, the plotters uses given `axis` instead of internally initializing a figure object. This object format depends on the plotter. See plotters section for details. filename: Set file path string to output image. Returns: Image data. The generated data format depends on the `plotter`. If matplotlib family is specified, this will be a `matplotlib.pyplot.Figure` data. Examples: To visualize a pulse program, you can call this function with set of control arguments. Most of appearance of the output image can be controlled by the stylesheet. Drawing with the default stylesheet. .. 
jupyter-execute:: from qiskit import QuantumCircuit, transpile, schedule from qiskit.visualization.pulse_v2 import draw from qiskit.test.mock import FakeAlmaden qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.measure_all() qc = transpile(qc, FakeAlmaden()) sched = schedule(qc, FakeAlmaden()) # draw draw(sched, backend=FakeAlmaden()) Drawing with the stylesheet suited for publication. .. jupyter-execute:: from qiskit import QuantumCircuit, transpile, schedule from qiskit.visualization.pulse_v2 import draw, IqxPublication from qiskit.test.mock import FakeAlmaden qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.measure_all() qc = transpile(qc, FakeAlmaden()) sched = schedule(qc, FakeAlmaden()) # draw draw(sched, style=IqxPublication(), backend=FakeAlmaden()) Drawing with the stylesheet suited for program debugging. .. jupyter-execute:: from qiskit import QuantumCircuit, transpile, schedule from qiskit.visualization.pulse_v2 import draw, IqxDebugging from qiskit.test.mock import FakeAlmaden qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.measure_all() qc = transpile(qc, FakeAlmaden()) sched = schedule(qc, FakeAlmaden()) # draw draw(sched, style=IqxDebugging(), backend=FakeAlmaden()) You can partially customize a preset stylesheet when call it. ```python my_style = { 'formatter.channel_scaling.drive': 5, 'formatter.channel_scaling.control': 1, 'formatter.channel_scaling.measure': 5 } style = IqxStandard(**my_style) # draw draw(sched, style=style, backend=FakeAlmaden()) ``` In the same way as above, you can create custom generator or layout functions and update existing stylesheet with custom functions. This feature enables you to control the most of appearance of the output image without modifying the codebase of the pulse drawer. Plotters: - `mpl2d`: Matplotlib API to generate 2D image. Charts are placed along y axis with vertical offset. This API takes matplotlib.axes.Axes as `axis` input. Stylesheet: - formatter.general.fig_width: Width of output image (default `13`). - formatter.general.fig_chart_height: Height of output image per chart. The height of each chart is multiplied with this factor and the sum of all chart heights becomes the height of output image (default `1.5`). - formatter.general.dpi: Dot per inch of image if `filename` is set (default `150`). - formatter.general.vertical_resolution: Vertical resolution of the pulse envelope. The change of data points below this limit is ignored (default `1e-6`). - formatter.general.max_scale: Maximum scaling factor of each chart. This factor is considered when chart auto-scaling is enabled (default `100`). - formatter.color.fill_waveform_w: List of color codes assigned to the real and the imaginary part envelope of waveform or parametric pulse drawing (default `['#648fff', '#002999']`). - formatter.color.fill_waveform_d: List of color codes assigned to the real and the imaginary part envelope of waveforms in the drive channels in the schedule drawing (default `['#648fff', '#002999']`). - formatter.color.fill_waveform_u: List of color codes assigned to the real and the imaginary part envelope of waveforms in the control channels in the schedule drawing (default `['#ffb000', '#994A00']`). - formatter.color.fill_waveform_m: List of color codes assigned to the real and the imaginary part envelope of waveforms in the measure channels in the schedule drawing (default `['#dc267f', '#760019']`). 
- formatter.color.fill_waveform_a: List of color codes assigned to the real and the imaginary part envelope of waveforms in the acquire channels in the schedule drawing (default `['#dc267f', '#760019']`). - formatter.color.baseline: Color code of lines of zero line of each chart (default `'#000000'`). - formatter.color.barrier: Color code of lines of barrier (default `'#222222'`). - formatter.color.background: Color code of the face color of canvas (default `'#f2f3f4'`). - formatter.color.fig_title: Color code of the figure title text (default `'#000000'`). - formatter.color.annotate: Color code of annotation texts in the canvas (default `'#222222'`). - formatter.color.frame_change: Color code of the symbol for frame changes (default `'#000000'`). - formatter.color.snapshot: Color code of the symbol for snapshot (default `'#000000'`) - formatter.color.axis_label: Color code of axis labels (default `'#000000'`). - formatter.alpha.fill_waveform: Transparency of waveforms. A value in the range from `0` to `1`. The value `0` gives completely transparent waveforms (default `0.3`). - formatter.alpha.baseline: Transparency of base lines. A value in the range from `0` to `1`. The value `0` gives completely transparent base lines (default `1.0`). - formatter.alpha.barrier: Transparency of barrier lines. A value in the range from `0` to `1`. The value `0` gives completely transparent barrier lines (default `0.7`). - formatter.layer.fill_waveform: Layer index of waveforms. Larger number comes in the front of the output image (default `2`). - formatter.layer.baseline: Layer index of baselines. Larger number comes in the front of the output image (default `1`). - formatter.layer.barrier: Layer index of barrier lines. Larger number comes in the front of the output image (default `1`). - formatter.layer.annotate: Layer index of annotations. Larger number comes in the front of the output image (default `5`). - formatter.layer.axis_label: Layer index of axis labels. Larger number comes in the front of the output image (default `5`). - formatter.layer.frame_change: Layer index of frame change symbols. Larger number comes in the front of the output image (default `4`). - formatter.layer.snapshot: Layer index of snapshot symbols. Larger number comes in the front of the output image (default `3`). - formatter.layer.fig_title: Layer index of the figure title. Larger number comes in the front of the output image (default `6`). - formatter.margin.top: Margin from the top boundary of the figure canvas to the surface of the first chart (default `0.5`). - formatter.margin.bottom: Margin from the bottom boundary of the figure canvas to the surface of the last chart (default `0.5`). - formatter.margin.left_percent: Margin from the left boundary of the figure canvas to the zero point of the horizontal axis. The value is in units of percentage of the whole program duration. If the duration is 100 and the value of 0.5 is set, this keeps left margin of 5 (default `0.05`). - formatter.margin.right_percent: Margin from the right boundary of the figure canvas to the left limit of the horizontal axis. The value is in units of percentage of the whole program duration. If the duration is 100 and the value of 0.5 is set, this keeps right margin of 5 (default `0.05`). - formatter.margin.between_channel: Vertical margin between charts (default `0.2`). - formatter.label_offset.pulse_name: Offset of pulse name annotations from the chart baseline (default `0.3`). 
- formatter.label_offset.chart_info: Offset of chart info annotations from the chart baseline (default `0.3`). - formatter.label_offset.frame_change: Offset of frame change annotations from the chart baseline (default `0.3`). - formatter.label_offset.snapshot: Offset of snapshot annotations from the chart baseline (default `0.3`). - formatter.text_size.axis_label: Text size of axis labels (default `15`). - formatter.text_size.annotate: Text size of annotations (default `12`). - formatter.text_size.frame_change: Text size of frame change symbols (default `20`). - formatter.text_size.snapshot: Text size of snapshot symbols (default `20`). - formatter.text_size.fig_title: Text size of the figure title (default `15`). - formatter.text_size.axis_break_symbol: Text size of axis break symbols (default `15`). - formatter.line_width.fill_waveform: Line width of the fringe of filled waveforms (default `0`). - formatter.line_width.axis_break: Line width of axis breaks. The axis break line paints over other drawing objects with the background face color (default `6`). - formatter.line_width.baseline: Line width of base lines (default `1`) - formatter.line_width.barrier: Line width of barrier lines (default `1`). - formatter.line_style.fill_waveform: Line style of the fringe of filled waveforms. This conforms to the line style spec of matplotlib (default `'-'`). - formatter.line_style.baseline: Line style of base lines. This conforms to the line style spec of matplotlib (default `'-'`). - formatter.line_style.barrier: Line style of barrier lines. This conforms to the line style spec of matplotlib (default `':'`). - formatter.channel_scaling.drive: Default scaling value of drive channel waveforms (default `1.0`). - formatter.channel_scaling.control: Default scaling value of control channel waveforms (default `1.0`). - formatter.channel_scaling.measure: Default scaling value of measure channel waveforms (default `1.0`). - formatter.channel_scaling.acquire: Default scaling value of acquire channel waveforms (default `1.0`). - formatter.channel_scaling.pos_spacing: Minimum height of chart above the baseline. Chart top is determined based on the maximum height of waveforms associated with the chart. If the maximum height is below this value, this value is set as the chart top (default 0.1). - formatter.channel_scaling.neg_spacing: Minimum height of chart below the baseline. Chart bottom is determined based on the minimum height of waveforms associated with the chart. If the minimum height is above this value, this value is set as the chart bottom (default -0.1). - formatter.axis_break.length: Waveform or idle time duration that axis break is applied. Intervals longer than this value are truncated. The value is in units of data points (default `3000`). - formatter.axis_break.max_length: Length of new waveform or idle time duration after axis break is applied. Longer intervals are truncated to this length (default `1000`). - formatter.control.apply_phase_modulation: Set `True` to apply phase modulation to the waveforms (default `True`). - formatter.control.show_snapshot_channel: Set `True` to show snapshot instructions (default `True`). - formatter.control.show_acquire_channel: Set `True` to show acquire channels (default `True`). - formatter.control.show_empty_channel: Set `True` to show charts without any waveforms (default `True`). - formatter.control.auto_chart_scaling: Set `True` to apply auto-scaling to charts (default `True`). 
- formatter.control.axis_break: Set `True` to apply axis break for long intervals (default `True`). - formatter.unicode_symbol.frame_change: Text that represents the symbol of frame change. This text is used when the plotter doesn't support latex (default u'\u21BA'). - formatter.unicode_symbol.snapshot: Text that represents the symbol of snapshot. This text is used when the plotter doesn't support latex (default u'\u21AF'). - formatter.latex_symbol.frame_change: Latex text that represents the symbol of frame change (default r'\\circlearrowleft'). - formatter.latex_symbol.snapshot: Latex text that represents the symbol of snapshot (default ''). - generator.waveform: List of callback functions that generates drawing object for waveforms. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.waveform` for more details. No default generator is set. - generator.frame: List of callback functions that generates drawing object for frame changes. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.frame` for more details. No default generator is set. - generator.chart: List of callback functions that generates drawing object for charts. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.chart` for more details. No default generator is set. - generator.snapshot: List of callback functions that generates drawing object for snapshots. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.snapshot` for more details. No default generator is set. - generator.barrier: List of callback functions that generates drawing object for barriers. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.barrier` for more details. No default generator is set. - layout.chart_channel_map: Callback function that determines the relationship between pulse channels and charts. See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details. No default layout is set. - layout.time_axis_map: Callback function that determines the layout of horizontal axis labels. See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details. No default layout is set. - layout.figure_title: Callback function that generates a string for the figure title. See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details. No default layout is set. Raises: ImportError: When required visualization package is not installed. VisualizationError: When invalid plotter API is specified. 
"""""" temp_style = stylesheet.QiskitPulseStyle() temp_style.update(style or stylesheet.IqxStandard()) if backend: device = device_info.OpenPulseBackendInfo.create_from_backend(backend) else: device = device_info.OpenPulseBackendInfo() # create empty canvas and load program canvas = core.DrawerCanvas(stylesheet=temp_style, device=device) canvas.load_program(program=program) # # update configuration # # time range if time_range_dt: canvas.set_time_range(*time_range_dt, seconds=False) if time_range_ns: canvas.set_time_range(*time_range_ns, seconds=True) # channels not shown if disable_channels: for chan in disable_channels: canvas.set_disable_channel(chan, remove=True) # show snapshots if not show_snapshot: canvas.set_disable_type(types.DrawingSymbol.SNAPSHOT, remove=True) canvas.set_disable_type(types.DrawingLabel.SNAPSHOT, remove=True) # show frame changes if not show_framechange: canvas.set_disable_type(types.DrawingSymbol.FRAME, remove=True) canvas.set_disable_type(types.DrawingLabel.FRAME, remove=True) # show waveform info if not show_waveform_info: canvas.set_disable_type(types.DrawingLabel.PULSE_INFO, remove=True) canvas.set_disable_type(types.DrawingLabel.PULSE_NAME, remove=True) # show barrier if not show_barrier: canvas.set_disable_type(types.DrawingLine.BARRIER, remove=True) canvas.update() # # Call plotter API and generate image # if plotter == types.Plotter.Mpl2D: try: from qiskit.visualization.pulse_v2.plotters import Mpl2DPlotter except ImportError: raise ImportError('Must have Matplotlib installed.') plotter_api = Mpl2DPlotter(canvas=canvas, axis=axis) plotter_api.draw() else: raise VisualizationError('Plotter API {name} is not supported.'.format(name=plotter)) # save figure if filename: plotter_api.save_file(filename=filename) return plotter_api.get_image() ","def draw(program: Union[Waveform, ParametricPulse, Schedule], style: Optional[Dict[str, Any]] = None, backend: Optional[BaseBackend] = None, time_range_dt: Optional[Tuple[int, int]] = None, time_range_ns: Optional[Tuple[int, int]] = None, disable_channels: Optional[List[PulseChannel]] = None, show_snapshot: bool = True, show_framechange: bool = True, show_waveform_info: bool = True, show_barrier: bool = True, plotter: Union[str, types.Plotter] = types.Plotter.Mpl2D, axis: Optional[Any] = None, filename: Optional[str] = None): """"""Generate visualization data for pulse programs. Args: program: Program to visualize. This program can be arbitrary Qiskit Pulse program, such as :py:class:~`qiskit.pulse.Waveform`, :py:class:~`qiskit.pulse.ParametricPulse`, and :py:class:~`qiskit.pulse.Schedule`. style: Stylesheet options. This can be dictionary or preset stylesheet classes. See :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxStandard`, :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxPublication`, and :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxDebugging` for details of preset stylesheets. See also the stylesheet section for details of configuration keys. backend: Backend object to play the input pulse program. If this object is provided, the input program is visualized with the details of hardware information. time_range_dt: Set horizontal axis limit in units of dt. time_range_ns: Set horizontal axis limit in units of ns. This is available only when `backend` object is set to the canvas. disable_channels: List of pulse channel instances not shown in the output image. show_snapshot: Set `True` to show snapshot instructions. show_framechange: Set `True` to show frame change instructions. 
The frame change indicates instructions that modulate phase or frequency of pulse channels. show_waveform_info: Set `True` to show additional information about waveforms. show_barrier: Set `True` to show barrier lines. plotter: Name of plotter API to generate an output image. See plotter section for details. axis: Arbitrary object passed to the plotter. If this object is provided, the plotters uses given `axis` instead of internally initializing a figure object. This object format depends on the plotter. See plotters section for details. filename: Set file path string to output image. Returns: Image data. The generated data format depends on the `plotter`. If matplotlib family is specified, this will be a `matplotlib.pyplot.Figure` data. Examples: To visualize a pulse program, you can call this function with set of control arguments. Most of appearance of the output image can be controlled by the stylesheet. Drawing with the default stylesheet. .. jupyter-execute:: from qiskit import QuantumCircuit, transpile, schedule from qiskit.visualization.pulse_v2 import draw from qiskit.test.mock import FakeAlmaden qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.measure_all() qc = transpile(qc, FakeAlmaden()) sched = schedule(qc, FakeAlmaden()) # draw draw(sched, backend=FakeAlmaden()) Drawing with the stylesheet suited for publication. .. jupyter-execute:: from qiskit import QuantumCircuit, transpile, schedule from qiskit.visualization.pulse_v2 import draw, IqxPublication from qiskit.test.mock import FakeAlmaden qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.measure_all() qc = transpile(qc, FakeAlmaden()) sched = schedule(qc, FakeAlmaden()) # draw draw(sched, style=IqxPublication(), backend=FakeAlmaden()) Drawing with the stylesheet suited for program debugging. .. jupyter-execute:: from qiskit import QuantumCircuit, transpile, schedule from qiskit.visualization.pulse_v2 import draw, IqxDebugging from qiskit.test.mock import FakeAlmaden qc = QuantumCircuit(2) qc.h(0) qc.cx(0, 1) qc.measure_all() qc = transpile(qc, FakeAlmaden()) sched = schedule(qc, FakeAlmaden()) # draw draw(sched, style=IqxDebugging(), backend=FakeAlmaden()) You can partially customize a preset stylesheet when call it. ```python my_style = { 'formatter.channel_scaling.drive': 5, 'formatter.channel_scaling.control': 1, 'formatter.channel_scaling.measure': 5 } style = IqxStandard(**my_style) # draw draw(sched, style=style, backend=FakeAlmaden()) ``` In the same way as above, you can create custom generator or layout functions and update existing stylesheet with custom functions. This feature enables you to control the most of appearance of the output image without modifying the codebase of the pulse drawer. Plotters: - `mpl2d`: Matplotlib API to generate 2D image. Charts are placed along y axis with vertical offset. This API takes matplotlib.axes.Axes as `axis` input. Stylesheet: - formatter.general.fig_width: Width of output image (default `13`). - formatter.general.fig_chart_height: Height of output image per chart. The height of each chart is multiplied with this factor and the sum of all chart heights becomes the height of output image (default `1.5`). - formatter.general.dpi: Dot per inch of image if `filename` is set (default `150`). - formatter.general.vertical_resolution: Vertical resolution of the pulse envelope. The change of data points below this limit is ignored (default `1e-6`). - formatter.general.max_scale: Maximum scaling factor of each chart. 
This factor is considered when chart auto-scaling is enabled (default `100`). - formatter.color.fill_waveform_w: List of color codes assigned to the real and the imaginary part envelope of waveform or parametric pulse drawing (default `['#648fff', '#002999']`). - formatter.color.fill_waveform_d: List of color codes assigned to the real and the imaginary part envelope of waveforms in the drive channels in the schedule drawing (default `['#648fff', '#002999']`). - formatter.color.fill_waveform_u: List of color codes assigned to the real and the imaginary part envelope of waveforms in the control channels in the schedule drawing (default `['#ffb000', '#994A00']`). - formatter.color.fill_waveform_m: List of color codes assigned to the real and the imaginary part envelope of waveforms in the measure channels in the schedule drawing (default `['#dc267f', '#760019']`). - formatter.color.fill_waveform_a: List of color codes assigned to the real and the imaginary part envelope of waveforms in the acquire channels in the schedule drawing (default `['#dc267f', '#760019']`). - formatter.color.baseline: Color code of lines of zero line of each chart (default `'#000000'`). - formatter.color.barrier: Color code of lines of barrier (default `'#222222'`). - formatter.color.background: Color code of the face color of canvas (default `'#f2f3f4'`). - formatter.color.fig_title: Color code of the figure title text (default `'#000000'`). - formatter.color.annotate: Color code of annotation texts in the canvas (default `'#222222'`). - formatter.color.frame_change: Color code of the symbol for frame changes (default `'#000000'`). - formatter.color.snapshot: Color code of the symbol for snapshot (default `'#000000'`) - formatter.color.axis_label: Color code of axis labels (default `'#000000'`). - formatter.alpha.fill_waveform: Transparency of waveforms. A value in the range from `0` to `1`. The value `0` gives completely transparent waveforms (default `0.3`). - formatter.alpha.baseline: Transparency of base lines. A value in the range from `0` to `1`. The value `0` gives completely transparent base lines (default `1.0`). - formatter.alpha.barrier: Transparency of barrier lines. A value in the range from `0` to `1`. The value `0` gives completely transparent barrier lines (default `0.7`). - formatter.layer.fill_waveform: Layer index of waveforms. Larger number comes in the front of the output image (default `2`). - formatter.layer.baseline: Layer index of baselines. Larger number comes in the front of the output image (default `1`). - formatter.layer.barrier: Layer index of barrier lines. Larger number comes in the front of the output image (default `1`). - formatter.layer.annotate: Layer index of annotations. Larger number comes in the front of the output image (default `5`). - formatter.layer.axis_label: Layer index of axis labels. Larger number comes in the front of the output image (default `5`). - formatter.layer.frame_change: Layer index of frame change symbols. Larger number comes in the front of the output image (default `4`). - formatter.layer.snapshot: Layer index of snapshot symbols. Larger number comes in the front of the output image (default `3`). - formatter.layer.fig_title: Layer index of the figure title. Larger number comes in the front of the output image (default `6`). - formatter.margin.top: Margin from the top boundary of the figure canvas to the surface of the first chart (default `0.5`). 
- formatter.margin.bottom: Margin from the bottom boundary of the figure canvas to the surface of the last chart (default `0.5`). - formatter.margin.left_percent: Margin from the left boundary of the figure canvas to the zero point of the horizontal axis. The value is in units of percentage of the whole program duration. If the duration is 100 and the value of 0.5 is set, this keeps left margin of 5 (default `0.05`). - formatter.margin.right_percent: Margin from the right boundary of the figure canvas to the left limit of the horizontal axis. The value is in units of percentage of the whole program duration. If the duration is 100 and the value of 0.5 is set, this keeps right margin of 5 (default `0.05`). - formatter.margin.between_channel: Vertical margin between charts (default `0.2`). - formatter.label_offset.pulse_name: Offset of pulse name annotations from the chart baseline (default `0.3`). - formatter.label_offset.chart_info: Offset of chart info annotations from the chart baseline (default `0.3`). - formatter.label_offset.frame_change: Offset of frame change annotations from the chart baseline (default `0.3`). - formatter.label_offset.snapshot: Offset of snapshot annotations from the chart baseline (default `0.3`). - formatter.text_size.axis_label: Text size of axis labels (default `15`). - formatter.text_size.annotate: Text size of annotations (default `12`). - formatter.text_size.frame_change: Text size of frame change symbols (default `20`). - formatter.text_size.snapshot: Text size of snapshot symbols (default `20`). - formatter.text_size.fig_title: Text size of the figure title (default `15`). - formatter.text_size.axis_break_symbol: Text size of axis break symbols (default `15`). - formatter.line_width.fill_waveform: Line width of the fringe of filled waveforms (default `0`). - formatter.line_width.axis_break: Line width of axis breaks. The axis break line paints over other drawing objects with the background face color (default `6`). - formatter.line_width.baseline: Line width of base lines (default `1`) - formatter.line_width.barrier: Line width of barrier lines (default `1`). - formatter.line_style.fill_waveform: Line style of the fringe of filled waveforms. This conforms to the line style spec of matplotlib (default `'-'`). - formatter.line_style.baseline: Line style of base lines. This conforms to the line style spec of matplotlib (default `'-'`). - formatter.line_style.barrier: Line style of barrier lines. This conforms to the line style spec of matplotlib (default `':'`). - formatter.channel_scaling.drive: Default scaling value of drive channel waveforms (default `1.0`). - formatter.channel_scaling.control: Default scaling value of control channel waveforms (default `1.0`). - formatter.channel_scaling.measure: Default scaling value of measure channel waveforms (default `1.0`). - formatter.channel_scaling.acquire: Default scaling value of acquire channel waveforms (default `1.0`). - formatter.channel_scaling.pos_spacing: Minimum height of chart above the baseline. Chart top is determined based on the maximum height of waveforms associated with the chart. If the maximum height is below this value, this value is set as the chart top (default 0.1). - formatter.channel_scaling.neg_spacing: Minimum height of chart below the baseline. Chart bottom is determined based on the minimum height of waveforms associated with the chart. If the minimum height is above this value, this value is set as the chart bottom (default -0.1). 
- formatter.axis_break.length: Waveform or idle time duration that axis break is applied. Intervals longer than this value are truncated. The value is in units of data points (default `3000`). - formatter.axis_break.max_length: Length of new waveform or idle time duration after axis break is applied. Longer intervals are truncated to this length (default `1000`). - formatter.control.apply_phase_modulation: Set `True` to apply phase modulation to the waveforms (default `True`). - formatter.control.show_snapshot_channel: Set `True` to show snapshot instructions (default `True`). - formatter.control.show_acquire_channel: Set `True` to show acquire channels (default `True`). - formatter.control.show_empty_channel: Set `True` to show charts without any waveforms (default `True`). - formatter.control.auto_chart_scaling: Set `True` to apply auto-scaling to charts (default `True`). - formatter.control.axis_break: Set `True` to apply axis break for long intervals (default `True`). - formatter.unicode_symbol.frame_change: Text that represents the symbol of frame change. This text is used when the plotter doesn't support latex (default u'\u21BA'). - formatter.unicode_symbol.snapshot: Text that represents the symbol of snapshot. This text is used when the plotter doesn't support latex (default u'\u21AF'). - formatter.latex_symbol.frame_change: Latex text that represents the symbol of frame change (default r'\\circlearrowleft'). - formatter.latex_symbol.snapshot: Latex text that represents the symbol of snapshot (default ''). - generator.waveform: List of callback functions that generates drawing object for waveforms. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.waveform` for more details. No default generator is set. - generator.frame: List of callback functions that generates drawing object for frame changes. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.frame` for more details. No default generator is set. - generator.chart: List of callback functions that generates drawing object for charts. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.chart` for more details. No default generator is set. - generator.snapshot: List of callback functions that generates drawing object for snapshots. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.snapshot` for more details. No default generator is set. - generator.barrier: List of callback functions that generates drawing object for barriers. Arbitrary callback functions satisfying the generator format can be set here. There are some default generators in the pulse drawer. See :py:mod:~`qiskit.visualization.pulse_v2.generators.barrier` for more details. No default generator is set. - layout.chart_channel_map: Callback function that determines the relationship between pulse channels and charts. See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details. No default layout is set. - layout.time_axis_map: Callback function that determines the layout of horizontal axis labels. 
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details. No default layout is set. - layout.figure_title: Callback function that generates a string for the figure title. See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details. No default layout is set. Raises: ImportError: When required visualization package is not installed. VisualizationError: When invalid plotter API is specified. """""" temp_style = stylesheet.QiskitPulseStyle() temp_style.update(style or stylesheet.IqxStandard()) if backend: device = device_info.OpenPulseBackendInfo.create_from_backend(backend) else: device = device_info.OpenPulseBackendInfo() # create empty canvas and load program canvas = core.DrawerCanvas(stylesheet=temp_style, device=device) canvas.load_program(program=program) # # update configuration # # time range if time_range_dt: canvas.set_time_range(*time_range_dt, seconds=False) if time_range_ns: canvas.set_time_range(*time_range_ns, seconds=True) # channels not shown if disable_channels: for chan in disable_channels: canvas.set_disable_channel(chan, remove=True) # show snapshots if not show_snapshot: canvas.set_disable_type(types.DrawingSymbol.SNAPSHOT, remove=True) canvas.set_disable_type(types.DrawingLabel.SNAPSHOT, remove=True) # show frame changes if not show_framechange: canvas.set_disable_type(types.DrawingSymbol.FRAME, remove=True) canvas.set_disable_type(types.DrawingLabel.FRAME, remove=True) # show waveform info if not show_waveform_info: canvas.set_disable_type(types.DrawingLabel.PULSE_INFO, remove=True) canvas.set_disable_type(types.DrawingLabel.PULSE_NAME, remove=True) # show barrier if not show_barrier: canvas.set_disable_type(types.DrawingLine.BARRIER, remove=True) canvas.update() # # Call plotter API and generate image # if plotter == types.Plotter.Mpl2D: try: from qiskit.visualization.pulse_v2.plotters import Mpl2DPlotter except ImportError: raise ImportError('Must have Matplotlib installed.') plotter_api = Mpl2DPlotter(canvas=canvas, axis=axis) plotter_api.draw() else: raise VisualizationError('Plotter API {name} is not supported.'.format(name=plotter)) # save figure if filename: plotter_api.save_file(filename=filename) return plotter_api.get_image() " 22287,"def serve_webapp(webapp, port=None, host=None): """"""Serve the webapp on a recommend port or a free one. Return the port the webapp is running on. """""" server = None for port in attempt_ports(port): try: server = httpserver.serve(webapp, host=host, port=port, start_loop=False) break except OSError as e: if e.errno == 98: continue raise t = threading.Thread(target=server.serve_forever) t.start() return server, port ","def serve_webapp(webapp, port=None, host=None): """"""Serve the webapp on a recommend port or a free one. Return the port the webapp is running on. """""" server = None for port in attempt_ports(port): try: server = httpserver.serve(webapp, host=host, port=port, start_loop=False) break except socket.error as e: if e.errno == 98: continue raise t = threading.Thread(target=server.serve_forever) t.start() return server, port " 19885,"def macadam_limits(target_brightness, illuminant=()): """""" whavelenght reaches from 360 to 830 nm, in within the programm it is handled as 0 to 470. Beyond the references this programm is very fast, because the possible optimums are not simply tested step by step but more effectively targeted by steps of power of two. The whavelenghts left and right of a rough optimum are fited by a rule of proportion, so that the wished brightness will be reached exactly. 
Parameters ---------- target_brightness : floating point brightness has to be between 0 and 1 illuminant: object illuminant must be out of colorimetry.MSDS_CMFS['XXX'] If there is no illuminant or it has the wrong form, the illuminant SDS_ILLUMINANTS['E'] is choosen wich has no influence to the calculations, because it is an equal-energy-spectrum if necessary a third parameter for the colour-matching funciton could easily be implemented Returns ------- an array of CIE -X,Y,Z - Triples for every single whavelength in single nm - Steps in the range from 360 to 830 nm References ---------- - cite: Wyszecki, G., & Stiles, W. S. (2000). In Color Science: Concepts and Methods, Quantitative Data and Formulae (pp. 181–184). Wiley. ISBN:978-0-471-39918-6 - cite: Francisco Martínez-Verdú, Esther Perales, Elisabet Chorro, Dolores de Fez, Valentín Viqueira, and Eduardo Gilabert, ""Computation and visualization of the MacAdam limits for any lightness, hue angle, and light source,"" J. Opt. Soc. Am. A 24, 1501-1515 (2007) - cite: Kenichiro Masaoka. In OPTICS LETTERS, June 15, 2010 / Vol. 35, No. 1 (pp. 2031 - 2033) Example -------- from matplotlib import pyplot as plt import numpy as np import math fig = plt.figure(figsize=(7,7)) ax = fig.add_axes([0,0,1,1]) illuminant = colour.SDS_ILLUMINANTS['D65'] def plot_Narrowband_Spectra (Yxy_Narrowband_Spectra): FirstColumn = 0 SecondColumn = 1 x = Yxy_Narrowband_Spectra[...,FirstColumn] y = Yxy_Narrowband_Spectra[...,SecondColumn] ax.plot(x,y,'orange',label='Spectrum Loci') x = [Yxy_Narrowband_Spectra[-1][FirstColumn], Yxy_Narrowband_Spectra[0][FirstColumn]] y = [Yxy_Narrowband_Spectra[-1][SecondColumn], Yxy_Narrowband_Spectra[0][SecondColumn]] ax.plot(x,y,'purple',label='Purple Boundary') return() for n in range(1, 20): Yxy_Narrowband_Spectra = colour.XYZ_to_xy( colour.macadam_limits(n/20, illuminant) / 100) plot_Narrowband_Spectra (Yxy_Narrowband_Spectra) plt.show() """""" target_bright = target_brightness if target_bright > 1 or target_bright < 0: raise TypeError('brightness of function macadam_limits( )' 'has to be between 0 and 1') standard_cfms = MSDS_CMFS['CIE 1931 2 Degree Standard Observer'] X_cie31 = standard_cfms.values[..., 0] Y_cie31 = standard_cfms.values[..., 1] Z_cie31 = standard_cfms.values[..., 2] try: illuminant.interpolator except AttributeError: illuminant = SDS_ILLUMINANTS['E'] # If there is no illuminant or it has the wrong form, # an illuminant choosen with no influence # If the illuminanats do not match the format of the Standard Observer, # they have to be adaptet illuminant.extrapolate(SpectralShape(360, 830)) illuminant.interpolate(SpectralShape(360, 830, 1)) # The cie31 cmfs are convolved with the given illuminant X_illuminated = X_cie31 * illuminant.values Y_illuminated = Y_cie31 * illuminant.values Z_illuminated = Z_cie31 * illuminant.values # Generate empty output-array out_limits = np.zeros_like(standard_cfms.values) # This Array has 471 entries for whavelenghts from 360 nm to 830 nm opti_colour = np.zeros_like(Y_illuminated) # The array of optimal colours has the same dimensions like Y_illuminated # and all entries are initialy set to zero middle_opti_colour = 235 # is a constant and not be changed. 
At 595nm (360 + 235) # in the middle of the center_opti_colour-array # be aware that counting in array-positions starts at zero # The first optimum color has its center initialy at zero maximum_brightness = np.sum(Y_illuminated) # ""integral"" over Y_illuminated def optimum_colour(width, center): opti_colour = np.zeros(471) # creates array of 471 zeros and ones which represents optimum-colours # All values of the opti_colour-array are intialy set to zero half_width = width center_opti_colour = center middle_opti_colour = 235 opti_colour[middle_opti_colour - half_width:middle_opti_colour + half_width + 1] = 1 # we start the construction of the optimum color # at the center of the opti_colour-array opti_colour = np.roll(opti_colour, center_opti_colour - middle_opti_colour) # the optimum colour is rolled to the right whavelenght return opti_colour def bright_opti_colour(width, center, lightsource): brightness = np.sum( optimum_colour(width, center) * lightsource) / maximum_brightness return brightness step_size = np.array([64, 32, 16, 8, 4, 2, 1]) for whavelength in range(0, 471): width = 127 for n in step_size: brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness > target_bright or width > 234: width -= n else: width += n brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness < target_bright: width += 1 brightness = bright_opti_colour(width, whavelength, Y_illuminated) rough_optimum = optimum_colour(width, whavelength) brightness = np.sum(rough_optimum * Y_illuminated) / maximum_brightness # in the following, the both borders of the found rough_optimum # are reduced to get more exact results bright_difference = (brightness - target_bright) * maximum_brightness # discrimination for single-whavelenght-spectra if width > 0: opti_colour = np.zeros(471) opti_colour[middle_opti_colour - width:middle_opti_colour + width + 1] = 1 # instead rolling foreward opti_colour, light is rolled backward rolled_light = np.roll(Y_illuminated, middle_opti_colour - whavelength) opti_colour_light = opti_colour * rolled_light left_opti = opti_colour_light[middle_opti_colour - width] right_opti = opti_colour_light[middle_opti_colour + width] interpolation = 1 - (bright_difference / (left_opti + right_opti)) opti_colour[middle_opti_colour - width] = interpolation opti_colour[middle_opti_colour + width] = interpolation # opti_colour is rolled to right possition final_optimum = np.roll(opti_colour, whavelength - middle_opti_colour) else: final_optimum = rough_optimum / brightness * target_bright out_X = np.sum(final_optimum * X_illuminated) out_Y = target_bright * maximum_brightness out_Z = np.sum(final_optimum * Z_illuminated) triple = np.array([out_X, out_Y, out_Z]) out_limits[whavelength] = triple return (out_limits) ","def macadam_limits(target_brightness, illuminant=()): """""" whavelenght reaches from 360 to 830 nm, in within the programm it is handled as 0 to 470. Beyond the references this programm is very fast, because the possible optimums are not simply tested step by step but more effectively targeted by steps of power of two. The whavelenghts left and right of a rough optimum are fited by a rule of proportion, so that the wished brightness will be reached exactly. 
Parameters ---------- target_brightness : floating point brightness has to be between 0 and 1 illuminant: object illuminant must be out of colorimetry.MSDS_CMFS['XXX'] If there is no illuminant or it has the wrong form, the illuminant SDS_ILLUMINANTS['E'] is choosen wich has no influence to the calculations, because it is an equal-energy-spectrum if necessary a third parameter for the colour-matching funciton could easily be implemented Returns ------- an array of CIE -X,Y,Z - Triples for every single whavelength in single nm - Steps in the range from 360 to 830 nm References ---------- - cite: Wyszecki, G., & Stiles, W. S. (2000). In Color Science: Concepts and Methods, Quantitative Data and Formulae (pp. 181–184). Wiley. ISBN:978-0-471-39918-6 - cite: Francisco Martínez-Verdú, Esther Perales, Elisabet Chorro, Dolores de Fez, Valentín Viqueira, and Eduardo Gilabert, ""Computation and visualization of the MacAdam limits for any lightness, hue angle, and light source,"" J. Opt. Soc. Am. A 24, 1501-1515 (2007) - cite: Kenichiro Masaoka. In OPTICS LETTERS, June 15, 2010 / Vol. 35, No. 1 (pp. 2031 - 2033) Example -------- from matplotlib import pyplot as plt import numpy as np import math fig = plt.figure(figsize=(7,7)) ax = fig.add_axes([0,0,1,1]) illuminant = colour.SDS_ILLUMINANTS['D65'] def plot_Narrowband_Spectra (Yxy_Narrowband_Spectra): FirstColumn = 0 SecondColumn = 1 x = Yxy_Narrowband_Spectra[...,FirstColumn] y = Yxy_Narrowband_Spectra[...,SecondColumn] ax.plot(x,y,'orange',label='Spectrum Loci') x = [Yxy_Narrowband_Spectra[-1][FirstColumn], Yxy_Narrowband_Spectra[0][FirstColumn]] y = [Yxy_Narrowband_Spectra[-1][SecondColumn], Yxy_Narrowband_Spectra[0][SecondColumn]] ax.plot(x,y,'purple',label='Purple Boundary') return() for n in range(1, 20): Yxy_Narrowband_Spectra = colour.XYZ_to_xy( colour.macadam_limits(n/20, illuminant) / 100) plot_Narrowband_Spectra (Yxy_Narrowband_Spectra) plt.show() """""" target_bright = target_brightness if target_bright > 1 or target_bright < 0: raise TypeError('brightness of function macadam_limits( )' 'has to be between 0 and 1') standard_cfms = MSDS_CMFS['CIE 1931 2 Degree Standard Observer'] X_cie31 = standard_cfms.values[..., 0] Y_cie31 = standard_cfms.values[..., 1] Z_cie31 = standard_cfms.values[..., 2] try: illuminant.interpolator except AttributeError: illuminant = SDS_ILLUMINANTS['E'] # If there is no illuminant or it has the wrong form, # an illuminant choosen with no influence # If the illuminanats do not match the format of the Standard Observer, # they have to be adaptet illuminant.extrapolate(SpectralShape(360, 830)) illuminant.interpolate(SpectralShape(360, 830, 1)) # The cie31 cmfs are convolved with the given illuminant X_illuminated = X_cie31 * illuminant.values Y_illuminated = Y_cie31 * illuminant.values Z_illuminated = Z_cie31 * illuminant.values # Generate empty output-array out_limits = np.zeros_like(standard_cfms.values) # This Array has 471 entries for whavelenghts from 360 nm to 830 nm opti_colour = np.zeros_like(Y_illuminated) # The array of optimal colours has the same dimensions like Y_illuminated # and all entries are initialy set to zero middle_opti_colour = 235 # is a constant and not be changed. 
At 595nm (360 + 235) # in the middle of the center_opti_colour-array # be aware that counting in array-positions starts at zero # The first optimum color has its center initialy at zero maximum_brightness = np.sum(Y_illuminated) # ""integral"" over Y_illuminated def optimum_colour(width, center): opti_colour = np.zeros(471) # creates array of 471 zeros and ones which represents optimum-colours # All values of the opti_colour-array are intialy set to zero half_width = width center_opti_colour = center middle_opti_colour = 235 opti_colour[middle_opti_colour - half_width:middle_opti_colour + half_width + 1] = 1 # we start the construction of the optimum color # at the center of the opti_colour-array opti_colour = np.roll(opti_colour, center_opti_colour - middle_opti_colour) # the optimum colour is rolled to the right whavelenght return opti_colour def bright_opti_colour(width, center, lightsource): brightness = np.sum( optimum_colour(width, center) * lightsource) / maximum_brightness return brightness step_size = np.array([64, 32, 16, 8, 4, 2, 1]) for whavelength in range(0, 471): width = 127 for n in step_size: brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness > target_bright or width > 234: width -= n else: width += n brightness = bright_opti_colour(width, whavelength, Y_illuminated) if brightness < target_bright: width += 1 brightness = bright_opti_colour(width, whavelength, Y_illuminated) rough_optimum = optimum_colour(width, whavelength) brightness = np.sum(rough_optimum * Y_illuminated) / maximum_brightness # in the following, the both borders of the found rough_optimum # are reduced to get more exact results bright_difference = (brightness - target_bright) * maximum_brightness # discrimination for single-wavelength-spectra if width > 0: opti_colour = np.zeros(471) opti_colour[middle_opti_colour - width:middle_opti_colour + width + 1] = 1 # instead rolling foreward opti_colour, light is rolled backward rolled_light = np.roll(Y_illuminated, middle_opti_colour - whavelength) opti_colour_light = opti_colour * rolled_light left_opti = opti_colour_light[middle_opti_colour - width] right_opti = opti_colour_light[middle_opti_colour + width] interpolation = 1 - (bright_difference / (left_opti + right_opti)) opti_colour[middle_opti_colour - width] = interpolation opti_colour[middle_opti_colour + width] = interpolation # opti_colour is rolled to right possition final_optimum = np.roll(opti_colour, whavelength - middle_opti_colour) else: final_optimum = rough_optimum / brightness * target_bright out_X = np.sum(final_optimum * X_illuminated) out_Y = target_bright * maximum_brightness out_Z = np.sum(final_optimum * Z_illuminated) triple = np.array([out_X, out_Y, out_Z]) out_limits[whavelength] = triple return (out_limits) " 26953,"def task_instance_link(attr): """"""Generates a URL to the Graph view for a TaskInstance."""""" dag_id = attr.get('dag_id') task_id = attr.get('task_id') execution_date = attr.get('dag_run.execution_date') or attr.get('execution_date') or timezone.utcnow() url = url_for('Airflow.task', dag_id=dag_id, task_id=task_id) url_root = url_for( 'Airflow.graph', dag_id=dag_id, root=task_id, execution_date=execution_date.isoformat() ) return Markup( """"""
{task_id} filter_alt """""" ).format(url=url, task_id=task_id, url_root=url_root) ","def task_instance_link(attr): """"""Generates a URL to the Graph view for a TaskInstance."""""" dag_id = attr.get('dag_id') task_id = attr.get('task_id') execution_date = attr.get('dag_run.execution_date') or attr.get('execution_date') or timezone.utcnow() url = url_for('Airflow.task', dag_id=dag_id, task_id=task_id, execution_date=execution_date.isoformat()) url_root = url_for( 'Airflow.graph', dag_id=dag_id, root=task_id, execution_date=execution_date.isoformat() ) return Markup( """""" {task_id} filter_alt """""" ).format(url=url, task_id=task_id, url_root=url_root) " 29409,"def stop(invalidate=False): from motioneye import mjpgclient global _started _started = False if not running(): return logging.debug('stopping motion') mjpgclient.close_all(invalidate=invalidate) pid = _get_pid() if pid is not None: try: # send the TERM signal once os.kill(pid, signal.SIGTERM) # wait 5 seconds for the process to exit for i in range(50): # @UnusedVariable os.waitpid(pid, os.WNOHANG) time.sleep(0.1) # send the KILL signal once os.kill(pid, signal.SIGKILL) # wait 2 seconds for the process to exit for i in range(20): # @UnusedVariable time.sleep(0.1) os.waitpid(pid, os.WNOHANG) # the process still did not exit if settings.ENABLE_REBOOT: logging.error('could not terminate the motion process') PowerControl.reboot() else: raise Exception('could not terminate the motion process') except OSError as e: if e.errno not in (errno.ESRCH, errno.ECHILD): raise # even if motion is killed, the port is not always free # so we wait until we are able to bind it again s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) max_wait = 30 waited_for = 0 while waited_for <= max_wait: try: s.bind(('127.0.0.1', settings.MOTION_CONTROL_PORT)) s.close() logging.debug('motion port has been released') return except OSError as e: logging.debug(f'motion port has not been released yet: {e}') waited_for += 1 time.sleep(1) logging.debug('motion port has not been released in 30 seconds') raise ","def stop(invalidate=False): from motioneye import mjpgclient global _started _started = False if not running(): return logging.debug('stopping motion') mjpgclient.close_all(invalidate=invalidate) pid = _get_pid() if pid is not None: try: # send the TERM signal once os.kill(pid, signal.SIGTERM) # wait 5 seconds for the process to exit for i in range(50): # @UnusedVariable os.waitpid(pid, os.WNOHANG) time.sleep(0.1) # send the KILL signal once os.kill(pid, signal.SIGKILL) # wait 2 seconds for the process to exit for i in range(20): # @UnusedVariable time.sleep(0.1) os.waitpid(pid, os.WNOHANG) # the process still did not exit if settings.ENABLE_REBOOT: logging.error('could not terminate the motion process') PowerControl.reboot() else: raise Exception('could not terminate the motion process') except OSError as e: if e.errno not in (errno.ESRCH, errno.ECHILD): raise # even if motion is killed, the port is not always free # so we wait until we are able to bind it again s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) max_wait = 30 waited_for = 0 while waited_for <= max_wait: try: s.bind(('127.0.0.1', settings.MOTION_CONTROL_PORT)) s.close() logging.debug('motion port has been released') return except OSError as e: logging.debug(f'motion port has not yet been released: {e!r}') waited_for += 1 time.sleep(1) logging.debug('motion port has not been released in 30 seconds') raise " 25752,"def join_exprs(df): """""" Helper function to join arrays, series or frames of 
stings together. """""" return ''.join(np.asarray(df).flatten()) ","def join_exprs(df): """""" Helper function to join arrays, series or frames of strings together. """""" return ''.join(np.asarray(df).flatten()) " 43961,"def attraction_matrix(basis_functions, charges, r): r""""""Return a function that computes the attraction matrix for a given set of basis functions. Args: basis_functions (list[BasisFunction]): basis functions Returns: basis_functions (list[BasisFunction]): basis functions charges (list[int]): nuclear charges r (array[float]): nuclear positions **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True), >>> mol = Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> attraction_matrix(mol.basis_set, mol.nuclear_charges, mol.coordinates)(*args) array([[-2.03852057, -1.60241667], [-1.60241667, -2.03852057]]) """""" def attraction(*args): r""""""Construct the attraction matrix for a given set of basis functions. Args: args (array[float]): initial values of the differentiable parameters Returns: array[float]: the attraction matrix """""" n = len(basis_functions) v = anp.zeros((n, n)) for i, a in enumerate(basis_functions): for j, b in enumerate(basis_functions): attraction_integral = 0 if i <= j: if args: args_ab = [] if r.requires_grad: for l in range(len(args) - 1): args_ab.append(args[l + 1][[i, j]]) else: for l in range(len(args)): args_ab.append(args[l][[i, j]]) for k, c in enumerate(r): if c.requires_grad: args_ab = [args[0][k]] + args_ab attraction_integral = attraction_integral - charges[ k ] * generate_attraction(c, a, b)(*args_ab) if c.requires_grad: args_ab = args_ab[1:] else: for k, c in enumerate(r): attraction_integral = ( attraction_integral - charges[k] * generate_attraction(c, a, b)() ) o = anp.zeros((n, n)) o[i, j] = o[j, i] = 1.0 v = v + attraction_integral * o return v return attraction ","def attraction_matrix(basis_functions, charges, r): r""""""Return a function that computes the nuclear attraction matrix for a given set of basis functions. Args: basis_functions (list[BasisFunction]): basis functions Returns: basis_functions (list[BasisFunction]): basis functions charges (list[int]): nuclear charges r (array[float]): nuclear positions **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True), >>> mol = Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> attraction_matrix(mol.basis_set, mol.nuclear_charges, mol.coordinates)(*args) array([[-2.03852057, -1.60241667], [-1.60241667, -2.03852057]]) """""" def attraction(*args): r""""""Construct the attraction matrix for a given set of basis functions. 
Args: args (array[float]): initial values of the differentiable parameters Returns: array[float]: the attraction matrix """""" n = len(basis_functions) v = anp.zeros((n, n)) for i, a in enumerate(basis_functions): for j, b in enumerate(basis_functions): attraction_integral = 0 if i <= j: if args: args_ab = [] if r.requires_grad: for l in range(len(args) - 1): args_ab.append(args[l + 1][[i, j]]) else: for l in range(len(args)): args_ab.append(args[l][[i, j]]) for k, c in enumerate(r): if c.requires_grad: args_ab = [args[0][k]] + args_ab attraction_integral = attraction_integral - charges[ k ] * generate_attraction(c, a, b)(*args_ab) if c.requires_grad: args_ab = args_ab[1:] else: for k, c in enumerate(r): attraction_integral = ( attraction_integral - charges[k] * generate_attraction(c, a, b)() ) o = anp.zeros((n, n)) o[i, j] = o[j, i] = 1.0 v = v + attraction_integral * o return v return attraction " 7330,"def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, overlap=.5, log_scale=False, *, threshold_rel=None): """"""Finds blobs in the given grayscale image. Blobs are found using the Determinant of Hessian method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian Kernel used for the Hessian matrix whose determinant detected the blob. Determinant of Hessians is approximated using [2]_. Parameters ---------- image : 2D ndarray Input grayscale image.Blobs can either be light on dark or vice versa. min_sigma : float, optional The minimum standard deviation for Gaussian Kernel used to compute Hessian matrix. Keep this low to detect smaller blobs. max_sigma : float, optional The maximum standard deviation for Gaussian Kernel used to compute Hessian matrix. Keep this high to detect larger blobs. num_sigma : int, optional The number of intermediate values of standard deviations to consider between `min_sigma` and `max_sigma`. threshold : float or None, optional The absolute lower bound for scale space maxima. Local maxima smaller than `threshold` are ignored. Reduce this to detect blobs with lower intensities. If `threshold_rel` is also specified, whichever threshold is larger will be used. If None, `threshold_rel` is used instead. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. log_scale : bool, optional If set intermediate values of standard deviations are interpolated using a logarithmic scale to the base `10`. If not, linear interpolation is used. threshold_rel : float or None, optional Minimum intensity of peaks, calculated as ``max(dog_space) * threshold_rel``. Where ``dog_space`` refers to the stack of determinant-of-hessian (DoH) images computed internally. This should have a value between 0 and 1. If None, `threshold_abs` is used instead. Returns ------- A : (n, 3) ndarray A 2d array with each row representing 3 values, ``(y,x,sigma)`` where ``(y,x)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel of the Hessian Matrix whose determinant detected the blob. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian .. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool, ""SURF: Speeded Up Robust Features"" ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf Examples -------- >>> from skimage import data, feature >>> img = data.coins() >>> feature.blob_doh(img) array([[197. , 153. 
, 20.33333333], [124. , 336. , 20.33333333], [126. , 153. , 20.33333333], [195. , 100. , 23.55555556], [192. , 212. , 23.55555556], [121. , 271. , 30. ], [126. , 101. , 20.33333333], [193. , 275. , 23.55555556], [123. , 205. , 20.33333333], [270. , 363. , 30. ], [265. , 113. , 23.55555556], [262. , 243. , 23.55555556], [185. , 348. , 30. ], [156. , 302. , 30. ], [123. , 44. , 23.55555556], [260. , 173. , 30. ], [197. , 44. , 20.33333333]]) Notes ----- The radius of each blob is approximately `sigma`. Computation of Determinant of Hessians is independent of the standard deviation. Therefore detecting larger blobs won't take more time. In methods line :py:meth:`blob_dog` and :py:meth:`blob_log` the computation of Gaussians for larger `sigma` takes more time. The downside is that this method can't be used for detecting blobs of radius less than `3px` due to the box filters used in the approximation of Hessian Determinant. """""" check_nD(image, 2) image = img_as_float(image) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) image = integral_image(image) if log_scale: start, stop = math.log(min_sigma, 10), math.log(max_sigma, 10) sigma_list = np.logspace(start, stop, num_sigma) else: sigma_list = np.linspace(min_sigma, max_sigma, num_sigma) hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list] image_cube = np.dstack(hessian_images) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, threshold_rel=threshold_rel, exclude_border=False, footprint=np.ones((3,) * image_cube.ndim)) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # Convert the last index to its corresponding scale value lm[:, -1] = sigma_list[local_maxima[:, -1]] return _prune_blobs(lm, overlap) ","def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, overlap=.5, log_scale=False, *, threshold_rel=None): """"""Finds blobs in the given grayscale image. Blobs are found using the Determinant of Hessian method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian Kernel used for the Hessian matrix whose determinant detected the blob. Determinant of Hessians is approximated using [2]_. Parameters ---------- image : 2D ndarray Input grayscale image.Blobs can either be light on dark or vice versa. min_sigma : float, optional The minimum standard deviation for Gaussian Kernel used to compute Hessian matrix. Keep this low to detect smaller blobs. max_sigma : float, optional The maximum standard deviation for Gaussian Kernel used to compute Hessian matrix. Keep this high to detect larger blobs. num_sigma : int, optional The number of intermediate values of standard deviations to consider between `min_sigma` and `max_sigma`. threshold : float or None, optional The absolute lower bound for scale space maxima. Local maxima smaller than `threshold` are ignored. Reduce this to detect blobs with lower intensities. If `threshold_rel` is also specified, whichever threshold is larger will be used. If None, `threshold_rel` is used instead. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. log_scale : bool, optional If set intermediate values of standard deviations are interpolated using a logarithmic scale to the base `10`. If not, linear interpolation is used. 
threshold_rel : float or None, optional Minimum intensity of peaks, calculated as ``max(dog_space) * threshold_rel``. Where ``dog_space`` refers to the stack of determinant-of-hessian (DoH) images computed internally. This should have a value between 0 and 1. If None, `threshold` is used instead. Returns ------- A : (n, 3) ndarray A 2d array with each row representing 3 values, ``(y,x,sigma)`` where ``(y,x)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel of the Hessian Matrix whose determinant detected the blob. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian .. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool, ""SURF: Speeded Up Robust Features"" ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf Examples -------- >>> from skimage import data, feature >>> img = data.coins() >>> feature.blob_doh(img) array([[197. , 153. , 20.33333333], [124. , 336. , 20.33333333], [126. , 153. , 20.33333333], [195. , 100. , 23.55555556], [192. , 212. , 23.55555556], [121. , 271. , 30. ], [126. , 101. , 20.33333333], [193. , 275. , 23.55555556], [123. , 205. , 20.33333333], [270. , 363. , 30. ], [265. , 113. , 23.55555556], [262. , 243. , 23.55555556], [185. , 348. , 30. ], [156. , 302. , 30. ], [123. , 44. , 23.55555556], [260. , 173. , 30. ], [197. , 44. , 20.33333333]]) Notes ----- The radius of each blob is approximately `sigma`. Computation of Determinant of Hessians is independent of the standard deviation. Therefore detecting larger blobs won't take more time. In methods line :py:meth:`blob_dog` and :py:meth:`blob_log` the computation of Gaussians for larger `sigma` takes more time. The downside is that this method can't be used for detecting blobs of radius less than `3px` due to the box filters used in the approximation of Hessian Determinant. """""" check_nD(image, 2) image = img_as_float(image) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) image = integral_image(image) if log_scale: start, stop = math.log(min_sigma, 10), math.log(max_sigma, 10) sigma_list = np.logspace(start, stop, num_sigma) else: sigma_list = np.linspace(min_sigma, max_sigma, num_sigma) hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list] image_cube = np.dstack(hessian_images) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, threshold_rel=threshold_rel, exclude_border=False, footprint=np.ones((3,) * image_cube.ndim)) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # Convert the last index to its corresponding scale value lm[:, -1] = sigma_list[local_maxima[:, -1]] return _prune_blobs(lm, overlap) " 45784,"def apply_equalize3d(input: torch.Tensor, params: Dict[str, torch.Tensor]): r""""""Equalize a tensor volume or a batch of tensors volumes with given random parameters. Input should be a tensor of shape :math:`(D, H, W)`, :math:`(C, D, H, W)` or :math:`(*, C, D, H, W)`. Args: input (torch.Tensor): Tensor to be transformed with shape :math:`(D, H, W)`, :math:`(C, D, H, W)`, :math:`(*, C, D, H, W)`. params (Dict[str, torch.Tensor]): - params['batch_prob']: A boolean tensor that indicating whether if to transform an image in a batch. Example: With input batchsize of 4, only the first two tensors will be transformed if batch_prob is [True, True, False, False]. Returns: torch.Tensor: The equalized input. 
"""""" input = _transform_input3d(input) _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64]) res = [] for image, prob in zip(input, params['batch_prob']): res.append(equalize3d(image) if prob else _transform_input3d(image)) return torch.cat(res, dim=0) ","def apply_equalize3d(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor: r""""""Equalize a tensor volume or a batch of tensors volumes with given random parameters. Input should be a tensor of shape :math:`(D, H, W)`, :math:`(C, D, H, W)` or :math:`(*, C, D, H, W)`. Args: input (torch.Tensor): Tensor to be transformed with shape :math:`(D, H, W)`, :math:`(C, D, H, W)`, :math:`(*, C, D, H, W)`. params (Dict[str, torch.Tensor]): - params['batch_prob']: A boolean tensor that indicating whether if to transform an image in a batch. Example: With input batchsize of 4, only the first two tensors will be transformed if batch_prob is [True, True, False, False]. Returns: torch.Tensor: The equalized input. """""" input = _transform_input3d(input) _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64]) res = [] for image, prob in zip(input, params['batch_prob']): res.append(equalize3d(image) if prob else _transform_input3d(image)) return torch.cat(res, dim=0) " 37211,"def active_backend(): """"""Get the backend of the currently active context. Returns: BaseBackend: The active backend in the currently active builder context. Raises: exceptions.PulseError: If the builder does not have a backend set. """""" builder = _active_builder().backend if builder is None: raise BackendNotSet( 'This function requires the active builder to ' 'have a ""backend"" set.') return builder ","def active_backend(): """"""Get the backend of the currently active context. Returns: BaseBackend: The active backend in the currently active builder context. Raises: exceptions.BackendNotSet: If the builder does not have a backend set. """""" builder = _active_builder().backend if builder is None: raise BackendNotSet( 'This function requires the active builder to ' 'have a ""backend"" set.') return builder " 1840,"def test_isotonic_2darray_more_than_1_feature(): # Ensure IsotonicRegression raises error if input has more than 1 feature X = np.array(list(range(10))) X_2d = np.array([[x, x] for x in range(10)]) y = np.array([0, 1, 2, 6, 5, 4, 3, 7, 8, 9]) msg = ""1d array or 2d array with 1 feature"" with pytest.raises(ValueError, match=msg): IsotonicRegression().fit(X_2d, y) iso_reg = IsotonicRegression().fit(X, y) with pytest.raises(ValueError, match=msg): iso_reg.predict(X_2d) with pytest.raises(ValueError, match=msg): iso_reg.transform(X_2d) ","def test_isotonic_2darray_more_than_1_feature(): # Ensure IsotonicRegression raises error if input has more than 1 feature X = np.array(list(range(10))) X_2d = np.c_[X, X] y = np.array([0, 1, 2, 6, 5, 4, 3, 7, 8, 9]) msg = ""1d array or 2d array with 1 feature"" with pytest.raises(ValueError, match=msg): IsotonicRegression().fit(X_2d, y) iso_reg = IsotonicRegression().fit(X, y) with pytest.raises(ValueError, match=msg): iso_reg.predict(X_2d) with pytest.raises(ValueError, match=msg): iso_reg.transform(X_2d) " 55428,"def cloudpickle_deserialization_check(flow_file_paths: list): flows = [] for flow_file in flow_file_paths: with open(flow_file, ""rb"") as f: try: flows.append(cloudpickle.load(f)) except ModuleNotFoundError as exc: warnings.warn( ""Flow uses module which is not importable. 
Refer to documentation "" ""on how to import custom modules "" ""https://docs.prefect.io/api/latest/environments/storage.html#docker"", stacklevel=2, ) raise exc print(""Cloudpickle serialization check: OK"") return flows ","def cloudpickle_deserialization_check(flow_file_paths: list): flows = [] for flow_file in flow_file_paths: with open(flow_file, ""rb"") as f: try: flows.append(cloudpickle.load(f)) except ModuleNotFoundError: warnings.warn( ""Flow uses module which is not importable. Refer to documentation "" ""on how to import custom modules "" ""https://docs.prefect.io/api/latest/environments/storage.html#docker"", stacklevel=2, ) raise print(""Cloudpickle serialization check: OK"") return flows " 9626,"def check_cli(module, cli): """""" This method checks for pim ssm config using the vrouter-show command. If a user already exists on the given switch, return True else False. :param module: The Ansible module to fetch input parameters :param cli: The CLI string """""" name = module.params['pn_vrouter_name'] network = module.params['pn_network'] VROUTER_EXISTS = '' show = cli cli += ' vrouter-show name %s ' % name cli += 'format name no-show-headers' rc, out, err = module.run_command(cli, use_unsafe_shell=True) if out: pass else: VROUTER_EXISTS = None cli = show cli += ' vrouter-bgp-network-show vrouter-name %s ' % name cli += 'network %s format network no-show-headers' % network out = module.run_command(cli, use_unsafe_shell=True)[1] NETWORK_EXISTS = True if network in out else False return NETWORK_EXISTS, VROUTER_EXISTS ","def check_cli(module, cli): """""" This method checks for pim ssm config using the vrouter-show command. If a user already exists on the given switch, return True else False. :param module: The Ansible module to fetch input parameters :param cli: The CLI string """""" name = module.params['pn_vrouter_name'] network = module.params['pn_network'] VROUTER_EXISTS = '' show = cli cli += ' vrouter-show name %s ' % name cli += 'format name no-show-headers' rc, out, err = module.run_command(cli, use_unsafe_shell=True) if out: pass else: VROUTER_EXISTS = None cli = show cli += ' vrouter-bgp-network-show vrouter-name %s network %s format network no-show-headers' % (name, network) cli += 'network %s format network no-show-headers' % network out = module.run_command(cli, use_unsafe_shell=True)[1] NETWORK_EXISTS = True if network in out else False return NETWORK_EXISTS, VROUTER_EXISTS " 30426,"def process_integration(file_path): """""" Process integration dir or file Arguments: file_path {string} -- file path to integration file Returns: list -- integration data list (may be empty) """""" res = [] if os.path.isfile(file_path): if checked_type(file_path, (INTEGRATION_REGEX, BETA_INTEGRATION_REGEX, PACKS_INTEGRATION_YML_REGEX)): print(""adding {0} to id_set"".format(file_path)) res.append(get_integration_data(file_path)) else: for yml_file in glob.glob(os.path.join(file_path, os.path.basename(file_path) + '.yml')): print(""adding {0} to id_set"".format(yml_file)) res.append(get_integration_data(yml_file)) return res ","def process_integration(file_path): """""" Process integration dir or file Arguments: file_path {string} -- file path to integration file Returns: list -- integration data list (may be empty) """""" res = [] if os.path.isfile(file_path): if checked_type(file_path, (INTEGRATION_REGEX, BETA_INTEGRATION_REGEX, PACKS_INTEGRATION_REGEX)): print(""adding {0} to id_set"".format(file_path)) res.append(get_integration_data(file_path)) else: for yml_file in 
glob.glob(os.path.join(file_path, os.path.basename(file_path) + '.yml')): print(""adding {0} to id_set"".format(yml_file)) res.append(get_integration_data(yml_file)) return res " 8669,"def load_settings(options): """"""Load Sopel settings using the command line's ``options``. :param options: parsed arguments :return: sopel configuration :rtype: :class:`sopel.config.Config` :raise sopel.config.NotFound: raised when configuration file is not found :raise sopel.config.ConfigurationError: raised when configuration is invalid This function load Sopel settings from one of these sources: * value of ``options.config``, if given, * or the ``default`` configuration is loaded, then loads the settings and returns it as a :class:`~sopel.config.Config` object. If the configuration file can not be found, a :exc:`sopel.config.NotFound` error will be raised. .. note:: To use this function effectively, the :func:`sopel.cli.utils.add_config_arguments` function should be used to add the proper option to the argument parser. """""" name = options.config or 'default' filename = find_config(config.DEFAULT_HOMEDIR, name) if not os.path.isfile(filename): raise config.NotFound(filename=filename) return config.Config(filename) ","def load_settings(options): """"""Load Sopel's settings using the command line's ``options``. :param options: parsed arguments :return: sopel configuration :rtype: :class:`sopel.config.Config` :raise sopel.config.NotFound: raised when configuration file is not found :raise sopel.config.ConfigurationError: raised when configuration is invalid This function load Sopel settings from one of these sources: * value of ``options.config``, if given, * or the ``default`` configuration is loaded, then loads the settings and returns it as a :class:`~sopel.config.Config` object. If the configuration file can not be found, a :exc:`sopel.config.NotFound` error will be raised. .. note:: To use this function effectively, the :func:`sopel.cli.utils.add_config_arguments` function should be used to add the proper option to the argument parser. 
"""""" name = options.config or 'default' filename = find_config(config.DEFAULT_HOMEDIR, name) if not os.path.isfile(filename): raise config.NotFound(filename=filename) return config.Config(filename) " 35865,"def test_nearest_dataset_is_itself(): """"""Tests whether the nearest dataset is itself"""""" for test_dataset in ['vote', 'analcatdata_aids', 'car']: df = fetch_data(test_dataset, local_cache_dir='../datasets') assert(fetch_nearest_dataset_names(df) == test_dataset) ","def test_nearest_dataset_is_itself(): """"""Tests whether the nearest dataset is itself"""""" for test_dataset in ['vote', 'analcatdata_aids', 'car']: df = fetch_data(test_dataset, local_cache_dir='../datasets') assert(nearest_datasets(df) == test_dataset) " 42392,"def test_ax_provided(rgb_image): """"""Test to ensure the plot works when an explicit axis is provided"""""" f, ax1 = plt.subplots() f, ax = plot_rgb(rgb_image, ax=ax1) rgb_im_shape = rgb_image.transpose([1, 2, 0]).shape the_plot_im_shape = ax.get_images()[0].get_array().shape assert rgb_im_shape == the_plot_im_shape plt.close(f) ","def test_ax_provided(rgb_image): """"""Test to ensure the plot works when an explicit axis is provided"""""" _, ax1 = plt.subplots() f, ax = plot_rgb(rgb_image, ax=ax1) rgb_im_shape = rgb_image.transpose([1, 2, 0]).shape the_plot_im_shape = ax.get_images()[0].get_array().shape assert rgb_im_shape == the_plot_im_shape plt.close(f) " 43167,"def load_partition_feats(part_config, part_id): ''' Load feat data of a partition from the data path. Parameters ---------- part_config : str The path of the partition config file. part_id : int The partition ID. Returns ------- Dict[str, Tensor] Node features. Dict[str, Tensor] Edge features. ''' config_path = os.path.dirname(part_config) relative_to_config = lambda path: os.path.join(config_path, path) with open(part_config) as conf_f: part_metadata = json.load(conf_f) assert 'part-{}'.format(part_id) in part_metadata, ""part-{} does not exist"".format(part_id) part_files = part_metadata['part-{}'.format(part_id)] assert 'node_feats' in part_files, ""the partition does not contain node features."" assert 'edge_feats' in part_files, ""the partition does not contain edge feature."" node_feats = load_tensors(relative_to_config(part_files['node_feats'])) edge_feats = load_tensors(relative_to_config(part_files['edge_feats'])) # In the old format, the feature name doesn't contain node/edge type. # For compatibility, let's add node/edge types to the feature names. node_feats1 = {} edge_feats1 = {} for name in node_feats: feat = node_feats[name] if name.find('/') == -1: name = '_N/' + name node_feats1[name] = feat for name in edge_feats: feat = edge_feats[name] if name.find('/') == -1: name = '_E/' + name edge_feats1[name] = feat node_feats = node_feats1 edge_feats = edge_feats1 return node_feats, edge_feats ","def load_partition_feats(part_config, part_id): '''Load node/edge feature data from a partition. Parameters ---------- part_config : str The path of the partition config file. part_id : int The partition ID. Returns ------- Dict[str, Tensor] Node features. Dict[str, Tensor] Edge features. 
''' config_path = os.path.dirname(part_config) relative_to_config = lambda path: os.path.join(config_path, path) with open(part_config) as conf_f: part_metadata = json.load(conf_f) assert 'part-{}'.format(part_id) in part_metadata, ""part-{} does not exist"".format(part_id) part_files = part_metadata['part-{}'.format(part_id)] assert 'node_feats' in part_files, ""the partition does not contain node features."" assert 'edge_feats' in part_files, ""the partition does not contain edge feature."" node_feats = load_tensors(relative_to_config(part_files['node_feats'])) edge_feats = load_tensors(relative_to_config(part_files['edge_feats'])) # In the old format, the feature name doesn't contain node/edge type. # For compatibility, let's add node/edge types to the feature names. node_feats1 = {} edge_feats1 = {} for name in node_feats: feat = node_feats[name] if name.find('/') == -1: name = '_N/' + name node_feats1[name] = feat for name in edge_feats: feat = edge_feats[name] if name.find('/') == -1: name = '_E/' + name edge_feats1[name] = feat node_feats = node_feats1 edge_feats = edge_feats1 return node_feats, edge_feats " 12739,"def softwrap(s: str): """"""Turns a multiline string into a softwrapped string. Applies the following rules: - Dedents the text (you also don't need to start your string with a backslash) - Replaces singular newlines with a space (to turn a parapgraph into one long line) - Double-newlines are preserved - Extra indentation is preserved, and also preserves the indented line's ending """""" # If callers didnt use a leading ""\"" thats OK. if s[0] == ""\n"": s = s[1:] lines = textwrap.dedent(s).splitlines(keepends=True) # NB: collecting a list of strs and `"""".join` is more performant than calling `+=` repeatedly. result_strs = [] for i, line in enumerate(lines): next_line = lines[i + 1] if i + 1 < len(lines) else None if ""\n"" in (line, next_line) or line.startswith("" ""): result_strs.append(line) else: result_strs.append(line.rstrip()) result_strs.append("" "") return """".join(result_strs).rstrip() ","def softwrap(s: str): """"""Turns a multiline string into a softwrapped string. Applies the following rules: - Dedents the text (you also don't need to start your string with a backslash) - Replaces singular newlines with a space (to turn a paragraph into one long line) - Double-newlines are preserved - Extra indentation is preserved, and also preserves the indented line's ending """""" # If callers didnt use a leading ""\"" thats OK. if s[0] == ""\n"": s = s[1:] lines = textwrap.dedent(s).splitlines(keepends=True) # NB: collecting a list of strs and `"""".join` is more performant than calling `+=` repeatedly. result_strs = [] for i, line in enumerate(lines): next_line = lines[i + 1] if i + 1 < len(lines) else None if ""\n"" in (line, next_line) or line.startswith("" ""): result_strs.append(line) else: result_strs.append(line.rstrip()) result_strs.append("" "") return """".join(result_strs).rstrip() " 279,"def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. 
step : function or iterable of functions A step function or collection of functions. If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. 
progressbar : bool Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and `effective_n`. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. 
Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace ","def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, **kwargs): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See `discard_tuned_samples`. step : function or iterable of functions A step function or collection of functions. 
If there are variables without a step methods, step methods for those variables will be assigned automatically. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as `adapt_diag`\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to `trace.point(-1))` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can overwrite the default. For 'SMC' step method, `start` should be a list of dicts of length = `chains`. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number `chain`. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). Ignored when using 'SMC' as step method. chain_idx : int Chain number used to store sample in backend. If `chains` is greater than one, chain numbers will start here. Ignored when using 'SMC' as step method. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If `None`, then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the number of draws. cores : int The number of chains to run in parallel. If `None`, set to the number of CPUs in the system, but at most 4. When using 'SMC', this parameter will be ignored if running with `pm.SMC(parallel=False)`. Keep in mind that some chains might themselves be multithreaded via openmp or BLAS. In those cases it might be faster to set this to 1. tune : int Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the `draws` argument, and will be discarded unless `discard_tuned_samples` is set to False. progressbar : bool Whether or not to display a progress bar in the command line. 
The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in `with` context) random_seed : int or list of ints A list is accepted if `cores` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. Ignored when using 'SMC' compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``. Ignored when using 'SMC' Returns ------- trace : pymc3.backends.base.MultiTrace A `MultiTrace` object that contains the samples. Notes ----- Optional keyword arguments can be passed to `sample` to be delivered to the `step_method`s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop('nuts_kwargs', None) if nuts_kwargs is not None: warnings.warn(""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop('step_kwargs', None) if step_kwargs is not None: warnings.warn(""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if isinstance(step, pm.step_methods.smc.SMC): trace = smc.sample_smc(draws=draws, step=step, start=start, cores=cores, progressbar=progressbar, model=model, random_seed=random_seed) else: if 'njobs' in kwargs: cores = kwargs['njobs'] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning) if 'nchains' in kwargs: chains = kwargs['nchains'] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError( 'Invalid value for `random_seed`. Must be tuple, list or int') if 'chain' in kwargs: chain_idx = kwargs['chain'] warnings.warn( ""The chain argument has been deprecated. 
Use chain_idx instead."", DeprecationWarning) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError('The model does not contain any free variables.') if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info('Auto-assigning NUTS sampler...') start_, step = init_nuts(init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug('Exception in init nuts', exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = {'draws': draws, 'step': step, 'start': start, 'trace': trace, 'chain': chain_idx, 'chains': chains, 'tune': tune, 'progressbar': progressbar, 'model': model, 'random_seed': random_seed, 'cores': cores, } sample_args.update(kwargs) has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step])]) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug('Pickling error:', exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: _log.info('Population sampling ({} chains)'.format(chains)) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info('Sequential sampling ({} chains in 1 job)'.format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws-tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace " 41668,"def build_packages(packagesdir, outputdir, args): pkg_map: Dict[str, Package] = {} packages: Optional[Set[str]] = common._parse_package_subset(args.only) if packages is None: packages = set( str(x) for x in packagesdir.iterdir() if (x / ""meta.yaml"").is_file() ) # Generate Package objects for all specified packages and recursive # dependencies. 
while packages: pkgname = packages.pop() if pkg_map.get(pkgname) is not None: continue pkg = Package(packagesdir / pkgname) pkg_map[pkg.name] = pkg for dep in pkg.dependencies: # This won't catch all duplicates but let's try our best if pkg_map.get(dep) is None: packages.add(dep) # Build set of dependents for pkg in pkg_map.values(): for dep in pkg.dependencies: pkg_map[dep].dependents.add(pkg.name) # Insert packages into build_queue. We *must* do this after counting # dependents, because the ordering ought not to change after insertion. build_queue: PriorirtyQueue = PriorityQueue() for pkg in pkg_map.values(): if len(pkg.dependencies) == 0: build_queue.put(pkg) built_queue = Queue() def builder(n): print(f""Starting thread {n}"") while True: pkg = build_queue.get() print(f""Thread {n} building {pkg.name}"") pkg.build(outputdir, args) print(f""Thread {n} built {pkg.name}"") built_queue.put(pkg) # Release the GIL so new packages get queued sleep(0.01) for n in range(0, args.num_threads): Thread(target=builder, args=(n + 1,), daemon=True).start() num_built = 0 while num_built < len(pkg_map): pkg = built_queue.get() num_built += 1 for dependent in pkg.dependents: dependent = pkg_map[dependent] dependent.unbuilt_dependencies.remove(pkg.name) if len(dependent.unbuilt_dependencies) == 0: build_queue.put(dependent) assert len(pkg_map) == num_built # Build package.json data. The ""test"" package is built in a different way, # so we hardcode its existence here. # # This is done last so the Makefile can use it as a completion token. package_data = { ""dependencies"": {""test"": []}, ""import_name_to_package_name"": {}, } for name, pkg in pkg_map.items(): package_data[""dependencies""][name] = pkg.dependencies for imp in pkg.meta.get(""test"", {}).get(""imports"", [name]): package_data[""import_name_to_package_name""][imp] = name with open(outputdir / ""packages.json"", ""w"") as fd: json.dump(package_data, fd) ","def build_packages(packagesdir: Path, outputdir: Path, args) -> None: pkg_map: Dict[str, Package] = {} packages: Optional[Set[str]] = common._parse_package_subset(args.only) if packages is None: packages = set( str(x) for x in packagesdir.iterdir() if (x / ""meta.yaml"").is_file() ) # Generate Package objects for all specified packages and recursive # dependencies. while packages: pkgname = packages.pop() if pkg_map.get(pkgname) is not None: continue pkg = Package(packagesdir / pkgname) pkg_map[pkg.name] = pkg for dep in pkg.dependencies: # This won't catch all duplicates but let's try our best if pkg_map.get(dep) is None: packages.add(dep) # Build set of dependents for pkg in pkg_map.values(): for dep in pkg.dependencies: pkg_map[dep].dependents.add(pkg.name) # Insert packages into build_queue. We *must* do this after counting # dependents, because the ordering ought not to change after insertion. 
build_queue: PriorirtyQueue = PriorityQueue() for pkg in pkg_map.values(): if len(pkg.dependencies) == 0: build_queue.put(pkg) built_queue = Queue() def builder(n): print(f""Starting thread {n}"") while True: pkg = build_queue.get() print(f""Thread {n} building {pkg.name}"") pkg.build(outputdir, args) print(f""Thread {n} built {pkg.name}"") built_queue.put(pkg) # Release the GIL so new packages get queued sleep(0.01) for n in range(0, args.num_threads): Thread(target=builder, args=(n + 1,), daemon=True).start() num_built = 0 while num_built < len(pkg_map): pkg = built_queue.get() num_built += 1 for dependent in pkg.dependents: dependent = pkg_map[dependent] dependent.unbuilt_dependencies.remove(pkg.name) if len(dependent.unbuilt_dependencies) == 0: build_queue.put(dependent) assert len(pkg_map) == num_built # Build package.json data. The ""test"" package is built in a different way, # so we hardcode its existence here. # # This is done last so the Makefile can use it as a completion token. package_data = { ""dependencies"": {""test"": []}, ""import_name_to_package_name"": {}, } for name, pkg in pkg_map.items(): package_data[""dependencies""][name] = pkg.dependencies for imp in pkg.meta.get(""test"", {}).get(""imports"", [name]): package_data[""import_name_to_package_name""][imp] = name with open(outputdir / ""packages.json"", ""w"") as fd: json.dump(package_data, fd) " 49896,"def fedis(aoi, surface_tilt, n=1.5, n_ref=1.4585): """""" Determine the incidence angle modifiers (iam) for direct, diffuse sky, and ground-reflected radiation using the FEDIS transmittance model. The ""Fresnel Equations"" for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS) [1]_ is an analytical solution of diffuse transmission based on the rigorous integration of an alternate form of the Fresnel equations. The approach leads to a simple yet accurate relative transmittance model that reconciles the solar energy sensed by pyranometers and PV panels. Parameters ---------- aoi : numeric Angle of incidence. [degrees] surface_tilt : numeric Surface tilt angle measured from horizontal (e.g. surface facing up = 0, surface facing horizon = 90). [degrees] n : float, default 1.5 Refractive index of the PV cover. The default value of 1.5 was used for an IMT reference cell in [1]_. [unitless] n_ref : float, default 1.4585 Refractive index of the pyranometer cover. The default value was used for a fused silica dome over a CMP22 in [1]_. Returns ------- iam : dict IAM values for each type of irradiance: * 'direct': radiation from the solar disc * 'sky': radiation from the sky dome (zenith <= 90) * 'ground': radiation reflected from the ground (zenith >= 90) Notes ----- This implementation corrects a typo in the reference regarding the sign of the last polynomial term in Equation 5. References ---------- .. [1] Xie, Y., M. Sengupta, A. Habte, A. Andreas, ""The 'Fresnel Equations' for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS)"", Renewable and Sustainable Energy Reviews, vol. 161, 112362. June 2022. 
:doi:`10.1016/j.rser.2022.112362` """""" # avoid undefined results for horizontal or upside-down surfaces zeroang = 1e-06 surface_tilt = np.where(surface_tilt == 0, zeroang, surface_tilt) surface_tilt = np.where(surface_tilt >= 90, 90 - zeroang, surface_tilt) # and for aoi: aoi = np.where(aoi <= 0, zeroang, aoi) # similar for AOI > 90 aoi = np.where(aoi >= 90, 90 - zeroang, aoi) # angle between module normal and refracted ray: theta_0tp = asind(sind(aoi) / n) # Eq 3c # reflectance of direct radiation on PV cover: sin_term = sind(aoi - theta_0tp)**2 / sind(aoi + theta_0tp)**2 / 2 tan_term = tand(aoi - theta_0tp)**2 / tand(aoi + theta_0tp)**2 / 2 rd = sin_term + tan_term # Eq 3b # reflectance on pyranometer cover: r0 = ((n_ref-1.0)/(n_ref+1.0))**2.0 # Eq 3e # relative transmittance of direct radiation by PV cover: cd = (1 - rd) / (1 - r0) # Eq 3a # weighting function term1 = n*(n_ref+1)**2 / (n_ref*(n+1)**2) # note: the last coefficient here differs in sign from the reference polycoeffs = [2.77526e-09, 3.74953, -5.18727, 3.41186, -1.08794, 0.136060] term2 = np.polynomial.polynomial.polyval(n, polycoeffs) w = term1 * term2 # Eq 5 # relative transmittance of sky diffuse radiation by PV cover: cosB = cosd(surface_tilt) sinB = sind(surface_tilt) cuk = (2*w / (np.pi * (1 + cosB))) * ( (30/7)*np.pi - (160/21)*np.radians(surface_tilt) - (10/3)*np.pi*cosB + (160/21)*cosB*sinB - (5/3)*np.pi*cosB*sinB**2 + (20/7)*cosB*sinB**3 - (5/16)*np.pi*cosB*sinB**4 + (16/105)*cosB*sinB**5 ) # Eq 4 # relative transmittance of ground-reflected radiation by PV cover: cug = 40 * w / (21 * (1 - cosB)) - (1 + cosB) / (1 - cosB) * cuk # Eq 6 # handle tilt=0 case correctly: cug = np.where(surface_tilt == zeroang, 0, cug) out = { 'direct': cd, 'sky': cuk, 'ground': cug, } return out ","def fedis(aoi, surface_tilt, n=1.5, n_ref=1.4585): """""" Determine the incidence angle modifiers (iam) for direct, diffuse sky, and ground-reflected radiation using the FEDIS transmittance model. The ""Fresnel Equations"" for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS) [1]_ is an analytical solution of diffuse transmission based on the rigorous integration of an alternate form of the Fresnel equations. The approach leads to a simple yet accurate relative transmittance model that reconciles the solar energy sensed by pyranometers and PV panels. Parameters ---------- aoi : numeric Angle of incidence. [degrees] surface_tilt : numeric Surface tilt angle measured from horizontal (e.g. surface facing up = 0, surface facing horizon = 90). [degrees] n : float, default 1.5 Refractive index of the PV cover. The default value of 1.5 was used for an IMT reference cell in [1]_. [unitless] n_ref : float, default 1.4585 Refractive index of the pyranometer cover. The default value was used for a fused silica dome over a CMP22 in [1]_. Returns ------- iam : dict IAM values for each type of irradiance: * 'direct': radiation from the solar disc * 'sky': diffuse radiation from the sky dome * 'ground': radiation reflected from the ground Notes ----- This implementation corrects a typo in the reference regarding the sign of the last polynomial term in Equation 5. References ---------- .. [1] Xie, Y., M. Sengupta, A. Habte, A. Andreas, ""The 'Fresnel Equations' for Diffuse radiation on Inclined photovoltaic Surfaces (FEDIS)"", Renewable and Sustainable Energy Reviews, vol. 161, 112362. June 2022. 
:doi:`10.1016/j.rser.2022.112362` """""" # avoid undefined results for horizontal or upside-down surfaces zeroang = 1e-06 surface_tilt = np.where(surface_tilt == 0, zeroang, surface_tilt) surface_tilt = np.where(surface_tilt >= 90, 90 - zeroang, surface_tilt) # and for aoi: aoi = np.where(aoi <= 0, zeroang, aoi) # similar for AOI > 90 aoi = np.where(aoi >= 90, 90 - zeroang, aoi) # angle between module normal and refracted ray: theta_0tp = asind(sind(aoi) / n) # Eq 3c # reflectance of direct radiation on PV cover: sin_term = sind(aoi - theta_0tp)**2 / sind(aoi + theta_0tp)**2 / 2 tan_term = tand(aoi - theta_0tp)**2 / tand(aoi + theta_0tp)**2 / 2 rd = sin_term + tan_term # Eq 3b # reflectance on pyranometer cover: r0 = ((n_ref-1.0)/(n_ref+1.0))**2.0 # Eq 3e # relative transmittance of direct radiation by PV cover: cd = (1 - rd) / (1 - r0) # Eq 3a # weighting function term1 = n*(n_ref+1)**2 / (n_ref*(n+1)**2) # note: the last coefficient here differs in sign from the reference polycoeffs = [2.77526e-09, 3.74953, -5.18727, 3.41186, -1.08794, 0.136060] term2 = np.polynomial.polynomial.polyval(n, polycoeffs) w = term1 * term2 # Eq 5 # relative transmittance of sky diffuse radiation by PV cover: cosB = cosd(surface_tilt) sinB = sind(surface_tilt) cuk = (2*w / (np.pi * (1 + cosB))) * ( (30/7)*np.pi - (160/21)*np.radians(surface_tilt) - (10/3)*np.pi*cosB + (160/21)*cosB*sinB - (5/3)*np.pi*cosB*sinB**2 + (20/7)*cosB*sinB**3 - (5/16)*np.pi*cosB*sinB**4 + (16/105)*cosB*sinB**5 ) # Eq 4 # relative transmittance of ground-reflected radiation by PV cover: cug = 40 * w / (21 * (1 - cosB)) - (1 + cosB) / (1 - cosB) * cuk # Eq 6 # handle tilt=0 case correctly: cug = np.where(surface_tilt == zeroang, 0, cug) out = { 'direct': cd, 'sky': cuk, 'ground': cug, } return out " 4543,"def mean_img(imgs, target_affine=None, target_shape=None, verbose=0, n_jobs=1): """"""Compute the mean of the images over time. Note that if list of 4D images are given, the mean of each 4D image is computed separately, and the resulting mean is computed after. Parameters ---------- imgs : Niimg-like object or iterable of Niimg-like objects Images to be averaged over time (see http://nilearn.github.io/manipulating_images/input_output.html) target_affine : :class:`numpy.ndarray`, optional If specified, the image is resampled corresponding to this new affine. target_affine can be a 3x3 or a 4x4 matrix. target_shape : :obj:`tuple` or :obj:`list`, optional If specified, the image will be resized to match this new shape. len(target_shape) must be equal to 3. A target_affine has to be specified jointly with target_shape. verbose : :obj:`int`, optional Controls the amount of verbosity: higher numbers give more messages (0 means no messages). n_jobs : :obj:`int`, optional The number of CPUs to use to do the computation (-1 means 'all CPUs'). Returns ------- :class:`~nibabel.nifti1.Nifti1Image` Mean image. See Also -------- nilearn.image.math_img : For more general operations on images. 
"""""" is_str = isinstance(imgs, str) is_iterable = isinstance(imgs, collections.abc.Iterable) if is_str or not is_iterable: imgs = [imgs, ] imgs_iter = iter(imgs) first_img = check_niimg(next(imgs_iter)) # Compute the first mean to retrieve the reference # target_affine and target_shape if_needed n_imgs = 1 running_mean, first_affine = _compute_mean(first_img, target_affine=target_affine, target_shape=target_shape) if target_affine is None or target_shape is None: target_affine = first_affine target_shape = running_mean.shape[:3] for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_compute_mean)(n, target_affine=target_affine, target_shape=target_shape) for n in imgs_iter): n_imgs += 1 # _compute_mean returns (mean_img, affine) this_mean = this_mean[0] running_mean += this_mean running_mean = running_mean / float(n_imgs) return new_img_like(first_img, running_mean, target_affine) ","def mean_img(imgs, target_affine=None, target_shape=None, verbose=0, n_jobs=1): """"""Compute the mean of the images over time or the 4th dimension. Note that if list of 4D images are given, the mean of each 4D image is computed separately, and the resulting mean is computed after. Parameters ---------- imgs : Niimg-like object or iterable of Niimg-like objects Images to be averaged over time (see http://nilearn.github.io/manipulating_images/input_output.html) target_affine : :class:`numpy.ndarray`, optional If specified, the image is resampled corresponding to this new affine. target_affine can be a 3x3 or a 4x4 matrix. target_shape : :obj:`tuple` or :obj:`list`, optional If specified, the image will be resized to match this new shape. len(target_shape) must be equal to 3. A target_affine has to be specified jointly with target_shape. verbose : :obj:`int`, optional Controls the amount of verbosity: higher numbers give more messages (0 means no messages). n_jobs : :obj:`int`, optional The number of CPUs to use to do the computation (-1 means 'all CPUs'). Returns ------- :class:`~nibabel.nifti1.Nifti1Image` Mean image. See Also -------- nilearn.image.math_img : For more general operations on images. 
"""""" is_str = isinstance(imgs, str) is_iterable = isinstance(imgs, collections.abc.Iterable) if is_str or not is_iterable: imgs = [imgs, ] imgs_iter = iter(imgs) first_img = check_niimg(next(imgs_iter)) # Compute the first mean to retrieve the reference # target_affine and target_shape if_needed n_imgs = 1 running_mean, first_affine = _compute_mean(first_img, target_affine=target_affine, target_shape=target_shape) if target_affine is None or target_shape is None: target_affine = first_affine target_shape = running_mean.shape[:3] for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_compute_mean)(n, target_affine=target_affine, target_shape=target_shape) for n in imgs_iter): n_imgs += 1 # _compute_mean returns (mean_img, affine) this_mean = this_mean[0] running_mean += this_mean running_mean = running_mean / float(n_imgs) return new_img_like(first_img, running_mean, target_affine) " 32768,"def _set_patched(obj, state): setattr(obj, 'datadog_patch', state) ","def _set_patched(obj, state): setattr(obj, '__datadog_patch', state) " 8768,"def parse_insta_json(json): # Parse JSON content needed = _get_json_data(json) dimensions = needed.get('dimensions', {}) owner = needed.get('owner', {}) # Build bot response parts = [] # Title if needed.get('is_video'): title = ""[insta] Video by "" else: title = ""[insta] Photo by "" # Author iuser = owner.get('username') ifname = owner.get('full_name') if ifname and iuser: parts.append('%s %s (@%s)' % (title, ifname, iuser)) elif iuser: parts.append('%s @%s' % (title, iuser)) elif ifname: parts.append('%s %s' % (title, ifname)) else: parts.append('%s unkown user' % title) # Media caption try: icap = needed['edge_media_to_caption']['edges'][0]['node']['text'] # Strip newlines icap = icap.replace('\n', ' ') # Truncate caption icap = (icap[:256] + '…') if len(icap) > 256 else icap except (KeyError, IndexError): icap = None if icap: parts.append(icap) # Media width and height iwidth = dimensions.get('width') or None iheight = dimensions.get('height') or None if iwidth and iheight: parts.append('%sx%s' % (iwidth, iheight)) # Likes ilikes = needed.get('edge_media_preview_like', {}).get('count') if ilikes: parts.append('Likes: {:,}'.format(ilikes)) # Comments icomms = needed.get('edge_media_to_parent_comment', {}).get('count') if icomms: parts.append('Comments: {:,}'.format(icomms)) # Publishing date idate = needed.get('taken_at_timestamp') if idate: dateformat = '%Y-%m-%d %H:%M:%S' pubdate = datetime.utcfromtimestamp(idate).strftime(dateformat) parts.append('Uploaded: %s' % pubdate) # Build the message return ' | '.join(parts) ","def parse_insta_json(json): # Parse JSON content needed = _get_json_data(json) dimensions = needed.get('dimensions', {}) owner = needed.get('owner', {}) # Build bot response parts = [] # Title if needed.get('is_video'): title = ""[insta] Video by "" else: title = ""[insta] Photo by "" # Author iuser = owner.get('username') ifname = owner.get('full_name') if ifname and iuser: parts.append('%s %s (@%s)' % (title, ifname, iuser)) elif iuser: parts.append('%s @%s' % (title, iuser)) elif ifname: parts.append('%s %s' % (title, ifname)) else: parts.append('%s unknown user' % title) # Media caption try: icap = needed['edge_media_to_caption']['edges'][0]['node']['text'] # Strip newlines icap = icap.replace('\n', ' ') # Truncate caption icap = (icap[:256] + '…') if len(icap) > 256 else icap except (KeyError, IndexError): icap = None if icap: parts.append(icap) # Media width and height iwidth = dimensions.get('width') or None iheight 
= dimensions.get('height') or None if iwidth and iheight: parts.append('%sx%s' % (iwidth, iheight)) # Likes ilikes = needed.get('edge_media_preview_like', {}).get('count') if ilikes: parts.append('Likes: {:,}'.format(ilikes)) # Comments icomms = needed.get('edge_media_to_parent_comment', {}).get('count') if icomms: parts.append('Comments: {:,}'.format(icomms)) # Publishing date idate = needed.get('taken_at_timestamp') if idate: dateformat = '%Y-%m-%d %H:%M:%S' pubdate = datetime.utcfromtimestamp(idate).strftime(dateformat) parts.append('Uploaded: %s' % pubdate) # Build the message return ' | '.join(parts) " 8420,"def test_true_exciser(): np.random.seed(84) spectral_axis = np.linspace(5000,5100,num=100)*u.AA flux = (np.random.randn(100) + 3) * u.Jy spec = Spectrum1D(flux=flux, spectral_axis = spectral_axis) region = SpectralRegion([(5005,5010), (5060,5065)]*u.AA) excised_spec = excise_regions(spec, region) assert len(excised_spec.spectral_axis) == len(spec.spectral_axis)-10 assert len(excised_spec.flux) == len(spec.flux)-10 assert np.isclose(excised_spec.flux.sum(), 243.2617*u.Jy, atol=0.001*u.Jy) ","def test_true_exciser(): np.random.seed(84) spectral_axis = np.linspace(5000,5100,num=100)*u.AA flux = (np.random.randn(100) + 3) * u.Jy spec = Spectrum1D(flux=flux, spectral_axis=spectral_axis) region = SpectralRegion([(5005,5010), (5060,5065)]*u.AA) excised_spec = excise_regions(spec, region) assert len(excised_spec.spectral_axis) == len(spec.spectral_axis)-10 assert len(excised_spec.flux) == len(spec.flux)-10 assert np.isclose(excised_spec.flux.sum(), 243.2617*u.Jy, atol=0.001*u.Jy) " 8808,"def test_isupport_apply_ci(): """"""Test removed parameters are case-insensitives."""""" instance = isupport.ISupport() new = instance.apply(awaylen=50, NICKLEN=31, channellen=16) new_removed = new.apply(**{ '-awaylen': None, '-NICKLEN': None, 'channellen': 24, }) assert 'AWAYLEN' in new assert 'AWAYLEN' not in new_removed assert 'NICKLEN' in new assert 'NICKLEN' not in new_removed assert 'CHANNELLEN' in new assert 'CHANNELLEN' in new_removed assert new['CHANNELLEN'] == 16 assert new_removed['CHANNELLEN'] == 24 new_removed_ci = new.apply(**{ '-AWAYLEN': None, '-nicklen': None, 'CHANNELLEN': 34, }) assert 'AWAYLEN' in new assert 'AWAYLEN' not in new_removed_ci assert 'NICKLEN' in new assert 'NICKLEN' not in new_removed_ci assert 'CHANNELLEN' in new assert 'CHANNELLEN' in new_removed_ci assert new['CHANNELLEN'] == 16 assert new_removed_ci['CHANNELLEN'] == 34 ","def test_isupport_apply_case_insensitive(): """"""Test removed parameters are case-insensitives."""""" instance = isupport.ISupport() new = instance.apply(awaylen=50, NICKLEN=31, channellen=16) new_removed = new.apply(**{ '-awaylen': None, '-NICKLEN': None, 'channellen': 24, }) assert 'AWAYLEN' in new assert 'AWAYLEN' not in new_removed assert 'NICKLEN' in new assert 'NICKLEN' not in new_removed assert 'CHANNELLEN' in new assert 'CHANNELLEN' in new_removed assert new['CHANNELLEN'] == 16 assert new_removed['CHANNELLEN'] == 24 new_removed_ci = new.apply(**{ '-AWAYLEN': None, '-nicklen': None, 'CHANNELLEN': 34, }) assert 'AWAYLEN' in new assert 'AWAYLEN' not in new_removed_ci assert 'NICKLEN' in new assert 'NICKLEN' not in new_removed_ci assert 'CHANNELLEN' in new assert 'CHANNELLEN' in new_removed_ci assert new['CHANNELLEN'] == 16 assert new_removed_ci['CHANNELLEN'] == 34 " 31660,"def main(): params = demisto.params() args = demisto.args() url = params.get('url') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) 
headers = {} mock_data = str(args.get('mock-data', '')) if mock_data.lower() == ""true"": headers['Mock-Data'] = ""True"" headers['Authorization'] = f'Bearer {params[""api_key""]}' headers['Soar-Integration-Origin'] = ""Cortex XSOAR"" command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) commands = { 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case': check_the_status_of_an_action_requested_on_a_case_command, 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat': check_the_status_of_an_action_requested_on_a_threat_command, 'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security': get_a_list_of_abnormal_cases_identified_by_abnormal_security_command, 'abxcortexxsoar-get-a-list-of-threats': get_a_list_of_threats_command, 'abxcortexxsoar-get-details-of-a-threat': get_details_of_a_threat_command, 'abxcortexxsoar-get-details-of-an-abnormal-case': get_details_of_an_abnormal_case_command, 'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command, 'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security': manage_a_threat_identified_by_abnormal_security_command, 'abxcortexxsoar-manage-an-abnormal-case': manage_an_abnormal_case_command, 'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security': submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command, } if command == 'test-module': headers['Mock-Data'] = ""True"" test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) test_module(test_client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) ","def main(): params = demisto.params() args = demisto.args() url = params.get('url') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) headers = {} mock_data = str(args.get('mock-data', '')) if mock_data.lower() == ""true"": headers['Mock-Data'] = ""True"" headers['Authorization'] = f'Bearer {params[""api_key""]}' headers['Soar-Integration-Origin'] = ""Cortex XSOAR"" command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) commands = { 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case': check_the_status_of_an_action_requested_on_a_case_command, 'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat': check_the_status_of_an_action_requested_on_a_threat_command, 'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security': get_a_list_of_abnormal_cases_identified_by_abnormal_security_command, 'abxcortexxsoar-get-a-list-of-threats': get_a_list_of_threats_command, 'abxcortexxsoar-get-details-of-a-threat': get_details_of_a_threat_command, 'abxcortexxsoar-get-details-of-an-abnormal-case': get_details_of_an_abnormal_case_command, 'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command, 'abnormal-security-manage-threat-identified-by-abnormal-security': manage_a_threat_identified_by_abnormal_security_command, 'abxcortexxsoar-manage-an-abnormal-case': manage_an_abnormal_case_command, 
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security': submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command, } if command == 'test-module': headers['Mock-Data'] = ""True"" test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None) test_module(test_client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) " 56087,"def bbox2result(bboxes, labels, num_classes): """"""Convert detection results to a list of numpy arrays. Args: bboxes (torch.Tensor | np.ndarray): shape (n, 5) labels (torch.Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """""" if bboxes.shape[0] == 0: return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] else: if isinstance(bboxes, torch.Tensor): bboxes = bboxes.detach().cpu().numpy() labels = labels.detach().cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes)] ","def bbox2result(bboxes, labels, num_classes): """"""Convert detection results to a list of numpy arrays. Args: bboxes (torch.Tensor | np.ndarray): shape (n, 5) labels (torch.Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """""" if bboxes.shape[0] == 0: return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] if isinstance(bboxes, torch.Tensor): bboxes = bboxes.detach().cpu().numpy() labels = labels.detach().cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes)] " 2037,"def kmeans_plusplus(X, n_clusters, x_squared_norms=None, random_state=None, n_local_trials=None): """"""Add some documentation here """""" # Check parameters if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) elif len(x_squared_norms) != X.shape[0]: warnings.warn( ""x_squared_norms should be of length n_samples. "" ""computing default norms"" ) x_squared_norms = row_norms(X, squared=True) random_state = check_random_state(random_state) # Check data check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C') # Call private k-means++ centers, indices = _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials) if sp.issparse(centers): centers = centers.toarray() _validate_center_shape(X, n_clusters, centers) return centers, indices ","def kmeans_plusplus(X, n_clusters, *, x_squared_norms=None, random_state=None, n_local_trials=None): """"""Add some documentation here """""" # Check parameters if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) elif len(x_squared_norms) != X.shape[0]: warnings.warn( ""x_squared_norms should be of length n_samples. "" ""computing default norms"" ) x_squared_norms = row_norms(X, squared=True) random_state = check_random_state(random_state) # Check data check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32], order='C') # Call private k-means++ centers, indices = _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials) if sp.issparse(centers): centers = centers.toarray() _validate_center_shape(X, n_clusters, centers) return centers, indices " 34850,"def find_entities_in_training_example(example: Text) -> List[Dict[Text, Any]]: """"""Extracts entities from an intent example. Args: example: Intent example. 
Returns: Extracted entities. """""" entities = [] offset = 0 for match in re.finditer(ENTITY_REGEX, example): logger.debug(f""{match}"") if match.groupdict()[GROUP_ENTITY_DICT] or match.groupdict()[GROUP_ENTITY_TYPE]: entity_attributes = extract_entity_attributes(match) start_index = match.start() - offset end_index = start_index + len(entity_attributes.text) offset += len(match.group(0)) - len(entity_attributes.text) entity = rasa.shared.nlu.training_data.util.build_entity( start_index, end_index, entity_attributes.value, entity_attributes.type, entity_attributes.role, entity_attributes.group, ) entities.append(entity) else: entity_text = match.groupdict()[GROUP_ENTITY_TEXT] # iterate over the list start_index = match.start() - offset end_index = start_index + len(entity_text) offset += len(match.group(0)) - len(entity_text) for match_inner in re.finditer( SINGLE_ENTITY_DICT, match.groupdict()[GROUP_ENTITY_DICT_LIST] ): entity_attributes = extract_entity_attributes_from_dict( entity_text=entity_text, match=match_inner ) entity = rasa.shared.nlu.training_data.util.build_entity( start_index, end_index, entity_attributes.value, entity_attributes.type, entity_attributes.role, entity_attributes.group, ) entities.append(entity) return entities ","def find_entities_in_training_example(example: Text) -> List[Dict[Text, Any]]: """"""Extracts entities from an intent example. Args: example: Intent example. Returns: Extracted entities. """""" entities = [] offset = 0 for match in re.finditer(ENTITY_REGEX, example): logger.debug(f""Entity annotation regex match: {match}"") if match.groupdict()[GROUP_ENTITY_DICT] or match.groupdict()[GROUP_ENTITY_TYPE]: entity_attributes = extract_entity_attributes(match) start_index = match.start() - offset end_index = start_index + len(entity_attributes.text) offset += len(match.group(0)) - len(entity_attributes.text) entity = rasa.shared.nlu.training_data.util.build_entity( start_index, end_index, entity_attributes.value, entity_attributes.type, entity_attributes.role, entity_attributes.group, ) entities.append(entity) else: entity_text = match.groupdict()[GROUP_ENTITY_TEXT] # iterate over the list start_index = match.start() - offset end_index = start_index + len(entity_text) offset += len(match.group(0)) - len(entity_text) for match_inner in re.finditer( SINGLE_ENTITY_DICT, match.groupdict()[GROUP_ENTITY_DICT_LIST] ): entity_attributes = extract_entity_attributes_from_dict( entity_text=entity_text, match=match_inner ) entity = rasa.shared.nlu.training_data.util.build_entity( start_index, end_index, entity_attributes.value, entity_attributes.type, entity_attributes.role, entity_attributes.group, ) entities.append(entity) return entities " 39134,"def load_tedlium_item( fileid: str, line: int, path: str, ext_audio: str, ext_txt: str ) -> Tuple[Tensor, int, str, int, int, int]: transcript_path = os.path.join(path, ""stm/"", fileid) with open(transcript_path + ext_txt) as f: transcript = f.readlines()[line] talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split( "" "", 6 ) wave_path = os.path.join(path, ""sph/"", fileid) waveform, sample_rate = torchaudio.load(wave_path + ext_audio) print(wave_path + ext_audio) # Calculate indexes for start time and endtime start_time = int(float(start_time) * sample_rate) end_time = int(float(end_time) * sample_rate) print(start_time, end_time) waveform = waveform[:, start_time:end_time] return ( waveform, sample_rate, transcript, talk_id, speaker_id, identifier, transcript, ) ","def load_tedlium_item( 
fileid: str, line: int, path: str, ext_audio: str, ext_txt: str ) -> Tuple[Tensor, int, str, int, int, int]: transcript_path = os.path.join(path, ""stm/"", fileid) with open(transcript_path + ext_txt) as f: transcript = f.readlines()[line] talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split( "" "", 6 ) wave_path = os.path.join(path, ""sph"", fileid) waveform, sample_rate = torchaudio.load(wave_path + ext_audio) print(wave_path + ext_audio) # Calculate indexes for start time and endtime start_time = int(float(start_time) * sample_rate) end_time = int(float(end_time) * sample_rate) print(start_time, end_time) waveform = waveform[:, start_time:end_time] return ( waveform, sample_rate, transcript, talk_id, speaker_id, identifier, transcript, ) " 45702,"def _check_inputs(vil, rainrate, velocity, timesteps, ar_order): if len(vil.shape) != 3: raise ValueError( ""vil.shape = %s, but a three-dimensional array expected"" % str(vil.shape) ) if rainrate is not None: if len(rainrate.shape) != 2: raise ValueError( ""rainrate.shape = %s, but a two-dimensional array expected"" % str(rainrate.shape) ) if vil.shape[0] != ar_order + 2: raise ValueError( ""vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"" % (vil.shape[0], ar_order + 2) ) if len(velocity.shape) != 3: raise ValueError( ""velocity.shape = %s, but a three-dimensional array expected"" % str(velocity.shape) ) if isinstance(timesteps, list) and not sorted(timesteps) == timesteps: raise ValueError(""timesteps is not in ascending order"") ","def _check_inputs(vil, rainrate, velocity, timesteps, ar_order): if len(vil.shape) != 3: raise ValueError( ""vil.shape = %s, but a three-dimensional array expected"" % str(vil.shape) ) if rainrate is not None: if len(rainrate.shape) != 2: raise ValueError( ""rainrate.shape = %s, but a two-dimensional array expected"" % str(rainrate.shape) ) if vil.shape[0] != ar_order + 2: raise ValueError( ""vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"" % (vil.shape[0], ar_order + 2) ) if velocity.ndim != 3: raise ValueError( ""velocity.shape = %s, but a three-dimensional array expected"" % str(velocity.shape) ) if isinstance(timesteps, list) and not sorted(timesteps) == timesteps: raise ValueError(""timesteps is not in ascending order"") " 40179,"def _check_anndata_setup_equivalence( adata_source: Union[AnnData, dict], adata_target: AnnData ) -> bool: """""" Checks if target setup is equivalent to source. Parameters ---------- adata_source Either AnnData already setup or scvi_setup_dict as the source adata_target Target AnnData to check setup equivalence Returns ------- Whether the adata_target should be run through `transfer_anndata_setup` """""" if isinstance(adata_source, anndata.AnnData): _scvi_dict = adata_source.uns[""_scvi""] else: _scvi_dict = adata_source adata = adata_target stats = _scvi_dict[""summary_stats""] target_n_vars = adata.shape[1] error_msg = ( ""Number of {} in anndata different from initial anndata used for training."" ) if target_n_vars != stats[""n_vars""]: raise ValueError(error_msg.format(""vars"")) error_msg = ( ""There are more {} categories in the data than were originally registered. 
"" + ""Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."" ) self_categoricals = _scvi_dict[""categorical_mappings""] self_batch_mapping = self_categoricals[""_scvi_batch""][""mapping""] adata_categoricals = adata.uns[""_scvi""][""categorical_mappings""] adata_batch_mapping = adata_categoricals[""_scvi_batch""][""mapping""] # check if mappings are equal or needs transfer transfer_setup = _needs_transfer(self_batch_mapping, adata_batch_mapping, ""batch"") self_labels_mapping = self_categoricals[""_scvi_labels""][""mapping""] adata_labels_mapping = adata_categoricals[""_scvi_labels""][""mapping""] transfer_setup = transfer_setup or _needs_transfer( self_labels_mapping, adata_labels_mapping, ""label"" ) # validate any extra categoricals error_msg = ( ""Registered categorical key order mismatch between "" + ""the anndata used to train and the anndata passed in."" + ""Expected categories & order {}. Received {}.\n"" ) if ""extra_categoricals"" in _scvi_dict.keys(): target_dict = adata.uns[""_scvi""][""extra_categoricals""] source_dict = _scvi_dict[""extra_categoricals""] # check that order of keys setup is same if not np.array_equal(target_dict[""keys""], source_dict[""keys""]): raise ValueError(error_msg.format(source_dict[""keys""], target_dict[""keys""])) # check mappings are equivalent target_extra_cat_maps = adata.uns[""_scvi""][""extra_categoricals""][""mappings""] for key, val in source_dict[""mappings""].items(): target_map = target_extra_cat_maps[key] transfer_setup = transfer_setup or _needs_transfer(val, target_map, key) # validate any extra continuous covs if ""extra_continuous_keys"" in _scvi_dict.keys(): if ""extra_continuous_keys"" not in adata.uns[""_scvi""].keys(): raise ValueError('extra_continuous_keys not in adata.uns[""_scvi""]') target_cont_keys = adata.uns[""_scvi""][""extra_continuous_keys""] source_cont_keys = _scvi_dict[""extra_continuous_keys""] # check that order of keys setup is same if not np.array_equal(target_cont_keys[""keys""], source_cont_keys[""keys""]): raise ValueError( error_msg.format(source_cont_keys[""keys""], target_cont_keys[""keys""]) ) return transfer_setup ","def _check_anndata_setup_equivalence( adata_source: Union[AnnData, dict], adata_target: AnnData ) -> bool: """""" Checks if target setup is equivalent to source. Parameters ---------- adata_source Either AnnData already setup or scvi_setup_dict as the source adata_target Target AnnData to check setup equivalence Returns ------- Whether the adata_target should be run through `transfer_anndata_setup` """""" if isinstance(adata_source, anndata.AnnData): _scvi_dict = adata_source.uns[""_scvi""] else: _scvi_dict = adata_source adata = adata_target stats = _scvi_dict[""summary_stats""] target_n_vars = adata.shape[1] error_msg = ( ""Number of {} in anndata different from initial anndata used for training."" ) if target_n_vars != stats[""n_vars""]: raise ValueError(error_msg.format(""vars"")) error_msg = ( ""There are more {} categories in the data than were originally registered. 
"" + ""Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."" ) self_categoricals = _scvi_dict[""categorical_mappings""] self_batch_mapping = self_categoricals[""_scvi_batch""][""mapping""] adata_categoricals = adata.uns[""_scvi""][""categorical_mappings""] adata_batch_mapping = adata_categoricals[""_scvi_batch""][""mapping""] # check if mappings are equal or needs transfer transfer_setup = _needs_transfer(self_batch_mapping, adata_batch_mapping, ""batch"") self_labels_mapping = self_categoricals[""_scvi_labels""][""mapping""] adata_labels_mapping = adata_categoricals[""_scvi_labels""][""mapping""] transfer_setup = transfer_setup or _needs_transfer( self_labels_mapping, adata_labels_mapping, ""label"" ) # validate any extra categoricals error_msg = ( ""Registered categorical key order mismatch between "" + ""the anndata used to train and the anndata passed in."" + ""Expected categories & order {}. Received {}.\n"" ) if ""extra_categoricals"" in _scvi_dict.keys(): target_dict = adata.uns[""_scvi""][""extra_categoricals""] source_dict = _scvi_dict[""extra_categoricals""] # check that order of keys setup is same if not np.array_equal(target_dict[""keys""], source_dict[""keys""]): raise ValueError(error_msg.format(source_dict[""keys""], target_dict[""keys""])) # check mappings are equivalent target_extra_cat_maps = adata.uns[""_scvi""][""extra_categoricals""][""mappings""] for key, val in source_dict[""mappings""].items(): target_map = target_extra_cat_maps[key] transfer_setup = transfer_setup or _needs_transfer(val, target_map, key) # validate any extra continuous covs if ""extra_continuous_keys"" in _scvi_dict.keys(): if ""extra_continuous_keys"" not in adata.uns[""_scvi""].keys(): raise ValueError('extra_continuous_keys not in adata.uns[""_scvi""]') target_cont_keys = adata.uns[""_scvi""][""extra_continuous_keys""] source_cont_keys = _scvi_dict[""extra_continuous_keys""] # check that order of keys setup is same if not np.array_equal(target_cont_keys, source_cont_keys): raise ValueError( error_msg.format(source_cont_keys[""keys""], target_cont_keys[""keys""]) ) return transfer_setup " 51785,"def path_from_modules(modules, hint_name=None): """"""Inspect a list of TCL modules for entries that indicate the absolute path at which the library supported by said module can be found. Args: modules (list): module files to be loaded to get an external package hint_name (str): optional hint giving a name that likely maps is a better choice than choosing the last possible path. A good choice is the package name. E.g., if operating on an external mvapich, then give priority to modules with '.*mvapich.*' in their name. 
From these, always choose the last Returns: Guess of the prefix path where the package """""" assert isinstance(modules, list), 'the ""modules"" argument must be a list' remaining_modules = modules if hint_name: # first choices are those that have the hint in their names first_choice_modules = [m for m in modules if hint_name in m] best_choice = _path_from_modules_helper(first_choice_modules) # if we found something return it if best_choice: return best_choice # if we didn't succeed, then compute the remaining modules remaining_modules = [m for m in modules if m not in first_choice_modules] # search the final set of modules best_choice = _path_from_modules_helper(remaining_modules) return best_choice ","def path_from_modules(modules, hint_name=None): """"""Inspect a list of TCL modules for entries that indicate the absolute path at which the library supported by said module can be found. Args: modules (list): module files to be loaded to get an external package hint_name (str): optional hint giving a name that likely maps is a better choice than choosing the last possible path. A good choice is the package name. E.g., if operating on an external mvapich, then give priority to modules with '.*mvapich.*' in their name. From these, always choose the last Returns: Guess of the prefix path where the package """""" modules = list(modules) # defensive copy for generators if hint_name: # first choices are those that have the hint in their names first_choice_modules = [m for m in modules if hint_name in m] best_choice = _path_from_modules_helper(first_choice_modules) # if we found something return it if best_choice: return best_choice # if we didn't succeed, then compute the remaining modules modules = [m for m in modules if m not in first_choice_modules] # search the final set of modules best_choice = _path_from_modules_helper(modules) return best_choice " 34896,"def tile(data, reps): """"""Repeats the whole array multiple times. Parameters ---------- data : relay.Expr The input data to the operator. reps : tuple of int The number of times repeating the tensor a. .. note:: Each dim size of reps must be a positive integer. If reps has length d, the result will have dimension of max(d, a.ndim); If a.ndim < d, a is promoted to be d-dimensional by prepending new axes. If a.ndim ? d, reps is promoted to a.ndim by pre-pending 1's to it. Returns ------- ret : relay.Expr The computed result. """""" return _make.tile(data, reps) ","def tile(data, reps): """"""Repeats the whole array multiple times. Parameters ---------- data : relay.Expr The input data to the operator. reps : tuple of int The number of times repeating the tensor data. .. note:: Each dim size of reps must be a positive integer. If reps has length d, the result will have dimension of max(d, a.ndim); If a.ndim < d, a is promoted to be d-dimensional by prepending new axes. If a.ndim ? d, reps is promoted to a.ndim by pre-pending 1's to it. Returns ------- ret : relay.Expr The computed result. """""" return _make.tile(data, reps) " 31721,"def print_mirror_events_stats(context_data: dict, stage: str) -> List[str]: """"""Print debug message with information about mirroring events. Args: context_data: The integration context data. stage: A prefix for the debug message. Returns: The ids of the mirrored offenses being currently processed. 
"""""" updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) resubmitted_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []) not_updated_ids = [str(offense.get('id')) for offense in waiting_for_update] stats = [(str(offense.get('id')), len(offense.get('events', []))) for offense in updated] print_debug_msg(f""Mirror Events Stats: {stage}\n Updated Offenses (id, len(events)): {stats}"" f""\n Offenses ids waiting for update: {not_updated_ids}"" f""\n Resubmitted offenses: {resubmitted_ids}"") updated_ids = [offense_id for offense_id, events_num in stats] return list(set(not_updated_ids + updated_ids + resubmitted_ids)) ","def print_mirror_events_stats(context_data: dict, stage: str) -> List[str]: """"""Print debug message with information about mirroring events. Args: context_data: The integration context data. stage: A prefix for the debug message. Returns: The ids of the mirrored offenses being currently processed. """""" updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) resubmitted_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []) not_updated_ids = [str(offense.get('id')) for offense in waiting_for_update] stats = [(str(offense.get('id')), len(offense.get('events', []))) for offense in updated] print_debug_msg(f""Mirror Events Stats: {stage}\n Updated Offenses (id, len(events)): {stats}"" f""\n Offenses ids waiting for update: {not_updated_ids}"" f""\n Resubmitted offenses: {resubmitted_ids}"") updated_ids = [offense_id for offense_id, events_num in stats] return set(not_updated_ids + updated_ids + resubmitted_ids) " 50899,"def get_platforms(config_entry): """"""Return the platforms beloging to a config_entry."""""" model = config_entry.data[CONF_MODEL] flow_type = config_entry.data[CONF_FLOW_TYPE] platforms = [] if flow_type == CONF_GATEWAY: platforms = GATEWAY_PLATFORMS elif flow_type == CONF_DEVICE: if model in MODELS_SWITCH: platforms = SWITCH_PLATFORMS elif model in MODELS_FAN: platforms = FAN_PLATFORMS elif model in MODELS_LIGHT: platforms = LIGHT_PLATFORMS for vacuum_model in MODELS_VACUUM: if model.startswith(vacuum_model): platforms = VACUUM_PLATFORMS for air_monitor_model in MODELS_AIR_MONITOR: if model.startswith(air_monitor_model): platforms = AIR_MONITOR_PLATFORMS return platforms ","def get_platforms(config_entry): """"""Return the platforms beloging to a config_entry."""""" model = config_entry.data[CONF_MODEL] flow_type = config_entry.data[CONF_FLOW_TYPE] if flow_type == CONF_GATEWAY: return GATEWAY_PLATFORMS if flow_type == CONF_DEVICE: if model in MODELS_SWITCH: return SWITCH_PLATFORMS if model in MODELS_FAN: return FAN_PLATFORMS if model in MODELS_LIGHT: return LIGHT_PLATFORMS for vacuum_model in MODELS_VACUUM: if model.startswith(vacuum_model): return VACUUM_PLATFORMS for air_monitor_model in MODELS_AIR_MONITOR: if model.startswith(air_monitor_model): return AIR_MONITOR_PLATFORMS return None " 24847,"def my_func(self) -> bool: """"""This is a docstring. 
:returns: Always False """""" return False ","def my_func(self) -> bool: """"""Sphinx missing return type with annotations :returns: Always False """""" return False " 53115,"def _fixBridgeFuncs(): """"""Appropriately set the return and argument types of all the access bridge dll functions """""" _fixBridgeFunc(None,'Windows_run') _fixBridgeFunc(None,'setFocusGainedFP',c_void_p) _fixBridgeFunc(None,'setPropertyNameChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyDescriptionChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyValueChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyStateChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyCaretChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyActiveDescendentChangeFP',c_void_p) _fixBridgeFunc(None,'releaseJavaObject',c_long,JOBJECT64) _fixBridgeFunc(BOOL,'getVersionInfo',POINTER(AccessBridgeVersionInfo),errcheck=True) _fixBridgeFunc(BOOL,'isJavaWindow',HWND) _fixBridgeFunc(BOOL,'isSameObject',c_long,JOBJECT64,JOBJECT64) _fixBridgeFunc(BOOL,'getAccessibleContextFromHWND',HWND,POINTER(c_long),POINTER(JOBJECT64),errcheck=True) _fixBridgeFunc(HWND,'getHWNDFromAccessibleContext',c_long,JOBJECT64,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleContextAt',c_long,JOBJECT64,jint,jint,POINTER(JOBJECT64),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleContextWithFocus',HWND,POINTER(c_long),POINTER(JOBJECT64),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleContextInfo',c_long,JOBJECT64,POINTER(AccessibleContextInfo),errcheck=True) _fixBridgeFunc(JOBJECT64,'getAccessibleChildFromContext',c_long,JOBJECT64,jint,errcheck=True) _fixBridgeFunc(JOBJECT64,'getAccessibleParentFromContext',c_long,JOBJECT64) _fixBridgeFunc(JOBJECT64,'getParentWithRole',c_long,JOBJECT64,POINTER(c_wchar)) _fixBridgeFunc(BOOL,'getAccessibleRelationSet',c_long,JOBJECT64,POINTER(AccessibleRelationSetInfo),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextInfo',c_long,JOBJECT64,POINTER(AccessibleTextInfo),jint,jint,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextItems',c_long,JOBJECT64,POINTER(AccessibleTextItemsInfo),jint,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextSelectionInfo',c_long,JOBJECT64,POINTER(AccessibleTextSelectionInfo),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextAttributes',c_long,JOBJECT64,jint,POINTER(AccessibleTextAttributesInfo),errcheck=True) _fixBridgeFunc( BOOL, 'getAccessibleTextRect', c_long, JOBJECT64, POINTER(AccessibleTextRectInfo), jint, errcheck=True ) _fixBridgeFunc(BOOL,'getAccessibleTextLineBounds',c_long,JOBJECT64,jint,POINTER(jint),POINTER(jint),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextRange',c_long,JOBJECT64,jint,jint,POINTER(c_char),c_short,errcheck=True) _fixBridgeFunc(BOOL,'getCurrentAccessibleValueFromContext',c_long,JOBJECT64,POINTER(c_wchar),c_short,errcheck=True) _fixBridgeFunc(BOOL,'selectTextRange',c_long,JOBJECT64,c_int,c_int,errcheck=True) _fixBridgeFunc(BOOL,'getTextAttributesInRange',c_long,JOBJECT64,c_int,c_int,POINTER(AccessibleTextAttributesInfo),POINTER(c_short),errcheck=True) _fixBridgeFunc(JOBJECT64,'getTopLevelObject',c_long,JOBJECT64,errcheck=True) _fixBridgeFunc(c_int,'getObjectDepth',c_long,JOBJECT64) _fixBridgeFunc(JOBJECT64,'getActiveDescendent',c_long,JOBJECT64) _fixBridgeFunc(BOOL,'requestFocus',c_long,JOBJECT64,errcheck=True) _fixBridgeFunc(BOOL,'setCaretPosition',c_long,JOBJECT64,c_int,errcheck=True) _fixBridgeFunc(BOOL,'getCaretLocation',c_long,JOBJECT64,POINTER(AccessibleTextRectInfo),jint,errcheck=True) 
_fixBridgeFunc(BOOL,'getAccessibleActions',c_long,JOBJECT64,POINTER(AccessibleActions),errcheck=True) _fixBridgeFunc(BOOL,'doAccessibleActions',c_long,JOBJECT64,POINTER(AccessibleActionsToDo),POINTER(jint),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTableInfo',c_long,JOBJECT64,POINTER(AccessibleTableInfo)) _fixBridgeFunc(BOOL,'getAccessibleTableCellInfo',c_long,AccessibleTable,jint,jint,POINTER(AccessibleTableCellInfo),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTableRowHeader',c_long,JOBJECT64,POINTER(AccessibleTableInfo)) _fixBridgeFunc(BOOL,'getAccessibleTableColumnHeader',c_long,JOBJECT64,POINTER(AccessibleTableInfo)) _fixBridgeFunc(JOBJECT64,'getAccessibleTableRowDescription',c_long,JOBJECT64,jint) _fixBridgeFunc(JOBJECT64,'getAccessibleTableColumnDescription',c_long,JOBJECT64,jint) _fixBridgeFunc(jint,'getAccessibleTableRow',c_long,AccessibleTable,jint) _fixBridgeFunc(jint,'getAccessibleTableColumn',c_long,AccessibleTable,jint) _fixBridgeFunc(jint,'getAccessibleTableIndex',c_long,AccessibleTable,jint,jint) _fixBridgeFunc(BOOL,'getAccessibleKeyBindings',c_long,JOBJECT64,POINTER(AccessibleKeyBindings),errcheck=True) ","def _fixBridgeFuncs(): """"""Appropriately set the return and argument types of all the access bridge dll functions """""" _fixBridgeFunc(None,'Windows_run') _fixBridgeFunc(None,'setFocusGainedFP',c_void_p) _fixBridgeFunc(None,'setPropertyNameChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyDescriptionChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyValueChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyStateChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyCaretChangeFP',c_void_p) _fixBridgeFunc(None,'setPropertyActiveDescendentChangeFP',c_void_p) _fixBridgeFunc(None,'releaseJavaObject',c_long,JOBJECT64) _fixBridgeFunc(BOOL,'getVersionInfo',POINTER(AccessBridgeVersionInfo),errcheck=True) _fixBridgeFunc(BOOL,'isJavaWindow',HWND) _fixBridgeFunc(BOOL,'isSameObject',c_long,JOBJECT64,JOBJECT64) _fixBridgeFunc(BOOL,'getAccessibleContextFromHWND',HWND,POINTER(c_long),POINTER(JOBJECT64),errcheck=True) _fixBridgeFunc(HWND,'getHWNDFromAccessibleContext',c_long,JOBJECT64,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleContextAt',c_long,JOBJECT64,jint,jint,POINTER(JOBJECT64),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleContextWithFocus',HWND,POINTER(c_long),POINTER(JOBJECT64),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleContextInfo',c_long,JOBJECT64,POINTER(AccessibleContextInfo),errcheck=True) _fixBridgeFunc(JOBJECT64,'getAccessibleChildFromContext',c_long,JOBJECT64,jint,errcheck=True) _fixBridgeFunc(JOBJECT64,'getAccessibleParentFromContext',c_long,JOBJECT64) _fixBridgeFunc(JOBJECT64,'getParentWithRole',c_long,JOBJECT64,POINTER(c_wchar)) _fixBridgeFunc(BOOL,'getAccessibleRelationSet',c_long,JOBJECT64,POINTER(AccessibleRelationSetInfo),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextInfo',c_long,JOBJECT64,POINTER(AccessibleTextInfo),jint,jint,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextItems',c_long,JOBJECT64,POINTER(AccessibleTextItemsInfo),jint,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextSelectionInfo',c_long,JOBJECT64,POINTER(AccessibleTextSelectionInfo),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTextAttributes',c_long,JOBJECT64,jint,POINTER(AccessibleTextAttributesInfo),errcheck=True) _fixBridgeFunc( BOOL, 'getAccessibleTextRect', c_long, JOBJECT64, POINTER(AccessibleTextRectInfo), jint, errcheck=True ) _fixBridgeFunc(BOOL,'getAccessibleTextLineBounds',c_long,JOBJECT64,jint,POINTER(jint),POINTER(jint),errcheck=True) 
_fixBridgeFunc(BOOL,'getAccessibleTextRange',c_long,JOBJECT64,jint,jint,POINTER(c_char),c_short,errcheck=True) _fixBridgeFunc(BOOL,'getCurrentAccessibleValueFromContext',c_long,JOBJECT64,POINTER(c_wchar),c_short,errcheck=True) _fixBridgeFunc(BOOL,'selectTextRange',c_long,JOBJECT64,c_int,c_int,errcheck=True) _fixBridgeFunc(BOOL,'getTextAttributesInRange',c_long,JOBJECT64,c_int,c_int,POINTER(AccessibleTextAttributesInfo),POINTER(c_short),errcheck=True) _fixBridgeFunc(JOBJECT64,'getTopLevelObject',c_long,JOBJECT64,errcheck=True) _fixBridgeFunc(c_int,'getObjectDepth',c_long,JOBJECT64) _fixBridgeFunc(JOBJECT64,'getActiveDescendent',c_long,JOBJECT64) _fixBridgeFunc(BOOL,'requestFocus',c_long,JOBJECT64,errcheck=True) _fixBridgeFunc(BOOL,'setCaretPosition',c_long,JOBJECT64,c_int,errcheck=True) _fixBridgeFunc(BOOL,'getCaretLocation',c_long,JOBJECT64,POINTER(AccessibleTextRectInfo),jint,errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleActions',c_long,JOBJECT64,POINTER(AccessibleActions),errcheck=True) _fixBridgeFunc(BOOL,'doAccessibleActions',c_long,JOBJECT64,POINTER(AccessibleActionsToDo),POINTER(jint),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTableInfo',c_long,JOBJECT64,POINTER(AccessibleTableInfo)) _fixBridgeFunc(BOOL,'getAccessibleTableCellInfo',c_long,AccessibleTable,jint,jint,POINTER(AccessibleTableCellInfo),errcheck=True) _fixBridgeFunc(BOOL,'getAccessibleTableRowHeader',c_long,JOBJECT64,POINTER(AccessibleTableInfo)) _fixBridgeFunc(BOOL,'getAccessibleTableColumnHeader',c_long,JOBJECT64,POINTER(AccessibleTableInfo)) _fixBridgeFunc(JOBJECT64,'getAccessibleTableRowDescription',c_long,JOBJECT64,jint) _fixBridgeFunc(JOBJECT64,'getAccessibleTableColumnDescription',c_long,JOBJECT64,jint) _fixBridgeFunc(jint,'getAccessibleTableRow',c_long,AccessibleTable,jint) _fixBridgeFunc(jint,'getAccessibleTableColumn',c_long,AccessibleTable,jint) _fixBridgeFunc(jint,'getAccessibleTableIndex',c_long,AccessibleTable,jint,jint) _fixBridgeFunc(BOOL,'getAccessibleKeyBindings',c_long,JOBJECT64,POINTER(AccessibleKeyBindings),errcheck=True) " 31916,"def list_attached_group_policies(args, aws_client): client = aws_client.aws_session( service=SERVICE, role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration'), ) group_name = args.get('groupName', """") marker = args.get('marker', None) limit, is_manual, page_size = get_limit(args) kwargs = { 'GroupName': group_name, 'MaxItems': limit } if marker: kwargs.update({'Marker': marker}) response = client.list_attached_group_policies(**kwargs) data = response.get('AttachedPolicies', []) marker = response.get('Marker', None) if is_manual and page_size and len(data) > page_size: data = data[-1 * args.get('page_size'):] policy_data = [] for policy in data: policy_data.append({ 'GroupName': group_name, 'PolicyArn': policy.get('PolicyArn', ''), 'PolicyName': policy.get('PolicyName', '') }) ec = {'AWS.IAM.AttachedGroupPolicies(val.PolicyArn && val.GroupName && val.PolicyArn === obj.PolicyArn && ' 'val.GroupName === obj.GroupName)': policy_data, 'AWS.IAM.Groups(val.GroupName === \'{}\').AttachedPoliciesMarker'.format(group_name): marker} human_readable = tableToMarkdown('AWS IAM Attached Policies for group {}'.format(group_name), headers=['PolicyName', 'PolicyArn'], headerTransform=pascalToSpace, t=data) return_outputs(human_readable, ec) ","def list_attached_group_policies(args, aws_client): client = aws_client.aws_session( service=SERVICE, role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration'), ) group_name = args.get('groupName', 
"""") marker = args.get('marker', None) limit, is_manual, page_size = get_limit(args) kwargs = { 'GroupName': group_name, 'MaxItems': limit } if marker: kwargs.update({'Marker': marker}) response = client.list_attached_group_policies(**kwargs) data = response.get('AttachedPolicies', []) marker = response.get('Marker', None) if is_manual and page_size and len(data) > page_size: data = data[-1 * args.get('page_size'):] policy_data = [] for policy in data: policy_data.append({ 'GroupName': group_name, 'PolicyArn': policy.get('PolicyArn', ''), 'PolicyName': policy.get('PolicyName', '') }) ec = {'AWS.IAM.AttachedGroupPolicies(val.PolicyArn && val.GroupName && val.PolicyArn === obj.PolicyArn && ' 'val.GroupName === obj.GroupName)': policy_data, 'AWS.IAM.Groups(val.GroupName === \'{}\').AttachedPoliciesMarker'.format(group_name): marker} human_readable = tableToMarkdown('AWS IAM Attached Policies for group {}'.format(group_name), headers=['PolicyName', 'PolicyArn'], headerTransform=pascalToSpace, t=data) return_outputs(human_readable, ec, response) " 23677,"def pvfactors_timeseries( solar_azimuth, solar_zenith, surface_azimuth, surface_tilt, axis_azimuth, timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo, n_pvrows=3, index_observed_pvrow=1, rho_front_pvrow=0.03, rho_back_pvrow=0.05, horizon_band_angle=15.): """""" Calculate front and back surface plane-of-array irradiance on a fixed tilt or single-axis tracker PV array configuration, and using the open-source ""pvfactors"" package. pvfactors implements the model described in [1]_. Please refer to pvfactors online documentation for more details: https://sunpower.github.io/pvfactors/ Parameters ---------- solar_azimuth: numeric Sun's azimuth angles using pvlib's azimuth convention (deg) solar_zenith: numeric Sun's zenith angles (deg) surface_azimuth: numeric Azimuth angle of the front surface of the PV modules, using pvlib's convention (deg) surface_tilt: numeric Tilt angle of the PV modules, going from 0 to 180 (deg) axis_azimuth: float Azimuth angle of the rotation axis of the PV modules, using pvlib's convention (deg). This is supposed to be fixed for all timestamps. When modeling fixed-tilt arrays, set this value to be 90 degrees off from ``surface_azimuth``. timestamps: datetime or DatetimeIndex List of simulation timestamps dni: numeric Direct normal irradiance (W/m2) dhi: numeric Diffuse horizontal irradiance (W/m2) gcr: float Ground coverage ratio of the pv array pvrow_height: float Height of the pv rows, measured at their center (m) pvrow_width: float Width of the pv rows in the considered 2D plane (m) albedo: float Ground albedo n_pvrows: int, default 3 Number of PV rows to consider in the PV array index_observed_pvrow: int, default 1 Index of the PV row whose incident irradiance will be returned. Indices of PV rows go from 0 to n_pvrows-1. 
rho_front_pvrow: float, default 0.03 Front surface reflectivity of PV rows rho_back_pvrow: float, default 0.05 Back surface reflectivity of PV rows horizon_band_angle: float, default 15 Elevation angle of the sky dome's diffuse horizon band (deg) Returns ------- poa_front: numeric Calculated incident irradiance on the front surface of the PV modules (W/m2) poa_back: numeric Calculated incident irradiance on the back surface of the PV modules (W/m2) poa_front_absorbed: numeric Calculated absorbed irradiance on the front surface of the PV modules (W/m2), after AOI losses poa_back_absorbed: numeric Calculated absorbed irradiance on the back surface of the PV modules (W/m2), after AOI losses References ---------- .. [1] Anoma, Marc Abou, et al. ""View Factor Model and Validation for Bifacial PV and Diffuse Shade on Single-Axis Trackers."" 44th IEEE Photovoltaic Specialist Conference. 2017. """""" # Convert Series, list, float inputs to numpy arrays solar_azimuth = np.array(solar_azimuth) solar_zenith = np.array(solar_zenith) dni = np.array(dni) dhi = np.array(dhi) # GH 1127, GH 1332 surface_tilt = np.full_like(solar_zenith, surface_tilt) surface_azimuth = np.full_like(solar_zenith, surface_azimuth) # Import pvfactors functions for timeseries calculations. from pvfactors.run import run_timeseries_engine # Build up pv array configuration parameters pvarray_parameters = { 'n_pvrows': n_pvrows, 'axis_azimuth': axis_azimuth, 'pvrow_height': pvrow_height, 'pvrow_width': pvrow_width, 'gcr': gcr } irradiance_model_params = { 'rho_front': rho_front_pvrow, 'rho_back': rho_back_pvrow, 'horizon_band_angle': horizon_band_angle } # Create report function def fn_build_report(pvarray): return {'total_inc_back': pvarray.ts_pvrows[index_observed_pvrow] .back.get_param_weighted('qinc'), 'total_inc_front': pvarray.ts_pvrows[index_observed_pvrow] .front.get_param_weighted('qinc'), 'total_abs_back': pvarray.ts_pvrows[index_observed_pvrow] .back.get_param_weighted('qabs'), 'total_abs_front': pvarray.ts_pvrows[index_observed_pvrow] .front.get_param_weighted('qabs')} # Run pvfactors calculations report = run_timeseries_engine( fn_build_report, pvarray_parameters, timestamps, dni, dhi, solar_zenith, solar_azimuth, surface_tilt, surface_azimuth, albedo, irradiance_model_params=irradiance_model_params) # Turn report into dataframe df_report = pd.DataFrame(report, index=timestamps) return (df_report.total_inc_front, df_report.total_inc_back, df_report.total_abs_front, df_report.total_abs_back) ","def pvfactors_timeseries( solar_azimuth, solar_zenith, surface_azimuth, surface_tilt, axis_azimuth, timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo, n_pvrows=3, index_observed_pvrow=1, rho_front_pvrow=0.03, rho_back_pvrow=0.05, horizon_band_angle=15.): """""" Calculate front and back surface plane-of-array irradiance on a fixed tilt or single-axis tracker PV array configuration, and using the open-source ""pvfactors"" package. pvfactors implements the model described in [1]_. Please refer to pvfactors online documentation for more details: https://sunpower.github.io/pvfactors/ Parameters ---------- solar_azimuth: numeric Sun's azimuth angles using pvlib's azimuth convention (deg) solar_zenith: numeric Sun's zenith angles (deg) surface_azimuth: numeric Azimuth angle of the front surface of the PV modules, using pvlib's convention (deg) surface_tilt: numeric Tilt angle of the PV modules, going from 0 to 180 (deg) axis_azimuth: float Azimuth angle of the rotation axis of the PV modules, using pvlib's convention (deg). 
This is supposed to be fixed for all timestamps. When modeling fixed-tilt arrays, set this value to be 90 degrees clockwise from ``surface_azimuth``. timestamps: datetime or DatetimeIndex List of simulation timestamps dni: numeric Direct normal irradiance (W/m2) dhi: numeric Diffuse horizontal irradiance (W/m2) gcr: float Ground coverage ratio of the pv array pvrow_height: float Height of the pv rows, measured at their center (m) pvrow_width: float Width of the pv rows in the considered 2D plane (m) albedo: float Ground albedo n_pvrows: int, default 3 Number of PV rows to consider in the PV array index_observed_pvrow: int, default 1 Index of the PV row whose incident irradiance will be returned. Indices of PV rows go from 0 to n_pvrows-1. rho_front_pvrow: float, default 0.03 Front surface reflectivity of PV rows rho_back_pvrow: float, default 0.05 Back surface reflectivity of PV rows horizon_band_angle: float, default 15 Elevation angle of the sky dome's diffuse horizon band (deg) Returns ------- poa_front: numeric Calculated incident irradiance on the front surface of the PV modules (W/m2) poa_back: numeric Calculated incident irradiance on the back surface of the PV modules (W/m2) poa_front_absorbed: numeric Calculated absorbed irradiance on the front surface of the PV modules (W/m2), after AOI losses poa_back_absorbed: numeric Calculated absorbed irradiance on the back surface of the PV modules (W/m2), after AOI losses References ---------- .. [1] Anoma, Marc Abou, et al. ""View Factor Model and Validation for Bifacial PV and Diffuse Shade on Single-Axis Trackers."" 44th IEEE Photovoltaic Specialist Conference. 2017. """""" # Convert Series, list, float inputs to numpy arrays solar_azimuth = np.array(solar_azimuth) solar_zenith = np.array(solar_zenith) dni = np.array(dni) dhi = np.array(dhi) # GH 1127, GH 1332 surface_tilt = np.full_like(solar_zenith, surface_tilt) surface_azimuth = np.full_like(solar_zenith, surface_azimuth) # Import pvfactors functions for timeseries calculations. 
from pvfactors.run import run_timeseries_engine # Build up pv array configuration parameters pvarray_parameters = { 'n_pvrows': n_pvrows, 'axis_azimuth': axis_azimuth, 'pvrow_height': pvrow_height, 'pvrow_width': pvrow_width, 'gcr': gcr } irradiance_model_params = { 'rho_front': rho_front_pvrow, 'rho_back': rho_back_pvrow, 'horizon_band_angle': horizon_band_angle } # Create report function def fn_build_report(pvarray): return {'total_inc_back': pvarray.ts_pvrows[index_observed_pvrow] .back.get_param_weighted('qinc'), 'total_inc_front': pvarray.ts_pvrows[index_observed_pvrow] .front.get_param_weighted('qinc'), 'total_abs_back': pvarray.ts_pvrows[index_observed_pvrow] .back.get_param_weighted('qabs'), 'total_abs_front': pvarray.ts_pvrows[index_observed_pvrow] .front.get_param_weighted('qabs')} # Run pvfactors calculations report = run_timeseries_engine( fn_build_report, pvarray_parameters, timestamps, dni, dhi, solar_zenith, solar_azimuth, surface_tilt, surface_azimuth, albedo, irradiance_model_params=irradiance_model_params) # Turn report into dataframe df_report = pd.DataFrame(report, index=timestamps) return (df_report.total_inc_front, df_report.total_inc_back, df_report.total_abs_front, df_report.total_abs_back) " 36056,"def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument """"""Check that the instructions dict contains the necessary keywords"""""" instructions_dict = instructions.get_dict() retrieve_files = instructions_dict.get('retrieve_files', None) if retrieve_files is None: errmsg = ( '\n\n' 'no indication of what to do in the instruction node:\n > {}\n' '(to store the files in the repository set retrieve_files=True,\n' 'to copy them to the specified folder on the remote computer,\n' 'set it to False)\n' ) return errmsg.format(instructions.uuid) if not isinstance(retrieve_files, bool): errmsg = ( 'entry for retrieve files inside of instruction node {} must be\n' 'either True or False; instead, it is: {}' ) return errmsg.format(instructions.uuid, retrieve_files) local_files = instructions_dict.get('local_files', None) remote_files = instructions_dict.get('remote_files', None) symlink_files = instructions_dict.get('symlink_files', None) if not any([local_files, remote_files, symlink_files]): errmsg = ( 'no indication of which files to copy were found in the instruction node {}.\n' 'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n' 'These should be lists containing tuples following the pattern:\n' '[ ... (source_node_key, source_relpath, target_relpath) ... 
] \n' ) return errmsg.format(instructions.uuid) ","def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument """"""Check that the instructions dict contains the necessary keywords"""""" instructions_dict = instructions.get_dict() retrieve_files = instructions_dict.get('retrieve_files', None) if retrieve_files is None: errmsg = ( '\n\n' 'no indication of what to do in the instruction node:\n > {}\n' '(to store the files in the repository set retrieve_files=True,\n' 'to copy them to the specified folder on the remote computer,\n' 'set it to False)\n' ) return errmsg.format(instructions.uuid) if not isinstance(retrieve_files, bool): errmsg = ( 'entry for retrieve files inside of instruction node {} must be\n' 'either True or False; instead, it is: {}' ) return errmsg.format(instructions.uuid, retrieve_files) local_files = instructions_dict.get('local_files', None) remote_files = instructions_dict.get('remote_files', None) symlink_files = instructions_dict.get('symlink_files', None) if not any([local_files, remote_files, symlink_files]): errmsg = ( 'no indication of which files to copy were found in the instruction node {}.\n' 'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n' 'These should be lists containing tuples with format: (source_node_key, source_relpath, target_relpath)\n' ) return errmsg.format(instructions.uuid) " 351,"def sample_prior_predictive( samples=500, model: Optional[Model] = None, var_names: Optional[Iterable[str]] = None, random_seed=None, mode: Optional[Union[str, Mode]] = None, return_inferencedata=None, idata_kwargs: dict = None, ) -> Union[InferenceData, Dict[str, np.ndarray]]: """"""Generate samples from the prior predictive distribution. Parameters ---------- samples : int Number of samples from the prior predictive to generate. Defaults to 500. model : Model (optional if in ``with`` context) var_names : Iterable[str] A list of names of variables for which to compute the posterior predictive samples. Defaults to both observed and unobserved RVs. Transformed values are not included unless explicitly defined in var_names. random_seed : int Seed for the random number generator. mode: The mode used by ``aesara.function`` to compile the graph. return_inferencedata : bool, default=True Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). Defaults to `True`. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data` Returns ------- arviz.InferenceData or Dict An ArviZ ``InferenceData`` object containing the prior and prior predictive samples (default), or a dictionary with variable names as keys and samples as numpy arrays. """""" model = modelcontext(model) if return_inferencedata is None: return_inferencedata = True if model.potentials: warnings.warn( ""The effect of Potentials on other parameters is ignored during prior predictive sampling. "" ""This is likely to lead to invalid or biased predictive samples."", UserWarning, stacklevel=2, ) if var_names is None: prior_pred_vars = model.observed_RVs + model.auto_deterministics prior_vars = ( get_default_varnames(model.unobserved_RVs, include_transformed=True) + model.potentials ) vars_: Set[str] = {var.name for var in prior_vars + prior_pred_vars} else: vars_ = set(var_names) if random_seed is not None: warnings.warn( ""In this version, RNG seeding is managed by the Model objects. 
"" ""See the `rng_seeder` argument in Model's constructor."", DeprecationWarning, stacklevel=2, ) names = get_default_varnames(vars_, include_transformed=False) vars_to_sample = [model[name] for name in names] # Any variables from var_names that are missing must be transformed variables. # Misspelled variables would have raised a KeyError above. missing_names = vars_.difference(names) for name in missing_names: transformed_value_var = model[name] rv_var = model.values_to_rvs[transformed_value_var] transform = transformed_value_var.tag.transform transformed_rv_var = transform.forward(rv_var, rv_var) names.append(name) vars_to_sample.append(transformed_rv_var) # If the user asked for the transformed variable in var_names, but not the # original RV, we add it manually here if rv_var.name not in names: names.append(rv_var.name) vars_to_sample.append(rv_var) inputs = [i for i in inputvars(vars_to_sample) if not isinstance(i, SharedVariable)] sampler_fn = compile_rv_inplace( inputs, vars_to_sample, allow_input_downcast=True, accept_inplace=True, mode=mode ) values = zip(*(sampler_fn() for i in range(samples))) data = {k: np.stack(v) for k, v in zip(names, values)} if data is None: raise AssertionError(""No variables sampled: attempting to sample %s"" % names) prior: Dict[str, np.ndarray] = {} for var_name in vars_: if var_name in data: prior[var_name] = data[var_name] if not return_inferencedata: return prior ikwargs = dict(model=model) if idata_kwargs: ikwargs.update(idata_kwargs) return pm.to_inference_data(prior=prior, **ikwargs) ","def sample_prior_predictive( samples=500, model: Optional[Model] = None, var_names: Optional[Iterable[str]] = None, random_seed=None, mode: Optional[Union[str, Mode]] = None, return_inferencedata=True, idata_kwargs: dict = None, ) -> Union[InferenceData, Dict[str, np.ndarray]]: """"""Generate samples from the prior predictive distribution. Parameters ---------- samples : int Number of samples from the prior predictive to generate. Defaults to 500. model : Model (optional if in ``with`` context) var_names : Iterable[str] A list of names of variables for which to compute the posterior predictive samples. Defaults to both observed and unobserved RVs. Transformed values are not included unless explicitly defined in var_names. random_seed : int Seed for the random number generator. mode: The mode used by ``aesara.function`` to compile the graph. return_inferencedata : bool, default=True Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). Defaults to `True`. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data` Returns ------- arviz.InferenceData or Dict An ArviZ ``InferenceData`` object containing the prior and prior predictive samples (default), or a dictionary with variable names as keys and samples as numpy arrays. """""" model = modelcontext(model) if return_inferencedata is None: return_inferencedata = True if model.potentials: warnings.warn( ""The effect of Potentials on other parameters is ignored during prior predictive sampling. 
"" ""This is likely to lead to invalid or biased predictive samples."", UserWarning, stacklevel=2, ) if var_names is None: prior_pred_vars = model.observed_RVs + model.auto_deterministics prior_vars = ( get_default_varnames(model.unobserved_RVs, include_transformed=True) + model.potentials ) vars_: Set[str] = {var.name for var in prior_vars + prior_pred_vars} else: vars_ = set(var_names) if random_seed is not None: warnings.warn( ""In this version, RNG seeding is managed by the Model objects. "" ""See the `rng_seeder` argument in Model's constructor."", DeprecationWarning, stacklevel=2, ) names = get_default_varnames(vars_, include_transformed=False) vars_to_sample = [model[name] for name in names] # Any variables from var_names that are missing must be transformed variables. # Misspelled variables would have raised a KeyError above. missing_names = vars_.difference(names) for name in missing_names: transformed_value_var = model[name] rv_var = model.values_to_rvs[transformed_value_var] transform = transformed_value_var.tag.transform transformed_rv_var = transform.forward(rv_var, rv_var) names.append(name) vars_to_sample.append(transformed_rv_var) # If the user asked for the transformed variable in var_names, but not the # original RV, we add it manually here if rv_var.name not in names: names.append(rv_var.name) vars_to_sample.append(rv_var) inputs = [i for i in inputvars(vars_to_sample) if not isinstance(i, SharedVariable)] sampler_fn = compile_rv_inplace( inputs, vars_to_sample, allow_input_downcast=True, accept_inplace=True, mode=mode ) values = zip(*(sampler_fn() for i in range(samples))) data = {k: np.stack(v) for k, v in zip(names, values)} if data is None: raise AssertionError(""No variables sampled: attempting to sample %s"" % names) prior: Dict[str, np.ndarray] = {} for var_name in vars_: if var_name in data: prior[var_name] = data[var_name] if not return_inferencedata: return prior ikwargs = dict(model=model) if idata_kwargs: ikwargs.update(idata_kwargs) return pm.to_inference_data(prior=prior, **ikwargs) " 51499,"def patch(patches_dir=PATCHES_DIR, verbose=False, test=False): """"""Patches the database schema based on the SETTINGS table Pulls the current patch from the settings table and applies all subsequent patches found in the patches directory. """""" # we are going to open and close 2 main transactions; this is a required # change since patch 68.sql where we transition to jsonb for all info # files. 
The 2 main transitions are: (1) get the current settings, # (2) each patch in their independent transaction with qdb.sql_connection.TRN: qdb.sql_connection.TRN.add(""SELECT current_patch FROM settings"") current_patch = qdb.sql_connection.TRN.execute_fetchlast() current_sql_patch_fp = join(patches_dir, current_patch) corresponding_py_patch = partial(join, patches_dir, 'python_patches') sql_glob = join(patches_dir, '*.sql') sql_patch_files = natsorted(glob(sql_glob)) if current_patch == 'unpatched': next_patch_index = 0 elif current_sql_patch_fp not in sql_patch_files: raise RuntimeError(""Cannot find patch file %s"" % current_patch) else: next_patch_index = sql_patch_files.index(current_sql_patch_fp) + 1 patch_update_sql = ""UPDATE settings SET current_patch = %s"" for sql_patch_fp in sql_patch_files[next_patch_index:]: sql_patch_filename = basename(sql_patch_fp) py_patch_fp = corresponding_py_patch( splitext(basename(sql_patch_fp))[0] + '.py') py_patch_filename = basename(py_patch_fp) # patch 43.sql is when we started testing patches, then in patch # 68.sql is when we transitioned to jsonb for the info files; let's do # this in its own transition if sql_patch_filename == '68.sql' and test: with qdb.sql_connection.TRN: _populate_test_db() with qdb.sql_connection.TRN: with open(sql_patch_fp, newline=None) as patch_file: if verbose: print('\tApplying patch %s...' % sql_patch_filename) qdb.sql_connection.TRN.add(patch_file.read()) qdb.sql_connection.TRN.add( patch_update_sql, [sql_patch_filename]) qdb.sql_connection.TRN.execute() if exists(py_patch_fp): if verbose: print('\t\tApplying python patch %s...' % py_patch_filename) with open(py_patch_fp) as py_patch: exec(py_patch.read(), globals()) # before moving to jsonb for sample/prep info files (patch 69.sql), # one of the patches used to regenerate the sample information file # for the test Study (1) so a lot of the tests actually expect this. # Now, trying to regenerate directly in the populate_test_db might # require too many dev hours so the easiest is just do it here # UPDATE 01/25/2021: moving to 81.sql as we timestamps to prep # info files if test and sql_patch_filename == '81.sql': qdb.study.Study(1).sample_template.generate_files() ","def patch(patches_dir=PATCHES_DIR, verbose=False, test=False): """"""Patches the database schema based on the SETTINGS table Pulls the current patch from the settings table and applies all subsequent patches found in the patches directory. """""" # we are going to open and close 2 main transactions; this is a required # change since patch 68.sql where we transition to jsonb for all info # files. 
The 2 main transitions are: (1) get the current settings, # (2) each patch in their independent transaction with qdb.sql_connection.TRN: qdb.sql_connection.TRN.add(""SELECT current_patch FROM settings"") current_patch = qdb.sql_connection.TRN.execute_fetchlast() current_sql_patch_fp = join(patches_dir, current_patch) corresponding_py_patch = partial(join, patches_dir, 'python_patches') sql_glob = join(patches_dir, '*.sql') sql_patch_files = natsorted(glob(sql_glob)) if current_patch == 'unpatched': next_patch_index = 0 elif current_sql_patch_fp not in sql_patch_files: raise RuntimeError(""Cannot find patch file %s"" % current_patch) else: next_patch_index = sql_patch_files.index(current_sql_patch_fp) + 1 patch_update_sql = ""UPDATE settings SET current_patch = %s"" for sql_patch_fp in sql_patch_files[next_patch_index:]: sql_patch_filename = basename(sql_patch_fp) py_patch_fp = corresponding_py_patch( splitext(basename(sql_patch_fp))[0] + '.py') py_patch_filename = basename(py_patch_fp) # patch 43.sql is when we started testing patches, then in patch # 68.sql is when we transitioned to jsonb for the info files; let's do # this in its own transition if sql_patch_filename == '68.sql' and test: with qdb.sql_connection.TRN: _populate_test_db() with qdb.sql_connection.TRN: with open(sql_patch_fp, newline=None) as patch_file: if verbose: print('\tApplying patch %s...' % sql_patch_filename) qdb.sql_connection.TRN.add(patch_file.read()) qdb.sql_connection.TRN.add( patch_update_sql, [sql_patch_filename]) qdb.sql_connection.TRN.execute() if exists(py_patch_fp): if verbose: print('\t\tApplying python patch %s...' % py_patch_filename) with open(py_patch_fp) as py_patch: exec(py_patch.read(), globals()) # before moving to jsonb for sample/prep info files (patch 69.sql), # one of the patches used to regenerate the sample information file # for the test Study (1) so a lot of the tests actually expect this. # Now, trying to regenerate directly in the populate_test_db might # require too many dev hours so the easiest is just do it here # UPDATE 01/25/2021: moving to 81.sql as we added timestamps to # prep info files if test and sql_patch_filename == '81.sql': qdb.study.Study(1).sample_template.generate_files() " 32257,"def get_current_time(time_zone=0): """""" Gets the current time. :type time_zone: ``int`` :param time_zone: The time zone offset in hours. :return: The current time. :rtype: ``datetime`` """""" return datetime.utcnow() + timedelta(hours=time_zone) ","def get_current_time(time_zone=0): """""" Gets the current time in a given timezone. :type time_zone: ``int`` :param time_zone: The time zone offset in hours. :return: The current time. 
:rtype: ``datetime`` """""" return datetime.utcnow() + timedelta(hours=time_zone) " 13286,"def test_note_deprecation_checks_date(): with pytest.warns() as rec: note_deprecation(""This is bad"", since=""RELEASEDAY"", has_codemod=False) assert len(rec) == 1 with pytest.raises(AssertionError): note_deprecation(""This is way too old"", since=""1999-12-31"", has_codemod=False) ","def test_note_deprecation_checks_date(): with pytest.warns(HypothesisDeprecationWarning) as rec: note_deprecation(""This is bad"", since=""RELEASEDAY"", has_codemod=False) assert len(rec) == 1 with pytest.raises(AssertionError): note_deprecation(""This is way too old"", since=""1999-12-31"", has_codemod=False) " 36999,"def main(): parser, args = process_args() if args.action == ""jobs"": tasks = coverage_tasks(args) max_key_len = max(len(t) for t, _ in tasks) for k, v in tasks: print(f""{k:{max_key_len}} {v}"") elif args.action == ""check"": tasks = coverage_tasks(args) # lets only count the main workflow main_tasks = filter(lambda x: x[0].split(""."")[1] == ""main"", tasks) coverage_config_check(len(list(main_tasks)), args) else: parser.print_help() ","def main(): parser, args = process_args() if args.action == ""jobs"": tasks = coverage_tasks(args) max_key_len = max(len(t) for t, _ in tasks) for k, v in tasks: print(f""{k:{max_key_len}} {v}"") elif args.action == ""check"": tasks = coverage_tasks(args) # let's only count the main workflow main_tasks = filter(lambda x: x[0].split(""."")[1] == ""main"", tasks) coverage_config_check(len(list(main_tasks)), args) else: parser.print_help() " 34926,"def from_tuple_type(ty, expr): """"""Convert an expression with the given type into a sequence of expressions. Each expressions maps to a field of the tuple or nested tuples in linear order. Parameters ---------- ty: tvm.Type The type to unpack. expr: The expression from which to extract each sub-field. Returns ------- result: List[tvm.relay.Expr] The list of sub-expressions. """""" return _make.FromTupleType(ty, expr) ","def from_tuple_type(ty, expr): """"""Convert an expression with the given type into a sequence of expressions. Each expression maps to a field of the tuple or nested tuples in linear order. Parameters ---------- ty: tvm.Type The type to unpack. expr: The expression from which to extract each sub-field. Returns ------- result: List[tvm.relay.Expr] The list of sub-expressions. """""" return _make.FromTupleType(ty, expr) " 8851,"def search(*patterns): """"""Decorate a function to be called when a pattern is found in a line. :param str patterns: one or more regular expression(s) Each argument is a regular expression which will trigger the function:: @search('hello', 'here') # will trigger once on ""hello you"" # will trigger twice on ""hello here"" # will trigger once on ""I'm right here!"" This decorator can be used multiple times to add more search rules:: @search('here') @search('hello') # will trigger once on ""hello you"" # will trigger twice on ""hello here"" (once per expression) # will trigger once on ""I'm right here!"" If the Sopel instance is in a channel, or sent a PRIVMSG, where a part of a string matching this expression is said, the function will execute. Note that captured groups here will be retrievable through the :class:`~sopel.trigger.Trigger` object later. The match will also contains the position of the first instance found. Inside the regular expression, some special directives can be used. 
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and ``$nickname`` will be replaced with the nick of the bot:: @search('$nickname') # will trigger once when the bot's nick is in a trigger .. versionadded:: 7.1 .. note:: The regex rule will match for the first instance only, starting from the left of the line, and the function will execute only once per regular expression. To match for each time the expression is found, use the :func:`find` decorator instead. To match only once from the start of the line, use the :func:`rule` decorator instead. """""" def add_attribute(function): if not hasattr(function, ""search_rules""): function.search_rules = [] for value in patterns: if value not in function.search_rules: function.search_rules.append(value) return function return add_attribute ","def search(*patterns): """"""Decorate a function to be called when a pattern is found in a line. :param str patterns: one or more regular expression(s) Each argument is a regular expression which will trigger the function:: @search('hello', 'here') # will trigger once on ""hello you"" # will trigger twice on ""hello here"" # will trigger once on ""I'm right here!"" This decorator can be used multiple times to add more search rules:: @search('here') @search('hello') # will trigger once on ""hello you"" # will trigger twice on ""hello here"" (once per expression) # will trigger once on ""I'm right here!"" If the Sopel instance is in a channel, or sent a PRIVMSG, where a part of a string matching this expression is said, the function will execute. Note that captured groups here will be retrievable through the :class:`~sopel.trigger.Trigger` object later. The match will also contains the position of the first instance found. Inside the regular expression, some special directives can be used. ``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and ``$nickname`` will be replaced with the nick of the bot:: @search('$nickname') # will trigger once when the bot's nick is in a trigger .. versionadded:: 7.1 .. note:: The regex rule will match for the first instance only, starting from the left of the line, and the function will execute only once per regular expression. To match for each time an expression is found, use the :func:`find` decorator instead. To match only once from the start of the line, use the :func:`rule` decorator instead. 
"""""" def add_attribute(function): if not hasattr(function, ""search_rules""): function.search_rules = [] for value in patterns: if value not in function.search_rules: function.search_rules.append(value) return function return add_attribute " 31405,"def get_violation_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" :type client: Client :param client: Gamma client :param args: all command arguments, usually passed from demisto.args() args['name'] is used as input name :return: A CommandResults object that is then passed to return_results :rtype: ``CommandResults`` """""" violation_id = args[""violation""] if not int(violation_id) >= 1: raise ValueError(""violation must be greater than 0"") violation = client.get_violation(violation_id) if violation['response'][0]['violation_id'] != int(violation_id): return ""Violation with this ID does not exist."" human_readable = '' for i in violation['response']: human_readable += f'### Violation {i[""violation_id""]} \r' \ f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \ f'|---|---|---|---|---|---| \r' \ f'| {i[""violation_id""]} | {i[""violation_status""]} | {timestamp_to_datestring(i[""violation_event_timestamp""] * 1000)} | {i[""dashboard_url""]} | {i[""user""]} | {i[""app_name""]} | \r' return CommandResults( readable_output=human_readable, outputs_prefix=""GammaViolation"", outputs_key_field=""violation_id"", outputs=violation, raw_response=violation ) ","def get_violation_command(client: Client, args: Dict[str, Any]) -> CommandResults: """""" :type client: Client :param client: Gamma client :param args: all command arguments, usually passed from demisto.args() args['name'] is used as input name :return: A CommandResults object that is then passed to return_results :rtype: ``CommandResults`` """""" violation_id = args[""violation""] if int(violation_id) < 1: raise ValueError(""violation must be greater than 0"") violation = client.get_violation(violation_id) if violation['response'][0]['violation_id'] != int(violation_id): return ""Violation with this ID does not exist."" human_readable = '' for i in violation['response']: human_readable += f'### Violation {i[""violation_id""]} \r' \ f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \ f'|---|---|---|---|---|---| \r' \ f'| {i[""violation_id""]} | {i[""violation_status""]} | {timestamp_to_datestring(i[""violation_event_timestamp""] * 1000)} | {i[""dashboard_url""]} | {i[""user""]} | {i[""app_name""]} | \r' return CommandResults( readable_output=human_readable, outputs_prefix=""GammaViolation"", outputs_key_field=""violation_id"", outputs=violation, raw_response=violation ) " 27539,"def create_clip(context, track): """"""Create a new clip based on this context dict"""""" app = get_app() _ = app._tr # Get FPS info fps_num = app.project.get(""fps"").get(""num"", 24) fps_den = app.project.get(""fps"").get(""den"", 1) fps_float = float(fps_num / fps_den) # Get clip path (and prompt user if path not found) clip_path, is_modified, is_skipped = find_missing_file(context.get(""clip_path"", """")) if is_skipped: return # Get video context video_ctx = context.get(""AX"", {}).get(""V"", {}) audio_ctx = context.get(""AX"", {}).get(""A"", {}) # Check for this path in our existing project data file = File.get(path=clip_path) # Load filepath in libopenshot clip object (which will try multiple readers to open it) clip_obj = openshot.Clip(clip_path) if not file: # Get the JSON for the clip's internal reader try: reader = clip_obj.Reader() file_data = 
json.loads(reader.Json()) # Determine media type if file_data[""has_video""] and not is_image(file_data): file_data[""media_type""] = ""video"" elif file_data[""has_video""] and is_image(file_data): file_data[""media_type""] = ""image"" elif file_data[""has_audio""] and not file_data[""has_video""]: file_data[""media_type""] = ""audio"" # Save new file to the project data file = File() file.data = file_data # Save file file.save() except: log.warning('Failed to create File object for %s' % clip_path) if (file.data[""media_type""] == ""video"" or file.data[""media_type""] == ""image""): # Determine thumb path thumb_path = os.path.join(info.THUMBNAIL_PATH, ""%s.png"" % file.data[""id""]) else: # Audio file thumb_path = os.path.join(info.PATH, ""images"", ""AudioThumbnail.png"") # Create Clip object clip = Clip() clip.data = json.loads(clip_obj.Json()) clip.data[""file_id""] = file.id clip.data[""title""] = context.get(""clip_path"", """") clip.data[""layer""] = track.data.get(""number"", 1000000) if video_ctx and not audio_ctx: # Only video clip.data[""position""] = timecodeToSeconds(video_ctx.get(""timeline_position"", ""00:00:00:00""), fps_num, fps_den) clip.data[""start""] = timecodeToSeconds(video_ctx.get(""clip_start_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""end""] = timecodeToSeconds(video_ctx.get(""clip_end_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""has_audio""] = { ""Points"": [ { ""co"": { ""X"": 1.0, ""Y"": 0.0 # Disable audio }, ""interpolation"": 2 } ] } elif audio_ctx and not video_ctx: # Only audio clip.data[""position""] = timecodeToSeconds(audio_ctx.get(""timeline_position"", ""00:00:00:00""), fps_num, fps_den) clip.data[""start""] = timecodeToSeconds(audio_ctx.get(""clip_start_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""end""] = timecodeToSeconds(audio_ctx.get(""clip_end_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""has_video""] = { ""Points"": [ { ""co"": { ""X"": 1.0, ""Y"": 0.0 # Disable video }, ""interpolation"": 2 } ] } else: # Both video and audio clip.data[""position""] = timecodeToSeconds(video_ctx.get(""timeline_position"", ""00:00:00:00""), fps_num, fps_den) clip.data[""start""] = timecodeToSeconds(video_ctx.get(""clip_start_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""end""] = timecodeToSeconds(video_ctx.get(""clip_end_time"", ""00:00:00:00""), fps_num, fps_den) # Add volume keyframes if context.get(""volume""): clip.data[""volume""] = {""Points"": []} for keyframe in context.get(""volume"", []): clip.data[""volume""][""Points""].append( { ""co"": { ""X"": round(timecodeToSeconds(keyframe.get(""time"", 0.0), fps_num, fps_den) * fps_float), ""Y"": keyframe.get(""value"", 0.0) }, ""interpolation"": 1 # linear } ) # Add alpha keyframes if context.get(""opacity""): clip.data[""alpha""] = {""Points"": []} for keyframe in context.get(""opacity"", []): clip.data[""alpha""][""Points""].append( { ""co"": { ""X"": round(timecodeToSeconds(keyframe.get(""time"", 0.0), fps_num, fps_den) * fps_float), ""Y"": keyframe.get(""value"", 0.0) }, ""interpolation"": 1 # linear } ) # Save clip clip.save() ","def create_clip(context, track): """"""Create a new clip based on this context dict"""""" app = get_app() _ = app._tr # Get FPS info fps_num = app.project.get(""fps"").get(""num"", 24) fps_den = app.project.get(""fps"").get(""den"", 1) fps_float = float(fps_num / fps_den) # Get clip path (and prompt user if path not found) clip_path, is_modified, is_skipped = find_missing_file(context.get(""clip_path"", """")) if 
is_skipped: return # Get video context video_ctx = context.get(""AX"", {}).get(""V"", {}) audio_ctx = context.get(""AX"", {}).get(""A"", {}) # Check for this path in our existing project data file = File.get(path=clip_path) # Load filepath in libopenshot clip object (which will try multiple readers to open it) clip_obj = openshot.Clip(clip_path) if not file: # Get the JSON for the clip's internal reader try: reader = clip_obj.Reader() file_data = json.loads(reader.Json()) # Determine media type if file_data[""has_video""] and not is_image(file_data): file_data[""media_type""] = ""video"" elif file_data[""has_video""] and is_image(file_data): file_data[""media_type""] = ""image"" elif file_data[""has_audio""] and not file_data[""has_video""]: file_data[""media_type""] = ""audio"" # Save new file to the project data file = File() file.data = file_data # Save file file.save() except: log.warning('Error building File object for %s' % clip_path, exc_info=1) if (file.data[""media_type""] == ""video"" or file.data[""media_type""] == ""image""): # Determine thumb path thumb_path = os.path.join(info.THUMBNAIL_PATH, ""%s.png"" % file.data[""id""]) else: # Audio file thumb_path = os.path.join(info.PATH, ""images"", ""AudioThumbnail.png"") # Create Clip object clip = Clip() clip.data = json.loads(clip_obj.Json()) clip.data[""file_id""] = file.id clip.data[""title""] = context.get(""clip_path"", """") clip.data[""layer""] = track.data.get(""number"", 1000000) if video_ctx and not audio_ctx: # Only video clip.data[""position""] = timecodeToSeconds(video_ctx.get(""timeline_position"", ""00:00:00:00""), fps_num, fps_den) clip.data[""start""] = timecodeToSeconds(video_ctx.get(""clip_start_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""end""] = timecodeToSeconds(video_ctx.get(""clip_end_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""has_audio""] = { ""Points"": [ { ""co"": { ""X"": 1.0, ""Y"": 0.0 # Disable audio }, ""interpolation"": 2 } ] } elif audio_ctx and not video_ctx: # Only audio clip.data[""position""] = timecodeToSeconds(audio_ctx.get(""timeline_position"", ""00:00:00:00""), fps_num, fps_den) clip.data[""start""] = timecodeToSeconds(audio_ctx.get(""clip_start_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""end""] = timecodeToSeconds(audio_ctx.get(""clip_end_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""has_video""] = { ""Points"": [ { ""co"": { ""X"": 1.0, ""Y"": 0.0 # Disable video }, ""interpolation"": 2 } ] } else: # Both video and audio clip.data[""position""] = timecodeToSeconds(video_ctx.get(""timeline_position"", ""00:00:00:00""), fps_num, fps_den) clip.data[""start""] = timecodeToSeconds(video_ctx.get(""clip_start_time"", ""00:00:00:00""), fps_num, fps_den) clip.data[""end""] = timecodeToSeconds(video_ctx.get(""clip_end_time"", ""00:00:00:00""), fps_num, fps_den) # Add volume keyframes if context.get(""volume""): clip.data[""volume""] = {""Points"": []} for keyframe in context.get(""volume"", []): clip.data[""volume""][""Points""].append( { ""co"": { ""X"": round(timecodeToSeconds(keyframe.get(""time"", 0.0), fps_num, fps_den) * fps_float), ""Y"": keyframe.get(""value"", 0.0) }, ""interpolation"": 1 # linear } ) # Add alpha keyframes if context.get(""opacity""): clip.data[""alpha""] = {""Points"": []} for keyframe in context.get(""opacity"", []): clip.data[""alpha""][""Points""].append( { ""co"": { ""X"": round(timecodeToSeconds(keyframe.get(""time"", 0.0), fps_num, fps_den) * fps_float), ""Y"": keyframe.get(""value"", 0.0) }, ""interpolation"": 1 # 
linear } ) # Save clip clip.save() " 43085,"def load(f, ir=""blackbird""): """"""Load a quantum program from a Blackbird .xbb file. **Example:** The following Blackbird file, ``program1.xbb``, .. code-block:: python3 name test_program version 1.0 Sgate(0.543, 0.0) | 1 BSgate(0.6, 0.1) | [2, 0] MeasureFock() | [0, 1, 2] can be imported into Strawberry Fields using the ``loads`` function: >>> sf.loads(""program1.xbb"") >>> prog.name 'test_program' >>> prog.num_subsystems 3 >>> prog.print() Sgate(0.543, 0) | (q[1]) BSgate(0.6, 0.1) | (q[2], q[0]) MeasureFock | (q[0], q[1], q[2]) Args: f (Union[file, str, pathlib.Path]): File or filename from which the data is loaded. If file is a string or Path, a value with the .xbb extension is expected. ir (str): Intermediate representation language to use. Can be either ""blackbird"" or ""xir"". Returns: prog (Program): Strawberry Fields program """""" own_file = False try: if hasattr(f, ""read""): # argument file is a file-object fid = f else: # argument file is a Path or string filename = os.fspath(f) fid = open(filename, ""r"") own_file = True except TypeError as e: raise ValueError(""file must be a string, pathlib.Path, or file-like object"") from e try: prog_str = fid.read() finally: if own_file: # safely close the file fid.close() # load blackbird program return loads(prog_str, ir=ir) ","def load(f, ir=""blackbird""): """"""Load a quantum program from a Blackbird .xbb or an XIR .xir file. **Example:** The following Blackbird file, ``program1.xbb``, .. code-block:: python3 name test_program version 1.0 Sgate(0.543, 0.0) | 1 BSgate(0.6, 0.1) | [2, 0] MeasureFock() | [0, 1, 2] can be imported into Strawberry Fields using the ``loads`` function: >>> sf.loads(""program1.xbb"") >>> prog.name 'test_program' >>> prog.num_subsystems 3 >>> prog.print() Sgate(0.543, 0) | (q[1]) BSgate(0.6, 0.1) | (q[2], q[0]) MeasureFock | (q[0], q[1], q[2]) Args: f (Union[file, str, pathlib.Path]): File or filename from which the data is loaded. If file is a string or Path, a value with the .xbb extension is expected. ir (str): Intermediate representation language to use. Can be either ""blackbird"" or ""xir"". Returns: prog (Program): Strawberry Fields program """""" own_file = False try: if hasattr(f, ""read""): # argument file is a file-object fid = f else: # argument file is a Path or string filename = os.fspath(f) fid = open(filename, ""r"") own_file = True except TypeError as e: raise ValueError(""file must be a string, pathlib.Path, or file-like object"") from e try: prog_str = fid.read() finally: if own_file: # safely close the file fid.close() # load blackbird program return loads(prog_str, ir=ir) " 43937,"def electron_repulsion(la, lb, lc, ld, ra, rb, rc, rd, alpha, beta, gamma, delta): r""""""Compute electron repulsion integral between four primitive Gaussian functions. The electron repulsion integral between four Gaussian functions denoted by :math:`a`, :math:`b` , :math:`c` and :math:`d` can be computed as [`Helgaker (1995) p820 `_] .. math:: g_{abcd} = \frac{2\pi^{5/2}}{pq\sqrt{p+q}} \sum_{tuv} E_t^{o_a o_b} E_u^{m_a m_b} E_v^{n_a n_b} \sum_{rsw} (-1)^{r+s+w} E_r^{o_c o_d} E_s^{m_c m_d} E_w^{n_c n_d} R_{t+r, u+s, v+w}, where :math:`E` and :math:`R` represent the Hermite Gaussian expansion coefficient and the Hermite Coulomb integral, respectively. The sums go over the angular momentum quantum numbers :math:`o_i + o_j + 1`, :math:`m_i + m_j + 1` and :math:`n_i + n_j + 1` respectively for :math:`t, u, v` and :math:`r, s, w`. 
The exponents of the Gaussian functions are used to compute :math:`p` and :math:`q` as :math:`p = \alpha + \beta` and :math:`q = \gamma + \delta`. Args: la (tuple[int]): angular momentum for the first Gaussian function lb (tuple[int]): angular momentum for the second Gaussian function lc (tuple[int]): angular momentum for the third Gaussian function ld (tuple[int]): angular momentum for the forth Gaussian function ra (array[float]): position vector of the the first Gaussian function rb (array[float]): position vector of the the second Gaussian function rc (array[float]): position vector of the the third Gaussian function rd (array[float]): position vector of the the forth Gaussian function alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function gamma (array[float]): exponent of the third Gaussian function delta (array[float]): exponent of the forth Gaussian function Returns: array[float]: electron repulsion integral between four Gaussian functions """""" l1, m1, n1 = la l2, m2, n2 = lb l3, m3, n3 = lc l4, m4, n4 = ld p = alpha + beta q = gamma + delta p_ab = ( alpha * ra[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] + beta * rb[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] ) / (alpha + beta) p_cd = ( gamma * rc[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] + delta * rd[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] ) / (gamma + delta) g = 0.0 for t in range(l1 + l2 + 1): for u in range(m1 + m2 + 1): for v in range(n1 + n2 + 1): for r in range(l3 + l4 + 1): for s in range(m3 + m4 + 1): for w in range(n3 + n4 + 1): g = g + expansion(l1, l2, ra[0], rb[0], alpha, beta, t) * expansion( m1, m2, ra[1], rb[1], alpha, beta, u ) * expansion(n1, n2, ra[2], rb[2], alpha, beta, v) * expansion( l3, l4, rc[0], rd[0], gamma, delta, r ) * expansion( m3, m4, rc[1], rd[1], gamma, delta, s ) * expansion( n3, n4, rc[2], rd[2], gamma, delta, w ) * ( (-1) ** (r + s + w) ) * _hermite_coulomb( t + r, u + s, v + w, 0, (p * q) / (p + q), p_ab - p_cd ) g = g * 2 * (anp.pi ** 2.5) / (p * q * anp.sqrt(p + q)) return g ","def electron_repulsion(la, lb, lc, ld, ra, rb, rc, rd, alpha, beta, gamma, delta): r""""""Compute the electron repulsion integral between four primitive Gaussian functions. The electron repulsion integral between four Gaussian functions denoted by :math:`a`, :math:`b` , :math:`c` and :math:`d` can be computed as [`Helgaker (1995) p820 `_] .. math:: g_{abcd} = \frac{2\pi^{5/2}}{pq\sqrt{p+q}} \sum_{tuv} E_t^{o_a o_b} E_u^{m_a m_b} E_v^{n_a n_b} \sum_{rsw} (-1)^{r+s+w} E_r^{o_c o_d} E_s^{m_c m_d} E_w^{n_c n_d} R_{t+r, u+s, v+w}, where :math:`E` and :math:`R` represent the Hermite Gaussian expansion coefficient and the Hermite Coulomb integral, respectively. The sums go over the angular momentum quantum numbers :math:`o_i + o_j + 1`, :math:`m_i + m_j + 1` and :math:`n_i + n_j + 1` respectively for :math:`t, u, v` and :math:`r, s, w`. The exponents of the Gaussian functions are used to compute :math:`p` and :math:`q` as :math:`p = \alpha + \beta` and :math:`q = \gamma + \delta`. 
Args: la (tuple[int]): angular momentum for the first Gaussian function lb (tuple[int]): angular momentum for the second Gaussian function lc (tuple[int]): angular momentum for the third Gaussian function ld (tuple[int]): angular momentum for the forth Gaussian function ra (array[float]): position vector of the the first Gaussian function rb (array[float]): position vector of the the second Gaussian function rc (array[float]): position vector of the the third Gaussian function rd (array[float]): position vector of the the forth Gaussian function alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function gamma (array[float]): exponent of the third Gaussian function delta (array[float]): exponent of the forth Gaussian function Returns: array[float]: electron repulsion integral between four Gaussian functions """""" l1, m1, n1 = la l2, m2, n2 = lb l3, m3, n3 = lc l4, m4, n4 = ld p = alpha + beta q = gamma + delta p_ab = ( alpha * ra[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] + beta * rb[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] ) / (alpha + beta) p_cd = ( gamma * rc[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] + delta * rd[:, anp.newaxis, anp.newaxis, anp.newaxis, anp.newaxis] ) / (gamma + delta) g = 0.0 for t in range(l1 + l2 + 1): for u in range(m1 + m2 + 1): for v in range(n1 + n2 + 1): for r in range(l3 + l4 + 1): for s in range(m3 + m4 + 1): for w in range(n3 + n4 + 1): g = g + expansion(l1, l2, ra[0], rb[0], alpha, beta, t) * expansion( m1, m2, ra[1], rb[1], alpha, beta, u ) * expansion(n1, n2, ra[2], rb[2], alpha, beta, v) * expansion( l3, l4, rc[0], rd[0], gamma, delta, r ) * expansion( m3, m4, rc[1], rd[1], gamma, delta, s ) * expansion( n3, n4, rc[2], rd[2], gamma, delta, w ) * ( (-1) ** (r + s + w) ) * _hermite_coulomb( t + r, u + s, v + w, 0, (p * q) / (p + q), p_ab - p_cd ) g = g * 2 * (anp.pi ** 2.5) / (p * q * anp.sqrt(p + q)) return g " 47887,"def main(): log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() mask_rcnn_model_xml = args.mask_rcnn_model mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin' text_enc_model_xml = args.text_enc_model text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin' text_dec_model_xml = args.text_dec_model text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin' # Plugin initialization for specified device and load extensions library if specified. log.info('Creating Inference Engine...') ie = IECore() if args.cpu_extension and 'CPU' in args.device: ie.add_extension(args.cpu_extension, 'CPU') # Read IR log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin)) mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin) log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin)) text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin) log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin)) text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin) if 'CPU' in args.device: supported_layers = ie.query_network(mask_rcnn_net, 'CPU') not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers] if len(not_supported_layers) != 0: log.error('Following layers are not supported by the plugin for specified device {}:\n {}'. 
format(args.device, ', '.join(not_supported_layers))) log.error(""Please try to specify cpu extensions library path in sample's command line parameters using -l "" ""or --cpu_extension command line argument"") sys.exit(1) required_input_keys = {'im_data', 'im_info'} assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \ 'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys)) required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'} assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \ 'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys)) n, c, h, w = mask_rcnn_net.inputs['im_data'].shape assert n == 1, 'Only batch 1 is supported by the demo application' log.info('Loading IR to the plugin...') mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2) text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device) text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device) for name, input in text_dec_exec_net.inputs.items(): if len(input.shape) == 3: if input.shape[1] == 1: trd_input_prev_hidden = name else: trd_input_encoder_outputs = name elif len(input.shape) == 1: trd_input_prev_symbol = name for name, output in text_dec_exec_net.outputs.items(): if len(output.shape) == 3: trd_output_cur_hidden = name elif len(output.shape) == 2: trd_output_symbols_distr = name hidden_shape = text_dec_net.inputs[trd_input_prev_hidden].shape del mask_rcnn_net del text_enc_net del text_dec_net try: input_source = int(args.input_source) except ValueError: input_source = args.input_source if os.path.isdir(input_source): cap = FolderCapture(input_source) else: cap = cv2.VideoCapture(input_source) if not cap.isOpened(): log.error('Failed to open ""{}""'.format(args.input_source)) if isinstance(cap, cv2.VideoCapture): cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) if args.no_track: tracker = None else: tracker = StaticIOUTracker() visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores) render_time = 0 log.info('Starting inference...') print(""To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"") while cap.isOpened(): ret, frame = cap.read() if not ret: break if not args.keep_aspect_ratio: # Resize the image to a target size. scale_x = w / frame.shape[1] scale_y = h / frame.shape[0] input_image = cv2.resize(frame, (w, h)) else: # Resize the image to keep the same aspect ratio and to fit it to a window of a target size. scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1]) input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y) input_image_size = input_image.shape[:2] input_image = np.pad(input_image, ((0, h - input_image_size[0]), (0, w - input_image_size[1]), (0, 0)), mode='constant', constant_values=0) # Change data layout from HWC to CHW. input_image = input_image.transpose((2, 0, 1)) input_image = input_image.reshape((n, c, h, w)).astype(np.float32) input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32) # Run the net. 
inf_start = time.time() outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info}) # Parse detection results of the current request boxes = outputs['boxes'] scores = outputs['scores'] classes = outputs['classes'].astype(np.uint32) raw_masks = outputs['raw_masks'] text_features = outputs['text_features'] # Filter out detections with low confidence. detections_filter = scores > args.prob_threshold scores = scores[detections_filter] classes = classes[detections_filter] boxes = boxes[detections_filter] raw_masks = raw_masks[detections_filter] text_features = text_features[detections_filter] boxes[:, 0::2] /= scale_x boxes[:, 1::2] /= scale_y masks = [] for box, cls, raw_mask in zip(boxes, classes, raw_masks): raw_cls_mask = raw_mask[cls, ...] mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1]) masks.append(mask) texts = [] for feature in text_features: feature = text_enc_exec_net.infer({'input': feature}) feature = list(feature.values())[0] feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1)) feature = np.transpose(feature, (0, 2, 1)) hidden = np.zeros(hidden_shape) prev_symbol_index = np.ones((1,)) * SOS_INDEX text = '' for i in range(MAX_SEQ_LEN): decoder_output = text_dec_exec_net.infer({ trd_input_prev_symbol: prev_symbol_index, trd_input_prev_hidden: hidden, trd_input_encoder_outputs: feature}) symbols_distr = decoder_output[trd_output_symbols_distr] prev_symbol_index = int(np.argmax(symbols_distr, axis=1)) if prev_symbol_index == EOS_INDEX: break text += args.alphabet[prev_symbol_index] hidden = decoder_output[trd_output_cur_hidden] texts.append(text) inf_end = time.time() inf_time = inf_end - inf_start render_start = time.time() if len(boxes) and args.raw_output_message: log.info('Detected boxes:') log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ') for box, cls, score, mask in zip(boxes, classes, scores, masks): log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box)) # Get instance track IDs. masks_tracks_ids = None if tracker is not None: masks_tracks_ids = tracker(masks, classes) # Visualize masks. frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids) # Draw performance stats. inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000) render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000) cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1) cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1) # Print performance counters. if args.perf_counts: perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts() log.info('Performance counters:') print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status', 'real_time, us')) for layer, stats in perf_counts.items(): print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'], stats['status'], stats['real_time'])) if not args.no_show: # Show resulting image. 
cv2.imshow('Results', frame) render_end = time.time() render_time = render_end - render_start if not args.no_show: key = cv2.waitKey(args.delay) esc_code = 27 if key == esc_code: break cv2.destroyAllWindows() cap.release() ","def main(): log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() mask_rcnn_model_xml = args.mask_rcnn_model mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin' text_enc_model_xml = args.text_enc_model text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin' text_dec_model_xml = args.text_dec_model text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin' # Plugin initialization for specified device and load extensions library if specified. log.info('Creating Inference Engine...') ie = IECore() if args.cpu_extension and 'CPU' in args.device: ie.add_extension(args.cpu_extension, 'CPU') # Read IR log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin)) mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin) log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin)) text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin) log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin)) text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin) if 'CPU' in args.device: supported_layers = ie.query_network(mask_rcnn_net, 'CPU') not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers] if len(not_supported_layers) != 0: log.error('Following layers are not supported by the plugin for specified device {}:\n {}'. format(args.device, ', '.join(not_supported_layers))) log.error(""Please try to specify cpu extensions library path in sample's command line parameters using -l "" ""or --cpu_extension command line argument"") sys.exit(1) required_input_keys = {'im_data', 'im_info'} assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \ 'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys)) required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'} assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \ 'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys)) n, c, h, w = mask_rcnn_net.inputs['im_data'].shape assert n == 1, 'Only batch 1 is supported by the demo application' log.info('Loading IR to the plugin...') mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2) text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device) text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device) for name, input in text_dec_exec_net.inputs.items(): if len(input.shape) == 3: if input.shape[1] == 1: trd_input_prev_hidden = name else: trd_input_encoder_outputs = name elif len(input.shape) == 1: trd_input_prev_symbol = name for name, output in text_dec_exec_net.outputs.items(): if len(output.shape) == 3: trd_output_cur_hidden = name elif len(output.shape) == 2: trd_output_symbols_distr = name hidden_shape = text_dec_net.inputs[trd_input_prev_hidden].shape del mask_rcnn_net del text_enc_net del text_dec_net try: input_source = int(args.input_source) except ValueError: input_source = args.input_source if os.path.isdir(input_source): cap = 
FolderCapture(input_source) else: cap = cv2.VideoCapture(input_source) if not cap.isOpened(): log.error('Failed to open ""{}""'.format(args.input_source)) if isinstance(cap, cv2.VideoCapture): cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) if args.no_track: tracker = None else: tracker = StaticIOUTracker() visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores) render_time = 0 log.info('Starting inference...') print(""To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"") while cap.isOpened(): ret, frame = cap.read() if not ret: break if not args.keep_aspect_ratio: # Resize the image to a target size. scale_x = w / frame.shape[1] scale_y = h / frame.shape[0] input_image = cv2.resize(frame, (w, h)) else: # Resize the image to keep the same aspect ratio and to fit it to a window of a target size. scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1]) input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y) input_image_size = input_image.shape[:2] input_image = np.pad(input_image, ((0, h - input_image_size[0]), (0, w - input_image_size[1]), (0, 0)), mode='constant', constant_values=0) # Change data layout from HWC to CHW. input_image = input_image.transpose((2, 0, 1)) input_image = input_image.reshape((n, c, h, w)).astype(np.float32) input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32) # Run the net. inf_start = time.time() outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info}) # Parse detection results of the current request boxes = outputs['boxes'] scores = outputs['scores'] classes = outputs['classes'].astype(np.uint32) raw_masks = outputs['raw_masks'] text_features = outputs['text_features'] # Filter out detections with low confidence. detections_filter = scores > args.prob_threshold scores = scores[detections_filter] classes = classes[detections_filter] boxes = boxes[detections_filter] raw_masks = raw_masks[detections_filter] text_features = text_features[detections_filter] boxes[:, 0::2] /= scale_x boxes[:, 1::2] /= scale_y masks = [] for box, cls, raw_mask in zip(boxes, classes, raw_masks): raw_cls_mask = raw_mask[cls, ...] mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1]) masks.append(mask) texts = [] for feature in text_features: feature = text_enc_exec_net.infer({'input': feature}) feature = tuple(feature.values())[0] feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1)) feature = np.transpose(feature, (0, 2, 1)) hidden = np.zeros(hidden_shape) prev_symbol_index = np.ones((1,)) * SOS_INDEX text = '' for i in range(MAX_SEQ_LEN): decoder_output = text_dec_exec_net.infer({ trd_input_prev_symbol: prev_symbol_index, trd_input_prev_hidden: hidden, trd_input_encoder_outputs: feature}) symbols_distr = decoder_output[trd_output_symbols_distr] prev_symbol_index = int(np.argmax(symbols_distr, axis=1)) if prev_symbol_index == EOS_INDEX: break text += args.alphabet[prev_symbol_index] hidden = decoder_output[trd_output_cur_hidden] texts.append(text) inf_end = time.time() inf_time = inf_end - inf_start render_start = time.time() if len(boxes) and args.raw_output_message: log.info('Detected boxes:') log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ') for box, cls, score, mask in zip(boxes, classes, scores, masks): log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box)) # Get instance track IDs. 
masks_tracks_ids = None if tracker is not None: masks_tracks_ids = tracker(masks, classes) # Visualize masks. frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids) # Draw performance stats. inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000) render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000) cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1) cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1) # Print performance counters. if args.perf_counts: perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts() log.info('Performance counters:') print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status', 'real_time, us')) for layer, stats in perf_counts.items(): print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'], stats['status'], stats['real_time'])) if not args.no_show: # Show resulting image. cv2.imshow('Results', frame) render_end = time.time() render_time = render_end - render_start if not args.no_show: key = cv2.waitKey(args.delay) esc_code = 27 if key == esc_code: break cv2.destroyAllWindows() cap.release() " 10355,"def is_module_path(path): """""" :type path: str :rtype: bool """""" return '/lib/ansible/modules/' in path or '/plugins/modules/' in path or '/module_utils/' in path ","def is_module_path(path): """""" :type path: str :rtype: bool """""" return '/lib/ansible/modules/' in path or '/lib/ansible/module_utils/' in path or '/plugins/modules/' in path or '/plugins/module_utils/' in path " 45740,"def forecast( precip, velocity, timesteps, precip_thr=None, n_cascade_levels=6, extrap_method=""semilagrangian"", decomp_method=""fft"", bandpass_filter_method=""gaussian"", ar_order=2, conditional=False, probmatching_method=""cdf"", num_workers=1, fft_method=""numpy"", domain=""spatial"", extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """""" Generate a nowcast by using the Spectral Prognosis (S-PROG) method. Parameters ---------- precip: array-like Array of shape (ar_order+1,m,n) containing the input precipitation fields ordered by timestamp from oldest to newest. The time steps between the inputs are assumed to be regular. velocity: array-like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list of floats Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. precip_thr: float, required The threshold value for minimum observable precipitation intensity. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. decomp_method: {'fft'}, optional Name of the cascade decomposition method to use. See the documentation of pysteps.cascade.interface. bandpass_filter_method: {'gaussian', 'uniform'}, optional Name of the bandpass filter method to use with the cascade decomposition. See the documentation of pysteps.cascade.interface. ar_order: int, optional The order of the autoregressive model to use. Must be >= 1. 
conditional: bool, optional If set to True, compute the statistics of the precipitation field conditionally by excluding pixels where the values are below the threshold precip_thr. probmatching_method: {'cdf','mean',None}, optional Method for matching the conditional statistics of the forecast field (areas with precipitation intensity above the threshold precip_thr) with those of the most recently observed one. 'cdf'=map the forecast CDF to the observed one, 'mean'=adjust only the mean value, None=no matching applied. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is enabled or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. domain: {""spatial"", ""spectral""} If ""spatial"", all computations are done in the spatial domain (the classical S-PROG model). If ""spectral"", the AR(2) models are applied directly in the spectral domain to reduce memory footprint and improve performance :cite:`PCH2019a`. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool If set to True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input precipitation fields precip. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). 
See also -------- pysteps.extrapolation.interface, pysteps.cascade.interface References ---------- :cite:`Seed2003`, :cite:`PCH2019a` """""" _check_inputs(precip, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() if filter_kwargs is None: filter_kwargs = dict() if np.any(~np.isfinite(velocity)): raise ValueError(""velocity contains non-finite values"") if precip_thr is None: raise ValueError(""precip_thr required but not specified"") print(""Computing S-PROG nowcast"") print(""------------------------"") print("""") print(""Inputs"") print(""------"") print(f""input dimensions: {precip.shape[1]}x{precip.shape[2]}"") print("""") print(""Methods"") print(""-------"") print(f""extrapolation: {extrap_method}"") print(f""bandpass filter: {bandpass_filter_method}"") print(f""decomposition: {decomp_method}"") print(""conditional statistics: {}"".format(""yes"" if conditional else ""no"")) print(f""probability matching: {probmatching_method}"") print(f""FFT method: {fft_method}"") print(f""domain: {domain}"") print("""") print(""Parameters"") print(""----------"") if isinstance(timesteps, int): print(f""number of time steps: {timesteps}"") else: print(f""time steps: {timesteps}"") print(f""parallel threads: {num_workers}"") print(f""number of cascade levels: {n_cascade_levels}"") print(f""order of the AR(p) model: {ar_order}"") print(f""precip. intensity threshold: {precip_thr}"") if measure_time: starttime_init = time.time() fft = utils.get_method(fft_method, shape=precip.shape[1:], n_threads=num_workers) m, n = precip.shape[1:] # initialize the band-pass filter filter_method = cascade.get_method(bandpass_filter_method) filter = filter_method((m, n), n_cascade_levels, **filter_kwargs) decomp_method, recomp_method = cascade.get_method(decomp_method) extrapolator_method = extrapolation.get_method(extrap_method) precip = precip[-(ar_order + 1) :, :, :].copy() precip_min = np.nanmin(precip) # determine the domain mask from non-finite values domain_mask = np.logical_or.reduce( [~np.isfinite(precip[i, :]) for i in range(precip.shape[0])] ) # determine the precipitation threshold mask if conditional: mask_thr = np.logical_and.reduce( [precip[i, :, :] >= precip_thr for i in range(precip.shape[0])] ) else: mask_thr = None # initialize the extrapolator x_values, y_values = np.meshgrid( np.arange(precip.shape[2]), np.arange(precip.shape[1]) ) xy_coords = np.stack([x_values, y_values]) extrap_kwargs = extrap_kwargs.copy() extrap_kwargs[""xy_coords""] = xy_coords extrap_kwargs[""allow_nonfinite_values""] = ( True if np.any(~np.isfinite(precip)) else False ) # advect the previous precipitation fields to the same position with the # most recent one (i.e. 
transform them into the Lagrangian coordinates) res = list() def f(precip, i): return extrapolator_method( precip[i, :], velocity, ar_order - i, ""min"", **extrap_kwargs )[-1] for i in range(ar_order): if not DASK_IMPORTED: precip[i, :, :] = f(precip, i) else: res.append(dask.delayed(f)(precip, i)) if DASK_IMPORTED: num_workers_ = len(res) if num_workers > len(res) else num_workers precip = np.stack( list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]] ) # replace non-finite values with the minimum value precip = precip.copy() for i in range(precip.shape[0]): precip[i, ~np.isfinite(precip[i, :])] = np.nanmin(precip[i, :]) # compute the cascade decompositions of the input precipitation fields precip_d = [] for i in range(ar_order + 1): precip_ = decomp_method( precip[i, :, :], filter, mask=mask_thr, fft_method=fft, output_domain=domain, normalize=True, compute_stats=True, compact_output=True, ) precip_d.append(precip_) # rearrange the cascade levels into a four-dimensional array of shape # (n_cascade_levels,ar_order+1,m,n) for the autoregressive model precip_c = nowcast_utils.stack_cascades( precip_d, n_cascade_levels, convert_to_full_arrays=True ) # compute lag-l temporal autocorrelation coefficients for each cascade level gamma = np.empty((n_cascade_levels, ar_order)) for i in range(n_cascade_levels): if domain == ""spatial"": gamma[i, :] = correlation.temporal_autocorrelation( precip_c[i], mask=mask_thr ) else: gamma[i, :] = correlation.temporal_autocorrelation( precip_c[i], domain=""spectral"", x_shape=precip.shape[1:] ) precip_c = nowcast_utils.stack_cascades( precip_d, n_cascade_levels, convert_to_full_arrays=False ) precip_d = precip_d[-1] nowcast_utils.print_corrcoefs(gamma) if ar_order == 2: # adjust the lag-2 correlation coefficient to ensure that the AR(p) # process is stationary for i in range(n_cascade_levels): gamma[i, 1] = autoregression.adjust_lag2_corrcoef2(gamma[i, 0], gamma[i, 1]) # estimate the parameters of the AR(p) model from the autocorrelation # coefficients phi = np.empty((n_cascade_levels, ar_order + 1)) for i in range(n_cascade_levels): phi[i, :] = autoregression.estimate_ar_params_yw(gamma[i, :]) nowcast_utils.print_ar_params(phi) # discard all except the p-1 last cascades because they are not needed for # the AR(p) model precip_c = [precip_c[i][-ar_order:] for i in range(n_cascade_levels)] if probmatching_method == ""mean"": mu_0 = np.mean(precip[-1, :, :][precip[-1, :, :] >= precip_thr]) else: mu_0 = None # compute precipitation mask and wet area ratio mask_p = precip[-1, :, :] >= precip_thr war = 1.0 * np.sum(mask_p) / (precip.shape[1] * precip.shape[2]) if measure_time: init_time = time.time() - starttime_init precip = precip[-1, :, :] print(""Starting nowcast computation."") precip_f = [] state = {""precip_c"": precip_c, ""precip_d"": precip_d} params = { ""domain"": domain, ""domain_mask"": domain_mask, ""fft"": fft, ""mu_0"": mu_0, ""n_cascade_levels"": n_cascade_levels, ""phi"": phi, ""precip_0"": precip, ""precip_min"": precip_min, ""probmatching_method"": probmatching_method, ""recomp_method"": recomp_method, ""war"": war, } precip_f = nowcast_main_loop( precip, velocity, state, timesteps, extrap_method, _update, extrap_kwargs=extrap_kwargs, params=params, measure_time=measure_time, ) if measure_time: precip_f, mainloop_time = precip_f precip_f = np.stack(precip_f) if measure_time: return precip_f, init_time, mainloop_time else: return precip_f ","def forecast( precip, velocity, timesteps, precip_thr=None, n_cascade_levels=6, 
extrap_method=""semilagrangian"", decomp_method=""fft"", bandpass_filter_method=""gaussian"", ar_order=2, conditional=False, probmatching_method=""cdf"", num_workers=1, fft_method=""numpy"", domain=""spatial"", extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """""" Generate a nowcast by using the Spectral Prognosis (S-PROG) method. Parameters ---------- precip: array-like Array of shape (ar_order+1,m,n) containing the input precipitation fields ordered by timestamp from oldest to newest. The time steps between the inputs are assumed to be regular. velocity: array-like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list of floats Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. precip_thr: float, required The threshold value for minimum observable precipitation intensity. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. decomp_method: {'fft'}, optional Name of the cascade decomposition method to use. See the documentation of pysteps.cascade.interface. bandpass_filter_method: {'gaussian', 'uniform'}, optional Name of the bandpass filter method to use with the cascade decomposition. See the documentation of pysteps.cascade.interface. ar_order: int, optional The order of the autoregressive model to use. Must be >= 1. conditional: bool, optional If set to True, compute the statistics of the precipitation field conditionally by excluding pixels where the values are below the threshold precip_thr. probmatching_method: {'cdf','mean',None}, optional Method for matching the conditional statistics of the forecast field (areas with precipitation intensity above the threshold precip_thr) with those of the most recently observed one. 'cdf'=map the forecast CDF to the observed one, 'mean'=adjust only the mean value, None=no matching applied. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is enabled or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. domain: {""spatial"", ""spectral""} If ""spatial"", all computations are done in the spatial domain (the classical S-PROG model). If ""spectral"", the AR(2) models are applied directly in the spectral domain to reduce memory footprint and improve performance :cite:`PCH2019a`. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool If set to True, measure, print and return the computation time. 
Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input precipitation fields precip. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). See also -------- pysteps.extrapolation.interface, pysteps.cascade.interface References ---------- :cite:`Seed2003`, :cite:`PCH2019a` """""" _check_inputs(precip, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() if filter_kwargs is None: filter_kwargs = dict() if np.any(~np.isfinite(velocity)): raise ValueError(""velocity contains non-finite values"") if precip_thr is None: raise ValueError(""precip_thr required but not specified"") print(""Computing S-PROG nowcast"") print(""------------------------"") print("""") print(""Inputs"") print(""------"") print(f""input dimensions: {precip.shape[1]}x{precip.shape[2]}"") print("""") print(""Methods"") print(""-------"") print(f""extrapolation: {extrap_method}"") print(f""bandpass filter: {bandpass_filter_method}"") print(f""decomposition: {decomp_method}"") print(""conditional statistics: {}"".format(""yes"" if conditional else ""no"")) print(f""probability matching: {probmatching_method}"") print(f""FFT method: {fft_method}"") print(f""domain: {domain}"") print("""") print(""Parameters"") print(""----------"") if isinstance(timesteps, int): print(f""number of time steps: {timesteps}"") else: print(f""time steps: {timesteps}"") print(f""parallel threads: {num_workers}"") print(f""number of cascade levels: {n_cascade_levels}"") print(f""order of the AR(p) model: {ar_order}"") print(f""precip. intensity threshold: {precip_thr}"") if measure_time: starttime_init = time.time() fft = utils.get_method(fft_method, shape=precip.shape[1:], n_threads=num_workers) m, n = precip.shape[1:] # initialize the band-pass filter filter_method = cascade.get_method(bandpass_filter_method) filter = filter_method((m, n), n_cascade_levels, **filter_kwargs) decomp_method, recomp_method = cascade.get_method(decomp_method) extrapolator_method = extrapolation.get_method(extrap_method) precip = precip[-(ar_order + 1) :, :, :].copy() precip_min = np.nanmin(precip) # determine the domain mask from non-finite values domain_mask = np.logical_or.reduce( [~np.isfinite(precip[i, :]) for i in range(precip.shape[0])] ) # determine the precipitation threshold mask if conditional: mask_thr = np.logical_and.reduce( [precip[i, :, :] >= precip_thr for i in range(precip.shape[0])] ) else: mask_thr = None # initialize the extrapolator x_values, y_values = np.meshgrid( np.arange(precip.shape[2]), np.arange(precip.shape[1]) ) xy_coords = np.stack([x_values, y_values]) extrap_kwargs = extrap_kwargs.copy() extrap_kwargs[""xy_coords""] = xy_coords extrap_kwargs[""allow_nonfinite_values""] = ( True if np.any(~np.isfinite(precip)) else False ) # advect the previous precipitation fields to the same position with the # most recent one (i.e. 
transform them into the Lagrangian coordinates) res = list() def f(precip, i): return extrapolator_method( precip[i, :], velocity, ar_order - i, ""min"", **extrap_kwargs )[-1] for i in range(ar_order): if not DASK_IMPORTED: precip[i, :, :] = f(precip, i) else: res.append(dask.delayed(f)(precip, i)) if DASK_IMPORTED: num_workers_ = len(res) if num_workers > len(res) else num_workers precip = np.stack( list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]] ) # replace non-finite values with the minimum value precip = precip.copy() for i in range(precip.shape[0]): precip[i, ~np.isfinite(precip[i, :])] = np.nanmin(precip[i, :]) # compute the cascade decompositions of the input precipitation fields precip_d = [] for i in range(ar_order + 1): precip_d_ = decomp_method( precip[i, :, :], filter, mask=mask_thr, fft_method=fft, output_domain=domain, normalize=True, compute_stats=True, compact_output=True, ) precip_d.append(precip_) # rearrange the cascade levels into a four-dimensional array of shape # (n_cascade_levels,ar_order+1,m,n) for the autoregressive model precip_c = nowcast_utils.stack_cascades( precip_d, n_cascade_levels, convert_to_full_arrays=True ) # compute lag-l temporal autocorrelation coefficients for each cascade level gamma = np.empty((n_cascade_levels, ar_order)) for i in range(n_cascade_levels): if domain == ""spatial"": gamma[i, :] = correlation.temporal_autocorrelation( precip_c[i], mask=mask_thr ) else: gamma[i, :] = correlation.temporal_autocorrelation( precip_c[i], domain=""spectral"", x_shape=precip.shape[1:] ) precip_c = nowcast_utils.stack_cascades( precip_d, n_cascade_levels, convert_to_full_arrays=False ) precip_d = precip_d[-1] nowcast_utils.print_corrcoefs(gamma) if ar_order == 2: # adjust the lag-2 correlation coefficient to ensure that the AR(p) # process is stationary for i in range(n_cascade_levels): gamma[i, 1] = autoregression.adjust_lag2_corrcoef2(gamma[i, 0], gamma[i, 1]) # estimate the parameters of the AR(p) model from the autocorrelation # coefficients phi = np.empty((n_cascade_levels, ar_order + 1)) for i in range(n_cascade_levels): phi[i, :] = autoregression.estimate_ar_params_yw(gamma[i, :]) nowcast_utils.print_ar_params(phi) # discard all except the p-1 last cascades because they are not needed for # the AR(p) model precip_c = [precip_c[i][-ar_order:] for i in range(n_cascade_levels)] if probmatching_method == ""mean"": mu_0 = np.mean(precip[-1, :, :][precip[-1, :, :] >= precip_thr]) else: mu_0 = None # compute precipitation mask and wet area ratio mask_p = precip[-1, :, :] >= precip_thr war = 1.0 * np.sum(mask_p) / (precip.shape[1] * precip.shape[2]) if measure_time: init_time = time.time() - starttime_init precip = precip[-1, :, :] print(""Starting nowcast computation."") precip_f = [] state = {""precip_c"": precip_c, ""precip_d"": precip_d} params = { ""domain"": domain, ""domain_mask"": domain_mask, ""fft"": fft, ""mu_0"": mu_0, ""n_cascade_levels"": n_cascade_levels, ""phi"": phi, ""precip_0"": precip, ""precip_min"": precip_min, ""probmatching_method"": probmatching_method, ""recomp_method"": recomp_method, ""war"": war, } precip_f = nowcast_main_loop( precip, velocity, state, timesteps, extrap_method, _update, extrap_kwargs=extrap_kwargs, params=params, measure_time=measure_time, ) if measure_time: precip_f, mainloop_time = precip_f precip_f = np.stack(precip_f) if measure_time: return precip_f, init_time, mainloop_time else: return precip_f " 30901,"def search_by_name_command(): args = demisto.args() name = args.get('name') 
limit = args.get('limit') or '10' if limit and isinstance(limit, str) and not limit.isdigit(): return_error('limit argument must be an integer.') url_suffix = '/search?query={0}&limit={1}'.format(name, limit) res = tq_request('GET', url_suffix) indicator_context = [{'ID': e['id'], 'Value': e['value']} for e in res['data'] if e['object'] == 'indicator'] event_context = [{'ID': e['id'], 'Title': e['value']} for e in res['data'] if e['object'] == 'event'] adversary_context = [{'ID': e['id'], 'Name': e['value']} for e in res['data'] if e['object'] == 'adversary'] file_context = [{'ID': e['id'], 'Name': e['value'].split()[1]} for e in res['data'] if e['object'] == 'attachment'] # file value in response is returned in the form [""title"" name], thus we use the split method above entry_context = { CONTEXT_PATH['indicator']: indicator_context, CONTEXT_PATH['event']: event_context, CONTEXT_PATH['adversary']: adversary_context, CONTEXT_PATH['attachment']: file_context } # Remove items with empty values: entry_context = {k: v for k, v in entry_context.items() if v} readable = build_readable_for_search_by_name(indicator_context, event_context, adversary_context, file_context) return_outputs(readable, entry_context, res) ","def search_by_name_command(): args = demisto.args() name = args.get('name') limit = args.get('limit', '10') if limit and isinstance(limit, str) and not limit.isdigit(): return_error('limit argument must be an integer.') url_suffix = '/search?query={0}&limit={1}'.format(name, limit) res = tq_request('GET', url_suffix) indicator_context = [{'ID': e['id'], 'Value': e['value']} for e in res['data'] if e['object'] == 'indicator'] event_context = [{'ID': e['id'], 'Title': e['value']} for e in res['data'] if e['object'] == 'event'] adversary_context = [{'ID': e['id'], 'Name': e['value']} for e in res['data'] if e['object'] == 'adversary'] file_context = [{'ID': e['id'], 'Name': e['value'].split()[1]} for e in res['data'] if e['object'] == 'attachment'] # file value in response is returned in the form [""title"" name], thus we use the split method above entry_context = { CONTEXT_PATH['indicator']: indicator_context, CONTEXT_PATH['event']: event_context, CONTEXT_PATH['adversary']: adversary_context, CONTEXT_PATH['attachment']: file_context } # Remove items with empty values: entry_context = {k: v for k, v in entry_context.items() if v} readable = build_readable_for_search_by_name(indicator_context, event_context, adversary_context, file_context) return_outputs(readable, entry_context, res) " 35389,"def string_to_uidgid(userandgroup): """""" Translate the userandgroup string to uid and gid. The userandgroup parameter must be a string of the format '[:]'. User and group can be strings or integers. If no group is given, -1 will be returned for gid. """""" params = userandgroup.split(':') if len(params) > 2: return None, None, ""User and group '%s' are in wrong format. Expected [:group]"" % userandgroup gid = -1 if len(params) == 2: if params[1].isnumeric(): gid = int(params[1]) else: try: gr = grp.getgrnam(params[1]) gid = gr.gr_gid except Exception as e: return None, None, 'Could not resolve group %s: %s' % (params[1], str(e)) if params[0].isnumeric(): uid = int(params[0]) else: try: passwd = pwd.getpwnam(params[0]) uid = passwd.pw_uid except Exception as e: return None, None, 'Could not resolve user %s: %s' % (params[0], str(e)) return uid, gid, None ","def string_to_uidgid(userandgroup): """""" Translate the userandgroup string to uid and gid. 
The userandgroup parameter must be a string of the format '[:]'. User and group can be strings or integers. If no group is given, -1 will be returned for gid. """""" params = userandgroup.split(':') if len(params) > 2: return None, None, f""User and group '{userandgroup}' are in wrong format. Expected [:group]"" gid = -1 if len(params) == 2: if params[1].isnumeric(): gid = int(params[1]) else: try: gr = grp.getgrnam(params[1]) gid = gr.gr_gid except Exception as e: return None, None, 'Could not resolve group %s: %s' % (params[1], str(e)) if params[0].isnumeric(): uid = int(params[0]) else: try: passwd = pwd.getpwnam(params[0]) uid = passwd.pw_uid except Exception as e: return None, None, 'Could not resolve user %s: %s' % (params[0], str(e)) return uid, gid, None " 31109,"def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: result = advanced_case_search( startTimestamp=args.get(""start_timestamp"", None), endTimestamp=args.get(""end_timestamp"", None), limit=args.get(""limit"", None), offset=args.get(""offset"", None), includeDeleted=args.get(""include_deleted"", None), subCriteria=args.get(""sub_criteria"", None), exclude=args.get(""exclude"", None), required=args.get(""required"", None), customerID=args.get(""customer_id"", None), caseID=args.get(""case_id"", None), customer=args.get(""customer"", None), type=args.get(""type"", None), service=args.get(""service"", None), category=args.get(""category"", None), status=args.get(""status"", None), priority=args.get(""priority"", None), assetID=args.get(""asset_id"", None), tag=args.get(""tag"", None), workflow=args.get(""workflow"", None), field=args.get(""field"", None), keywords=args.get(""keywords"", None), timeFieldStrategy=args.get(""time_field_strategy"", None), timeMatchStrategy=args.get(""time_match_strategy"", None), keywordFieldStrategy=args.get(""keyword_field_strategy"", None), keywordMatchStrategy=args.get(""keyword_match_strategy"", None), user=args.get(""user"", None), userFieldStrategy=args.get(""user_field_strategy"", None), userAssigned=args.get(""user_assigned"", None), techAssigned=args.get(""tech_assigned"", None), includeWorkflows=args.get(""include_workflows"", None), includeDescription=args.get(""include_description"", None), accessMode=args.get(""access_mode"", None), explicitAccess=args.get(""explicit_access"", None), sortBy=args.get(""sort_by"", None), includeFlags=args.get(""include_flags"", None), excludeFlags=args.get(""exclude_flags"", None), ) readable_output = f""Advanced Case Search: {result['count']} result(s)\n"" readable_output += tableToMarkdown( ""Output not suitable for playground"", result[""data""] ) return CommandResults( readable_output=readable_output, outputs_prefix=""Argus.Cases"", outputs=result, raw_response=result, ) ","def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults: result = advanced_case_search( startTimestamp=args.get(""start_timestamp"", None), endTimestamp=args.get(""end_timestamp"", None), limit=args.get(""limit"", None), offset=args.get(""offset"", None), includeDeleted=args.get(""include_deleted"", None), subCriteria=args.get(""sub_criteria"", None), exclude=args.get(""exclude"", None), required=args.get(""required"", None), customerID=argToList(args.get(""customer_id"", None)), caseID=args.get(""case_id"", None), customer=args.get(""customer"", None), type=args.get(""type"", None), service=args.get(""service"", None), category=args.get(""category"", None), status=args.get(""status"", None), priority=args.get(""priority"", None), 
assetID=args.get(""asset_id"", None), tag=args.get(""tag"", None), workflow=args.get(""workflow"", None), field=args.get(""field"", None), keywords=args.get(""keywords"", None), timeFieldStrategy=args.get(""time_field_strategy"", None), timeMatchStrategy=args.get(""time_match_strategy"", None), keywordFieldStrategy=args.get(""keyword_field_strategy"", None), keywordMatchStrategy=args.get(""keyword_match_strategy"", None), user=args.get(""user"", None), userFieldStrategy=args.get(""user_field_strategy"", None), userAssigned=args.get(""user_assigned"", None), techAssigned=args.get(""tech_assigned"", None), includeWorkflows=args.get(""include_workflows"", None), includeDescription=args.get(""include_description"", None), accessMode=args.get(""access_mode"", None), explicitAccess=args.get(""explicit_access"", None), sortBy=args.get(""sort_by"", None), includeFlags=args.get(""include_flags"", None), excludeFlags=args.get(""exclude_flags"", None), ) readable_output = f""Advanced Case Search: {result['count']} result(s)\n"" readable_output += tableToMarkdown( ""Output not suitable for playground"", result[""data""] ) return CommandResults( readable_output=readable_output, outputs_prefix=""Argus.Cases"", outputs=result, raw_response=result, ) " 6060,"def installDiracOS(releaseConfig): """""" Install the DIRAC os. :param str releaseConfig: the version of the DIRAC OS """""" diracos, diracOSVersion = releaseConfig.getDiracOSVersion(cliParams.diracOSVersion) if not diracOSVersion: logERROR(""No diracos defined"") return False tarsURL = None if cliParams.installSource: tarsURL = cliParams.installSource else: # if : is not exists in diracos version, we use diracos from DIRAC if diracos == 'diracos': tarsURL = releaseConfig.getDiracOsLocation(diracOSFromDIRAC=True)['Value'] else: tarsURL = releaseConfig.getDiracOsLocation()['Value'] if not tarsURL: tarsURL = releaseConfig.getTarsLocation('DIRAC')['Value'] logWARN(""DIRACOS location is not specified using %s"" % tarsURL) if not downloadAndExtractTarball(tarsURL, diracos, diracOSVersion, cache=True): return False logNOTICE(""Fixing externals paths..."") fixBuildPaths() logNOTICE(""Running externals post install..."") checkPlatformAliasLink() return True ","def installDiracOS(releaseConfig): """""" Install the DIRAC os. 
:param str releaseConfig: the version of the DIRAC OS """""" diracos, diracOSVersion = releaseConfig.getDiracOSVersion(cliParams.diracOSVersion) if not diracOSVersion: logERROR(""No diracos defined"") return False tarsURL = None if cliParams.installSource: tarsURL = cliParams.installSource else: # if "":"" is not present in diracos name, we take the diracos tarball from vanilla DIRAC location if diracos == 'diracos': tarsURL = releaseConfig.getDiracOsLocation(diracOSFromDIRAC=True)['Value'] else: tarsURL = releaseConfig.getDiracOsLocation()['Value'] if not tarsURL: tarsURL = releaseConfig.getTarsLocation('DIRAC')['Value'] logWARN(""DIRACOS location is not specified using %s"" % tarsURL) if not downloadAndExtractTarball(tarsURL, diracos, diracOSVersion, cache=True): return False logNOTICE(""Fixing externals paths..."") fixBuildPaths() logNOTICE(""Running externals post install..."") checkPlatformAliasLink() return True " 5855,"def test_sg_filter_valid_window_length_3d(): """"""Tests that the window_length check is using the correct axis."""""" x = np.random.rand(10, 20, 30) savgol_filter(x, window_length=29, polyorder=3, mode='interp') with assert_raises(ValueError): savgol_filter(x, window_length=31, polyorder=3, mode='interp') savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp') with assert_raises(ValueError): # Can only raise window_length error if axis 0 is the one being checked savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp') ","def test_sg_filter_valid_window_length_3d(): """"""Tests that the window_length check is using the correct axis."""""" x = np.ones((10, 20, 30)) savgol_filter(x, window_length=29, polyorder=3, mode='interp') with assert_raises(ValueError): savgol_filter(x, window_length=31, polyorder=3, mode='interp') savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp') with assert_raises(ValueError): # Can only raise window_length error if axis 0 is the one being checked savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp') " 35364,"def _get_args_xsel(*args, **kwargs): type_ = kwargs.pop(""type_"", str(args[0]) if len(args) > 1 else """").upper() item = kwargs.pop(""item"", str(args[1]) if len(args) > 1 else """").upper() comp = kwargs.pop(""comp"", str(args[2]) if len(args) > 2 else """").upper() vmin = kwargs.pop(""vmin"", args[3] if len(args) > 3 else """") vmax = kwargs.pop(""vmax"", args[4] if len(args) > 4 else """") vinc = kwargs.pop(""vinc"", args[5] if len(args) > 5 else """") kabs = kwargs.pop(""kabs"", args[6] if len(args) > 6 else """") return type_, item, comp, vmin, vmax, vinc, kabs, kwargs ","def _get_args_xsel(*args, **kwargs): type_ = kwargs.pop(""type_"", str(args[0]) if len(args) else """").upper() item = kwargs.pop(""item"", str(args[1]) if len(args) > 1 else """").upper() comp = kwargs.pop(""comp"", str(args[2]) if len(args) > 2 else """").upper() vmin = kwargs.pop(""vmin"", args[3] if len(args) > 3 else """") vmax = kwargs.pop(""vmax"", args[4] if len(args) > 4 else """") vinc = kwargs.pop(""vinc"", args[5] if len(args) > 5 else """") kabs = kwargs.pop(""kabs"", args[6] if len(args) > 6 else """") return type_, item, comp, vmin, vmax, vinc, kabs, kwargs " 17941,"def test_trace_enums(): tracer = Tracer() tracer.record_calculation_start(""A"", 2017) tracer.record_calculation_end(""A"", 2017, HousingOccupancyStatus.encode(np.array(['tenant']))) lines = tracer.computation_log() assert lines[0] == ' A<2017> >> [\'tenant\']' ","def test_trace_enums(): tracer = Tracer() 
tracer.record_calculation_start(""A"", 2017) tracer.record_calculation_end(""A"", 2017, HousingOccupancyStatus.encode(np.array(['tenant']))) lines = tracer.computation_log() assert lines[0] == "" A<2017> >> ['tenant']"" " 49573,"def test_empty_partitions_with_value_counts(): # https://github.com/dask/dask/issues/7065 df = pd.DataFrame( data=[ [""a1"", ""b1""], [""a1"", None], [""a1"", ""b1""], [None, None], [None, None], [None, None], [""a3"", ""b3""], [""a3"", ""b3""], [""a5"", ""b5""], ], columns=[""A"", ""B""], ) expected = df.groupby(""A"")[""B""].value_counts() ddf = dd.from_pandas(df, npartitions=3) actual = ddf.groupby(""A"")[""B""].value_counts().compute() assert_eq(expected, actual) ","def test_empty_partitions_with_value_counts(): # https://github.com/dask/dask/issues/7065 df = pd.DataFrame( data=[ [""a1"", ""b1""], [""a1"", None], [""a1"", ""b1""], [None, None], [None, None], [None, None], [""a3"", ""b3""], [""a3"", ""b3""], [""a5"", ""b5""], ], columns=[""A"", ""B""], ) expected = df.groupby(""A"")[""B""].value_counts() ddf = dd.from_pandas(df, npartitions=3) actual = ddf.groupby(""A"")[""B""].value_counts() assert_eq(expected, actual) " 38080,"def test_earth_relief_holes(): """""" Check that the @earth_relief_20m_holes.grd dataset loads without errors. """""" grid = load_sample_data(""earth_relief_holes"") assert grid.shape == (30, 30) npt.assert_allclose(grid.max(), 1878) npt.assert_allclose(grid.min(), -4947) # Test for the NaN values in the remote file assert math.isnan(grid[2, 19]) ","def test_earth_relief_holes(): """""" Check that the @earth_relief_20m_holes.grd dataset loads without errors. """""" grid = load_sample_data(name=""earth_relief_holes"") assert grid.shape == (30, 30) npt.assert_allclose(grid.max(), 1878) npt.assert_allclose(grid.min(), -4947) # Test for the NaN values in the remote file assert math.isnan(grid[2, 19]) " 3387,"def _get_dsym_url(bundles: Optional[List[JSONData]]) -> Union[NoDsymUrl, str]: """"""Returns the dSYMs URL from the extracted from the build bundles."""""" # https://developer.apple.com/documentation/appstoreconnectapi/build/relationships/buildbundles # https://developer.apple.com/documentation/appstoreconnectapi/buildbundle/attributes # If you ever write code for this here you probably will find an # includesSymbols attribute in the buildBundles and wonder why we ignore # it. Then you'll look at it and wonder why it doesn't match anything # to do with whether dSYMs can be downloaded or not. This is because # the includesSymbols only indicates whether App Store Connect has full # symbol names or not, it does not have anything to do with whether it # was a bitcode upload or a native upload. And whether dSYMs are # available for download only depends on whether it was a bitcode # upload. if bundles is None: bundles = [] # Remove all bundles associated with app clips, those don't have dSYMS or really any useful # data since they're not apps themselves app_bundles = [ app_bundle for app_bundle in bundles if safe.get_path(app_bundle, ""attributes"", ""bundleType"", default=""APP"") != ""APP_CLIP"" ] if len(bundles) == 0: return NoDsymUrl.NOT_NEEDED if len(app_bundles) > 1: # We currently do not know how to handle these, we'll carry on # with the first bundle but report this as an error. sentry_sdk.capture_message(""len(buildBundles) != 1"") # Because we only ask for processingState=VALID builds we expect the # builds to be finished and if there are no dSYMs that means the # build doesn't need dSYMs, i.e. it not a bitcode build. 
bundle = app_bundles[0] url = safe.get_path(bundle, ""attributes"", ""dSYMUrl"", default=NoDsymUrl.NOT_NEEDED) if isinstance(url, (NoDsymUrl, str)): return url else: raise ValueError(f""Unexpected value in build bundle's dSYMUrl: {url}"") ","def _get_dsym_url(bundles: Optional[List[JSONData]]) -> Union[NoDsymUrl, str]: """"""Returns the dSYMs URL from the extracted from the build bundles."""""" # https://developer.apple.com/documentation/appstoreconnectapi/build/relationships/buildbundles # https://developer.apple.com/documentation/appstoreconnectapi/buildbundle/attributes # If you ever write code for this here you probably will find an # includesSymbols attribute in the buildBundles and wonder why we ignore # it. Then you'll look at it and wonder why it doesn't match anything # to do with whether dSYMs can be downloaded or not. This is because # the includesSymbols only indicates whether App Store Connect has full # symbol names or not, it does not have anything to do with whether it # was a bitcode upload or a native upload. And whether dSYMs are # available for download only depends on whether it was a bitcode # upload. if bundles is None: bundles = [] # Remove all bundles associated with app clips, those don't have dSYMS or really any useful # data since they're not apps themselves app_bundles = [ app_bundle for app_bundle in bundles if safe.get_path(app_bundle, ""attributes"", ""bundleType"", default=""APP"") != ""APP_CLIP"" ] if len(app_bundles) == 0: return NoDsymUrl.NOT_NEEDED if len(app_bundles) > 1: # We currently do not know how to handle these, we'll carry on # with the first bundle but report this as an error. sentry_sdk.capture_message(""len(buildBundles) != 1"") # Because we only ask for processingState=VALID builds we expect the # builds to be finished and if there are no dSYMs that means the # build doesn't need dSYMs, i.e. it not a bitcode build. bundle = app_bundles[0] url = safe.get_path(bundle, ""attributes"", ""dSYMUrl"", default=NoDsymUrl.NOT_NEEDED) if isinstance(url, (NoDsymUrl, str)): return url else: raise ValueError(f""Unexpected value in build bundle's dSYMUrl: {url}"") " 39531,"def split_markdown_front_matter(lines: str) -> Tuple[str, str]: r"""""" Split text into markdown front matter and the markdown body Return ("""", text) for text with non existing front matter >>> text='''--- ... title: DUMMY-SECURITY-2019-001 ... description: Incorrect access control. ... cves: [CVE-2042-1337] ... --- ... # Markdown starts here ... ''' >>> split_markdown_front_matter(text) ('title: DUMMY-SECURITY-2019-001\ndescription: Incorrect access control.\ncves: [CVE-2042-1337]', '# Markdown starts here\n') """""" fmlines = [] mdlines = [] splitter = mdlines lines = lines.replace(""\r\n"", ""\n"") for index, line in enumerate(lines.split(""\n"")): if index == 0 and line.strip().startswith(""---""): splitter = fmlines elif line.strip().startswith(""---""): splitter = mdlines else: splitter.append(line) return ""\n"".join(fmlines), ""\n"".join(mdlines) ","def split_markdown_front_matter(lines: str) -> Tuple[str, str]: r"""""" Split text into markdown front matter and the markdown body Return ("""", text) for text with non existing front matter >>> text='''--- ... title: DUMMY-SECURITY-2019-001 ... description: Incorrect access control. ... cves: [CVE-2042-1337] ... --- ... # Markdown starts here ... 
''' >>> split_markdown_front_matter(text) ('title: DUMMY-SECURITY-2019-001\ndescription: Incorrect access control.\ncves: [CVE-2042-1337]', '# Markdown starts here\n') """""" fmlines = [] mdlines = [] splitter = mdlines lines = lines.replace(""\r\n"", ""\n"") for index, line in enumerate(lines.splitlines(False)): if index == 0 and line.strip().startswith(""---""): splitter = fmlines elif line.strip().startswith(""---""): splitter = mdlines else: splitter.append(line) return ""\n"".join(fmlines), ""\n"".join(mdlines) " 39551,"def extract_references(reference_data: List): """""" Yield `reference` by itearting over `reference_data` """""" for ref in reference_data: url = ref[""url""] if not isinstance(url, str): continue if ""GHSA-"" in url.upper(): reference = Reference(url=url, reference_id=url.split(""/"")[-1]) else: reference = Reference(url=url) yield reference ","def extract_references(reference_data: List): """""" Yield `reference` by iterating over `reference_data` """""" for ref in reference_data: url = ref[""url""] if not isinstance(url, str): continue if ""GHSA-"" in url.upper(): reference = Reference(url=url, reference_id=url.split(""/"")[-1]) else: reference = Reference(url=url) yield reference " 4185,"def read_dig_montage_brainvision(fname): r""""""Read subject-specific digitization montage from a brainvision file. Parameters ---------- bvct : path-like BrainVision CapTrak coordinates file from which to read digitization locations. This is typically in XML format. If str (filename), all other arguments are ignored. Returns ------- montage : instance of DigMontage The digitizer montage. See Also -------- DigMontage Montage read_montage """""" _check_fname(fname, overwrite='read', must_exist=True) data = _parse_brainvision_dig_montage(fname) # XXX: to change to the new naming in v.0.20 (all this block should go) data.pop('point_names') data['hpi_dev'] = data['hpi'] data['hpi'] = data.pop('elp') data['ch_pos'] = data.pop('dig_ch_pos') return make_dig_montage( **data, transform_to_head=False, compute_dev_head_t=False, ) ","def read_dig_montage_brainvision(fname): r""""""Read subject-specific digitization montage from a brainvision file. Parameters ---------- bvct : path-like BrainVision CapTrak coordinates file from which to read digitization locations. This is typically in XML format. If str (filename), all other arguments are ignored. Returns ------- montage : instance of DigMontage The digitized montage. See Also -------- DigMontage Montage read_montage """""" _check_fname(fname, overwrite='read', must_exist=True) data = _parse_brainvision_dig_montage(fname) # XXX: to change to the new naming in v.0.20 (all this block should go) data.pop('point_names') data['hpi_dev'] = data['hpi'] data['hpi'] = data.pop('elp') data['ch_pos'] = data.pop('dig_ch_pos') return make_dig_montage( **data, transform_to_head=False, compute_dev_head_t=False, ) " 34523,"def first_key(d: Dict[Text, Any], default_key: Any) -> Any: if len(d) > 1: for k in d.items(): if k != default_key: # we return the first key that is not the default key return k elif len(d) == 1: return list(d.keys())[0] else: return None ","def first_key(d: Dict[Text, Any], default_key: Any) -> Any: if len(d) > 1: for k in d.keys(): if k != default_key: # we return the first key that is not the default key return k elif len(d) == 1: return list(d.keys())[0] else: return None " 3116,"def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: """""" Compute the isin boolean array. 
Parameters ---------- comps : array-like values : array-like Returns ------- ndarray[bool] Same length as `comps`. """""" if not is_list_like(comps): raise TypeError( ""only list-like objects are allowed to be passed "" f""to isin(), you passed a [{type(comps).__name__}]"" ) if not is_list_like(values): raise TypeError( ""only list-like objects are allowed to be passed "" f""to isin(), you passed a [{type(values).__name__}]"" ) if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) # TODO: could use ensure_arraylike here comps = extract_array(comps, extract_numpy=True) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return cast(""Categorical"", comps).isin(values) comps, dtype = _ensure_data(comps) values, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = htable.ismember_object # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1_000_000 and not is_object_dtype(comps): # If the the values include nan we need to check for nan explicitly # since np.nan it not equal to np.nan if any(np.isnan(values)): f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c)) else: f = np.in1d elif is_integer_dtype(comps): try: values = values.astype(""int64"", copy=False) comps = comps.astype(""int64"", copy=False) f = htable.ismember_int64 except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype(""float64"", copy=False) comps = comps.astype(""float64"", copy=False) f = htable.ismember_float64 except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values) ","def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: """""" Compute the isin boolean array. Parameters ---------- comps : array-like values : array-like Returns ------- ndarray[bool] Same length as `comps`. 
"""""" if not is_list_like(comps): raise TypeError( ""only list-like objects are allowed to be passed "" f""to isin(), you passed a [{type(comps).__name__}]"" ) if not is_list_like(values): raise TypeError( ""only list-like objects are allowed to be passed "" f""to isin(), you passed a [{type(values).__name__}]"" ) if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) # TODO: could use ensure_arraylike here comps = extract_array(comps, extract_numpy=True) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return cast(""Categorical"", comps).isin(values) comps, dtype = _ensure_data(comps) values, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = htable.ismember_object # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1_000_000 and not is_object_dtype(comps): # If the the values include nan we need to check for nan explicitly # since np.nan it not equal to np.nan if np.isnan(values).any(): f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c)) else: f = np.in1d elif is_integer_dtype(comps): try: values = values.astype(""int64"", copy=False) comps = comps.astype(""int64"", copy=False) f = htable.ismember_int64 except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype(""float64"", copy=False) comps = comps.astype(""float64"", copy=False) f = htable.ismember_float64 except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values) " 16662,"def fetch_site( connection: datapoint.Manager, latitude: float, longitude: float ) -> Site: """"""Fetch site information from Datapoint API."""""" try: return connection.get_nearest_forecast_site( latitude=latitude, longitude=longitude ) except datapoint.exceptions.APIException as err: _LOGGER.error(""Received error from Met Office Datapoint: %s"", err) return None ","def fetch_site( connection: datapoint.Manager, latitude: float, longitude: float ) -> Site | None: """"""Fetch site information from Datapoint API."""""" try: return connection.get_nearest_forecast_site( latitude=latitude, longitude=longitude ) except datapoint.exceptions.APIException as err: _LOGGER.error(""Received error from Met Office Datapoint: %s"", err) return None " 10646,"def create_windows_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None: """"""Create and return inventory for use in target Windows integration tests."""""" first = target_hosts[0] if isinstance(first, WindowsInventoryProfile): if args.explain: return try: shutil.copyfile(path,first.config.path) except shutil.SameFileError: pass return target_hosts = t.cast(list[WindowsRemoteProfile], target_hosts) hosts = [(target_host, target_host.wait_for_instance().connection) for target_host in target_hosts] windows_hosts = {sanitize_host_name(host.config.name): host.get_inventory_variables() for host, connection in hosts} inventory = Inventory( host_groups=dict( windows=windows_hosts, ), # The `testhost` group is needed to support the `binary_modules_winrm` integration test. # The test should be updated to remove the need for this. 
extra_groups={ 'testhost:children': [ 'windows', ], }, ) inventory.write(args, path) ","def create_windows_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None: """"""Create and return inventory for use in target Windows integration tests."""""" first = target_hosts[0] if isinstance(first, WindowsInventoryProfile): if args.explain: return try: shutil.copyfile(path, first.config.path) except shutil.SameFileError: pass return target_hosts = t.cast(list[WindowsRemoteProfile], target_hosts) hosts = [(target_host, target_host.wait_for_instance().connection) for target_host in target_hosts] windows_hosts = {sanitize_host_name(host.config.name): host.get_inventory_variables() for host, connection in hosts} inventory = Inventory( host_groups=dict( windows=windows_hosts, ), # The `testhost` group is needed to support the `binary_modules_winrm` integration test. # The test should be updated to remove the need for this. extra_groups={ 'testhost:children': [ 'windows', ], }, ) inventory.write(args, path) " 27983,"def remove_report_from_plist(plist_file_obj, skip_handler): """""" Parse the original plist content provided by the analyzer and return a new plist content where reports were removed if they should be skipped. If the remove failed for some reason None will be returned. WARN !!!! If the 'files' array in the plist is modified all of the diagnostic section (control, event ...) nodes should be re indexed to use the proper file array indexes!!! """""" report_data = None try: report_data = parse_plist(plist_file_obj) except plistlib.InvalidFileException as ifex: LOG.warning('Invalid plist file') return None except (ExpatError, TypeError, AttributeError) as ex: LOG.error(""Failed to parse plist content, "" ""keeping the original version"") LOG.error(ex) return None file_ids_to_remove = [] try: for i, f in enumerate(report_data['files']): if skip_handler.should_skip(f): file_ids_to_remove.append(i) kept_diagnostics, kept_files = get_kept_report_data(report_data, file_ids_to_remove) report_data['diagnostics'] = kept_diagnostics report_data['files'] = kept_files return plistlib.dumps(report_data) except KeyError: LOG.error(""Failed to modify plist content, "" ""keeping the original version"") return None ","def remove_report_from_plist(plist_file_obj, skip_handler): """""" Parse the original plist content provided by the analyzer and return a new plist content where reports were removed if they should be skipped. If the remove failed for some reason None will be returned. WARN !!!! If the 'files' array in the plist is modified all of the diagnostic section (control, event ...) nodes should be re indexed to use the proper file array indexes!!! """""" report_data = None try: report_data = parse_plist(plist_file_obj) except plistlib.InvalidFileException: LOG.warning('Invalid plist file') return None except (ExpatError, TypeError, AttributeError) as ex: LOG.error(""Failed to parse plist content, "" ""keeping the original version"") LOG.error(ex) return None file_ids_to_remove = [] try: for i, f in enumerate(report_data['files']): if skip_handler.should_skip(f): file_ids_to_remove.append(i) kept_diagnostics, kept_files = get_kept_report_data(report_data, file_ids_to_remove) report_data['diagnostics'] = kept_diagnostics report_data['files'] = kept_files return plistlib.dumps(report_data) except KeyError: LOG.error(""Failed to modify plist content, "" ""keeping the original version"") return None " 30812,"def add_pr_comment(comment): """"""Add comment to the pull request. 
Args: comment (string): The comment text. """""" token = os.environ['CONTENT_GITHUB_TOKEN'] branch_name = os.environ['CIRCLE_BRANCH'] sha1 = os.environ['CIRCLE_SHA1'] print('branch_name = ' + branch_name) print('sha1 = ' + sha1) query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name) url = 'https://api.github.com/search/issues' headers = {'Authorization': 'Bearer ' + token} try: print('reuest url = ' + url + query) print(url + query) res = requests.get(url + query, headers=headers, verify=False) res = handle_github_response(res) print(res) if res and res.get('total_count', 0) == 1: issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None if issue_url: res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False) handle_github_response(res) else: print_warning('Add pull request comment failed: There is more then one open pull request for branch {}.' .format(branch_name)) except Exception as e: print_warning('Add pull request comment failed: {}'.format(e)) ","def add_pr_comment(comment): """"""Add comment to the pull request. Args: comment (string): The comment text. """""" token = os.environ['CONTENT_GITHUB_TOKEN'] branch_name = os.environ['CIRCLE_BRANCH'] sha1 = os.environ['CIRCLE_SHA1'] print('branch_name = ' + branch_name) print('sha1 = ' + sha1) query = f'?q={sha1}+repo:demisto/content+org:demisto+is:pr+is:open+head:{branch_name}' url = 'https://api.github.com/search/issues' headers = {'Authorization': 'Bearer ' + token} try: print('reuest url = ' + url + query) print(url + query) res = requests.get(url + query, headers=headers, verify=False) res = handle_github_response(res) print(res) if res and res.get('total_count', 0) == 1: issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None if issue_url: res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False) handle_github_response(res) else: print_warning('Add pull request comment failed: There is more then one open pull request for branch {}.' 
.format(branch_name)) except Exception as e: print_warning('Add pull request comment failed: {}'.format(e)) " 31393,"def cisco_stealthwatch_list_tags_command(client: Client, tenant_id: str) -> CommandResults: """"""List tags (called host groups on the Stealthwatch API) based on tenant id Args: client (Client): Cisco Stealthwatch Client tenant_id (str): The id of the tenant to list its tags (tenant is a domain on the API) Returns: CommandResults: Raw response, outputs and readable outputs """""" response = client.list_tags(tenant_id) outputs = [] for tag in response.get('data', []): outputs.append(tag) outputs = sorted(outputs, key=lambda x: x.get('id')) table = tableToMarkdown(f'Tags (for tenant_id: {tenant_id}):', outputs, headers=['displayName', 'id'], removeNull=True) return CommandResults( outputs_prefix='CiscoStealthwatch.Tag', outputs_key_field='id', raw_response=response, outputs=outputs, readable_output=table ) ","def cisco_stealthwatch_list_tags_command(client: Client, tenant_id: str) -> CommandResults: """"""List tags (called host groups on the Stealthwatch API) based on tenant id Args: client (Client): Cisco Stealthwatch Client tenant_id (str): The id of the tenant to list its tags (tenant is a domain on the API) Returns: CommandResults: Raw response, outputs and readable outputs """""" response = client.list_tags(tenant_id) outputs = [] for tag in response.get('data', []): outputs.append(tag) outputs = sorted(outputs, key=lambda x: x.get('id')) table = tableToMarkdown(f'Tags for tenant_id: {tenant_id}:', outputs, headers=['displayName', 'id'], removeNull=True) return CommandResults( outputs_prefix='CiscoStealthwatch.Tag', outputs_key_field='id', raw_response=response, outputs=outputs, readable_output=table ) " 7372,"def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """""" Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. 
Padfield. ""Masked FFT registration"". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """""" if mode not in {'full', 'same'}: raise ValueError(f""Correlation mode '{mode}' is not valid."") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError(""complex-valued arr1, arr2 are not supported"") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. 
number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out ","def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """""" Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `'full'` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. 
A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. ""Masked FFT registration"". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """""" if mode not in {'full', 'same'}: raise ValueError(f""Correlation mode '{mode}' is not valid."") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError(""complex-valued arr1, arr2 are not supported"") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. 
number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out " 57896,"def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str: """""" This command pushes local changes to the remote system. Args: client: XSOAR Client to use. args: args['data']: the data to send to the remote system args['entries']: the entries to send to the remote system args['incident_changed']: boolean telling us if the local incident indeed changed or not args['remote_incident_id']: the remote incident id params: entry_tags: the tags to pass to the entries (to separate between comments and work_notes) Returns: The remote incident id - ticket_id """""" parsed_args = UpdateRemoteSystemArgs(args) if parsed_args.delta: demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}') ticket_type = client.ticket_type ticket_id = parsed_args.remote_incident_id if parsed_args.incident_changed: demisto.debug(f'Incident changed: {parsed_args.incident_changed}') # Closing sc_type ticket. This ticket type can be closed only when changing the ticket state. if (ticket_type == 'sc_task' or ticket_type == 'sc_req_item')\ and parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): parsed_args.data['state'] = '3' # Closing incident ticket. 
if ticket_type == 'incident' and parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): parsed_args.data['state'] = '7' fields = get_ticket_fields(parsed_args.data, ticket_type=ticket_type) if not params.get('close_ticket'): fields = {key: val for key, val in fields.items() if key != 'closed_at' and key != 'resolved_at'} demisto.debug(f'Sending update request to server {ticket_type}, {ticket_id}, {fields}') result = client.update(ticket_type, ticket_id, fields) demisto.info(f'Ticket Update result {result}') entries = parsed_args.entries if entries: demisto.debug(f'New entries {entries}') for entry in entries: demisto.debug(f'Sending entry {entry.get(""id"")}, type: {entry.get(""type"")}') # Mirroring files as entries if entry.get('type') == 3: path_res = demisto.getFilePath(entry.get('id')) full_file_name = path_res.get('name') file_name, file_extension = os.path.splitext(full_file_name) if not file_extension: file_extension = '' client.upload_file(ticket_id, entry.get('id'), file_name + '_mirrored_from_xsoar' + file_extension, ticket_type) else: # Mirroring comment and work notes as entries tags = entry.get('tags', []) key = '' if params.get('work_notes_tag') in tags: key = 'work_notes' elif params.get('comment_tag') in tags: key = 'comments' user = entry.get('user', 'dbot') # Sometimes user is an empty str, not None, therefore nothing is displayed in ServiceNow user = 'dbot' if user == '' else user text = f""({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"" client.add_comment(ticket_id, ticket_type, key, text) return ticket_id ","def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str: """""" This command pushes local changes to the remote system. Args: client: XSOAR Client to use. args: args['data']: the data to send to the remote system args['entries']: the entries to send to the remote system args['incident_changed']: boolean telling us if the local incident indeed changed or not args['remote_incident_id']: the remote incident id params: entry_tags: the tags to pass to the entries (to separate between comments and work_notes) Returns: The remote incident id - ticket_id """""" parsed_args = UpdateRemoteSystemArgs(args) if parsed_args.delta: demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}') ticket_type = client.ticket_type ticket_id = parsed_args.remote_incident_id if parsed_args.incident_changed: demisto.debug(f'Incident changed: {parsed_args.incident_changed}') # Closing sc_type ticket. This ticket type can be closed only when changing the ticket state. if (ticket_type == 'sc_task' or ticket_type == 'sc_req_item')\ and parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): parsed_args.data['state'] = '3' # Closing incident ticket. 
if ticket_type == 'incident' and parsed_args.inc_status == IncidentStatus.DONE and params.get('close_ticket'): parsed_args.data['state'] = '7' fields = get_ticket_fields(parsed_args.data, ticket_type=ticket_type) if not params.get('close_ticket'): fields = {key: val for key, val in fields.items() if key != 'closed_at' and key != 'resolved_at'} demisto.debug(f'Sending update request to server {ticket_type}, {ticket_id}, {fields}') result = client.update(ticket_type, ticket_id, fields) demisto.info(f'Ticket Update result {result}') entries = parsed_args.entries if entries: demisto.debug(f'New entries {entries}') for entry in entries: demisto.debug(f'Sending entry {entry.get(""id"")}, type: {entry.get(""type"")}') # Mirroring files as entries if entry.get('type') == 3: path_res = demisto.getFilePath(entry.get('id')) full_file_name = path_res.get('name') file_name, file_extension = os.path.splitext(full_file_name) if not file_extension: file_extension = '' client.upload_file(ticket_id, entry.get('id'), file_name + '_mirrored_from_xsoar' + file_extension, ticket_type) else: # Mirroring comment and work notes as entries tags = entry.get('tags', []) key = '' if params.get('work_notes_tag') in tags: key = 'work_notes' elif params.get('comment_tag') in tags: key = 'comments' # Sometimes user is an empty str, not None, therefore nothing is displayed in ServiceNow user = entry.get('user', 'dbot') or 'dbot' text = f""({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"" client.add_comment(ticket_id, ticket_type, key, text) return ticket_id " 27192,"def create_prop_docstring(prop_name, type_object, required, description, default, indent_num, is_flow_type=False): """""" Create the Dash component prop docstring Parameters ---------- prop_name: str Name of the Dash component prop type_object: dict react-docgen-generated prop type dictionary required: bool Component is required? description: str Dash component description default: dict Either None if a default value is not defined, or dict containing the key 'value' that defines a default value for the prop indent_num: int Number of indents to use for the context block (creates 2 spaces for every indent) is_flow_type: bool Does the prop use Flow types? Otherwise, uses PropTypes Returns ------- str Dash component prop docstring """""" py_type_name = js_to_py_type( type_object=type_object, is_flow_type=is_flow_type, indent_num=indent_num + 1) indent_spacing = ' ' * indent_num if default is None: default = '' else: default = default['value'] if default in ['true', 'false']: default = default.title() is_required = 'optional' if required: is_required = 'required' elif default and default not in ['null', '{}', '[]']: is_required = 'default {}'.format( default.replace('\n', '\n' + indent_spacing) ) if '\n' in py_type_name: return '{indent_spacing}- {name} (dict; {is_required}): ' \ '{description}{period}\n' \ '{indent_spacing}{name} is a {type}'.format( indent_spacing=indent_spacing, name=prop_name, type=py_type_name, description=description.strip().strip('.'), period='. 
' if description else '', is_required=is_required) return '{indent_spacing}- {name} ({type}' \ '{is_required}){description}'.format( indent_spacing=indent_spacing, name=prop_name, type='{}; '.format(py_type_name) if py_type_name else '', description=( ': {}'.format(description) if description != '' else '' ), is_required=is_required) ","def create_prop_docstring(prop_name, type_object, required, description, default, indent_num, is_flow_type=False): """""" Create the Dash component prop docstring Parameters ---------- prop_name: str Name of the Dash component prop type_object: dict react-docgen-generated prop type dictionary required: bool Component is required? description: str Dash component description default: dict Either None if a default value is not defined, or dict containing the key 'value' that defines a default value for the prop indent_num: int Number of indents to use for the context block (creates 2 spaces for every indent) is_flow_type: bool Does the prop use Flow types? Otherwise, uses PropTypes Returns ------- str Dash component prop docstring """""" py_type_name = js_to_py_type( type_object=type_object, is_flow_type=is_flow_type, indent_num=indent_num + 1) indent_spacing = ' ' * indent_num if default is None: default = '' else: default = default['value'] if default in ['true', 'false']: default = default.title() is_required = 'optional' if required: is_required = 'required' elif default and default not in ['null', '{}', '[]']: is_required = 'default {}'.format( default.replace('\n', '\n' + indent_spacing) ) if '\n' in py_type_name: return '{indent_spacing}- {name} (dict; {is_required}): ' \ '{description}{period}\n' \ '{indent_spacing} {name} is a {type}'.format( indent_spacing=indent_spacing, name=prop_name, type=py_type_name, description=description.strip().strip('.'), period='. ' if description else '', is_required=is_required) return '{indent_spacing}- {name} ({type}' \ '{is_required}){description}'.format( indent_spacing=indent_spacing, name=prop_name, type='{}; '.format(py_type_name) if py_type_name else '', description=( ': {}'.format(description) if description != '' else '' ), is_required=is_required) " 39000,"def validator( *fields: str, pre: bool = False, each_item: bool = False, always: bool = False, check_fields: bool = True, whole: bool = None, allow_reuse: bool = False, ) -> Callable[[AnyCallable], classmethod]: """""" Decorate methods on the class indicating that they should be used to validate fields :param fields: which field(s) the method should be called on :param pre: whether or not this validator should be called before the standard validators (else after) :param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the whole object :param always: whether this method and other validators should be called even if the value is missing :param check_fields: whether to check that the fields actually exist on the model :param allow_reuse: whether to track and raise an error if another validator refers to the decorated function """""" if not fields: raise ConfigError('validator with no fields specified') elif isinstance(fields[0], FunctionType): raise ConfigError( ""validators should be used with fields and keyword arguments, not bare. "" # noqa: Q000 ""E.g. usage should be `@validator('', ...)`"" ) elif not all([isinstance(field, str) for field in fields]): raise ConfigError( ""validator fields should be passed as separate string args. Do not pass multiple fields as a list, etc. "" ""E.g. 
usage should be `@validator('', '', ...)` "" ""NOT `@validator(['', '', ...], ...)`"" ) if whole is not None: warnings.warn( 'The ""whole"" keyword argument is deprecated, use ""each_item"" (inverse meaning, default False) instead', DeprecationWarning, ) assert each_item is False, '""each_item"" and ""whole"" conflict, remove ""whole""' each_item = not whole def dec(f: AnyCallable) -> classmethod: f_cls = _prepare_validator(f, allow_reuse) setattr( f_cls, VALIDATOR_CONFIG_KEY, ( fields, Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields), ), ) return f_cls return dec ","def validator( *fields: str, pre: bool = False, each_item: bool = False, always: bool = False, check_fields: bool = True, whole: bool = None, allow_reuse: bool = False, ) -> Callable[[AnyCallable], classmethod]: """""" Decorate methods on the class indicating that they should be used to validate fields :param fields: which field(s) the method should be called on :param pre: whether or not this validator should be called before the standard validators (else after) :param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the whole object :param always: whether this method and other validators should be called even if the value is missing :param check_fields: whether to check that the fields actually exist on the model :param allow_reuse: whether to track and raise an error if another validator refers to the decorated function """""" if not fields: raise ConfigError('validator with no fields specified') elif isinstance(fields[0], FunctionType): raise ConfigError( ""validators should be used with fields and keyword arguments, not bare. "" # noqa: Q000 ""E.g. usage should be `@validator('', ...)`"" ) elif not all(isinstance(field, str) for field in fields): raise ConfigError( ""validator fields should be passed as separate string args. Do not pass multiple fields as a list, etc. "" ""E.g. usage should be `@validator('', '', ...)` "" ""NOT `@validator(['', '', ...], ...)`"" ) if whole is not None: warnings.warn( 'The ""whole"" keyword argument is deprecated, use ""each_item"" (inverse meaning, default False) instead', DeprecationWarning, ) assert each_item is False, '""each_item"" and ""whole"" conflict, remove ""whole""' each_item = not whole def dec(f: AnyCallable) -> classmethod: f_cls = _prepare_validator(f, allow_reuse) setattr( f_cls, VALIDATOR_CONFIG_KEY, ( fields, Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields), ), ) return f_cls return dec " 10515,"def present(module, dest, regexp, literal, line, insertafter, insertbefore, create, backup, backrefs, firstmatch): diff = {'before': '', 'after': '', 'before_header': '%s (content)' % dest, 'after_header': '%s (content)' % dest} b_dest = to_bytes(dest, errors='surrogate_or_strict') if not os.path.exists(b_dest): if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' 
% dest) b_destpath = os.path.dirname(b_dest) if b_destpath and not os.path.exists(b_destpath) and not module.check_mode: try: os.makedirs(b_destpath) except Exception as e: module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (b_destpath, e[0], e[1])) b_lines = [] else: with open(b_dest, 'rb') as f: b_lines = f.readlines() if module._diff: diff['before'] = to_native(b''.join(b_lines)) if regexp is not None: bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict')) if insertafter not in (None, 'BOF', 'EOF'): bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict')) elif insertbefore not in (None, 'BOF'): bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict')) else: bre_ins = None # index[0] is the line num where regexp has been found # index[1] is the line num where insertafter/insertbefore has been found index = [-1, -1] match = None exact_line_match = False b_line = to_bytes(line, errors='surrogate_or_strict') # The module's doc says # ""If regular expressions are passed to both regexp and # insertafter, insertafter is only honored if no match for regexp is found."" # Therefore: # 1. regexp or literal was found -> ignore insertafter, replace the founded line # 2. regexp or literal was not found -> insert the line after 'insertafter' or 'insertbefore' line # Given the above: # 1. First check that there is no match for regexp: if regexp is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = bre_m.search(b_cur_line) if match_found: index[0] = lineno match = match_found if firstmatch: break # 2. Second check that there is no match for literal: if literal is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = to_bytes( literal, errors='surrogate_or_strict') in b_cur_line if match_found: index[0] = lineno match = match_found if firstmatch: break # 3. When no match found on the previous step, # parse for searching insertafter/insertbefore: if not match: for lineno, b_cur_line in enumerate(b_lines): if b_line == b_cur_line.rstrip(b'\r\n'): index[0] = lineno exact_line_match = True elif bre_ins is not None and bre_ins.search(b_cur_line): if insertafter: # + 1 for the next line index[1] = lineno + 1 if firstmatch: break if insertbefore: # index[1] for the previous line index[1] = lineno if firstmatch: break msg = '' changed = False b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict') # Exact line or Regexp matched a line in the file if index[0] != -1: if backrefs and match: b_new_line = match.expand(b_line) else: # Don't do backref expansion if not asked. b_new_line = b_line if not b_new_line.endswith(b_linesep): b_new_line += b_linesep # If no regexp or literal was given and no line match is found anywhere in the file, # insert the line appropriately if using insertbefore or insertafter if regexp is None and literal is None and match is None and not exact_line_match: # Insert lines if insertafter and insertafter != 'EOF': # Ensure there is a line separator after the found string # at the end of the file. if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines[-1] = b_lines[-1] + b_linesep # If the line to insert after is at the end of the file # use the appropriate index value. 
if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif insertbefore and insertbefore != 'BOF': # If the line to insert before is at the beginning of the file # use the appropriate index value. if index[1] <= 0: if b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[0]] != b_new_line: b_lines[index[0]] = b_new_line msg = 'line replaced' changed = True elif backrefs: # Do absolutely nothing, since it's not safe generating the line # without the regexp matching to populate the backrefs. pass # Add it to the beginning of the file elif insertbefore == 'BOF' or insertafter == 'BOF': b_lines.insert(0, b_line + b_linesep) msg = 'line added' changed = True # Add it to the end of the file if requested or # if insertafter/insertbefore didn't match anything # (so default behaviour is to add at the end) elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines.append(b_linesep) b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif insertafter and index[1] != -1: # Don't insert the line if it already matches at the index. # If the line to insert after is at the end of the file use the appropriate index value. if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_line != b_lines[index[1]].rstrip(b'\n\r'): b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True # insert matched, but not the regexp or literal else: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True if module._diff: diff['after'] = to_native(b''.join(b_lines)) backupdest = """" if changed and not module.check_mode: if backup and os.path.exists(b_dest): backupdest = module.backup_local(dest) write_changes(module, b_lines, dest) if module.check_mode and not os.path.exists(b_dest): module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) attr_diff = {} msg, changed = check_file_attrs(module, changed, msg, attr_diff) attr_diff['before_header'] = '%s (file attributes)' % dest attr_diff['after_header'] = '%s (file attributes)' % dest difflist = [diff, attr_diff] module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist) ","def present(module, dest, regexp, literal, line, insertafter, insertbefore, create, backup, backrefs, firstmatch): diff = {'before': '', 'after': '', 'before_header': '%s (content)' % dest, 'after_header': '%s (content)' % dest} b_dest = to_bytes(dest, errors='surrogate_or_strict') if not os.path.exists(b_dest): if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' 
% dest) b_destpath = os.path.dirname(b_dest) if b_destpath and not os.path.exists(b_destpath) and not module.check_mode: try: os.makedirs(b_destpath) except Exception as e: module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (b_destpath, e[0], e[1])) b_lines = [] else: with open(b_dest, 'rb') as f: b_lines = f.readlines() if module._diff: diff['before'] = to_native(b''.join(b_lines)) if regexp is not None: bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict')) if insertafter not in (None, 'BOF', 'EOF'): bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict')) elif insertbefore not in (None, 'BOF'): bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict')) else: bre_ins = None # index[0] is the line num where regexp has been found # index[1] is the line num where insertafter/insertbefore has been found index = [-1, -1] match = None exact_line_match = False b_line = to_bytes(line, errors='surrogate_or_strict') # The module's doc says # ""If regular expressions are passed to both regexp and # insertafter, insertafter is only honored if no match for regexp is found."" # Therefore: # 1. regexp or literal was found -> ignore insertafter, replace the founded line # 2. regexp or literal was not found -> insert the line after 'insertafter' or 'insertbefore' line # Given the above: # 1. First check that there is no match for regexp: if regexp is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = bre_m.search(b_cur_line) if match_found: index[0] = lineno match = match_found if firstmatch: break # 2. Second check that there is no match for literal: if literal is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = to_bytes(literal, errors='surrogate_or_strict') in b_cur_line if match_found: index[0] = lineno match = match_found if firstmatch: break # 3. When no match found on the previous step, # parse for searching insertafter/insertbefore: if not match: for lineno, b_cur_line in enumerate(b_lines): if b_line == b_cur_line.rstrip(b'\r\n'): index[0] = lineno exact_line_match = True elif bre_ins is not None and bre_ins.search(b_cur_line): if insertafter: # + 1 for the next line index[1] = lineno + 1 if firstmatch: break if insertbefore: # index[1] for the previous line index[1] = lineno if firstmatch: break msg = '' changed = False b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict') # Exact line or Regexp matched a line in the file if index[0] != -1: if backrefs and match: b_new_line = match.expand(b_line) else: # Don't do backref expansion if not asked. b_new_line = b_line if not b_new_line.endswith(b_linesep): b_new_line += b_linesep # If no regexp or literal was given and no line match is found anywhere in the file, # insert the line appropriately if using insertbefore or insertafter if regexp is None and literal is None and match is None and not exact_line_match: # Insert lines if insertafter and insertafter != 'EOF': # Ensure there is a line separator after the found string # at the end of the file. if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines[-1] = b_lines[-1] + b_linesep # If the line to insert after is at the end of the file # use the appropriate index value. 
if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif insertbefore and insertbefore != 'BOF': # If the line to insert before is at the beginning of the file # use the appropriate index value. if index[1] <= 0: if b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[0]] != b_new_line: b_lines[index[0]] = b_new_line msg = 'line replaced' changed = True elif backrefs: # Do absolutely nothing, since it's not safe generating the line # without the regexp matching to populate the backrefs. pass # Add it to the beginning of the file elif insertbefore == 'BOF' or insertafter == 'BOF': b_lines.insert(0, b_line + b_linesep) msg = 'line added' changed = True # Add it to the end of the file if requested or # if insertafter/insertbefore didn't match anything # (so default behaviour is to add at the end) elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines.append(b_linesep) b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif insertafter and index[1] != -1: # Don't insert the line if it already matches at the index. # If the line to insert after is at the end of the file use the appropriate index value. if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_line != b_lines[index[1]].rstrip(b'\n\r'): b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True # insert matched, but not the regexp or literal else: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True if module._diff: diff['after'] = to_native(b''.join(b_lines)) backupdest = """" if changed and not module.check_mode: if backup and os.path.exists(b_dest): backupdest = module.backup_local(dest) write_changes(module, b_lines, dest) if module.check_mode and not os.path.exists(b_dest): module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) attr_diff = {} msg, changed = check_file_attrs(module, changed, msg, attr_diff) attr_diff['before_header'] = '%s (file attributes)' % dest attr_diff['after_header'] = '%s (file attributes)' % dest difflist = [diff, attr_diff] module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist) " 24992,"def _is_trivial_super_delegation(function: nodes.FunctionDef) -> bool: """"""Check whether a function definition is a method consisting only of a call to the same function on the superclass. """""" if ( not function.is_method() # With decorators is a change of use or function.decorators ): return False body = function.body if len(body) != 1: # Multiple statements, which means this overridden method # could do multiple things we are not aware of. return False statement = body[0] if not isinstance(statement, (nodes.Expr, nodes.Return)): # Doing something else than what we are interested into. return False call = statement.value if ( not isinstance(call, nodes.Call) # Not a super() attribute access. 
or not isinstance(call.func, nodes.Attribute) ): return False # Should be a super call. try: super_call = next(call.func.expr.infer()) except astroid.InferenceError: return False else: if not isinstance(super_call, astroid.objects.Super): return False # The name should be the same. if call.func.attrname != function.name: return False # Should be a super call with the MRO pointer being the # current class and the type being the current instance. current_scope = function.parent.scope() if ( super_call.mro_pointer != current_scope or not isinstance(super_call.type, astroid.Instance) or super_call.type.name != current_scope.name ): return False return True ","def _is_trivial_super_delegation(function: nodes.FunctionDef) -> bool: """"""Check whether a function definition is a method consisting only of a call to the same function on the superclass. """""" if ( not function.is_method() # With decorators is a change of use or function.decorators ): return False body = function.body if len(body) != 1: # Multiple statements, which means this overridden method # could do multiple things we are not aware of. return False statement = body[0] if not isinstance(statement, (nodes.Expr, nodes.Return)): # Doing something else than what we are interested in. return False call = statement.value if ( not isinstance(call, nodes.Call) # Not a super() attribute access. or not isinstance(call.func, nodes.Attribute) ): return False # Should be a super call. try: super_call = next(call.func.expr.infer()) except astroid.InferenceError: return False else: if not isinstance(super_call, astroid.objects.Super): return False # The name should be the same. if call.func.attrname != function.name: return False # Should be a super call with the MRO pointer being the # current class and the type being the current instance. current_scope = function.parent.scope() if ( super_call.mro_pointer != current_scope or not isinstance(super_call.type, astroid.Instance) or super_call.type.name != current_scope.name ): return False return True " 32594,"def test_get_a_list_of_threats_command(): """""" When: - Retrieving list of cases identified Then - Assert the context data is as expected. - Assert output prefix data is as expected """""" client = Client(base_url=""url"") messages, last_run = get_threats(client, '2022-04-02T18:44:38Z') assert messages == [{'abxMessageId': 3, 'receivedTime': '2022-06-01T18:44:38Z', 'threatId': '123456789-1'}, {'abxMessageId': 3, 'receivedTime': '2022-06-02T18:44:38Z', 'threatId': '123456789-2'}, {'abxMessageId': 3, 'receivedTime': '2022-06-03T18:44:38Z', 'threatId': '123456789-3'}, {'abxMessageId': 2, 'receivedTime': '2022-08-01T18:44:38Z', 'threatId': '123456789-1'}, {'abxMessageId': 2, 'receivedTime': '2022-08-02T18:44:38Z', 'threatId': '123456789-2'}, {'abxMessageId': 2, 'receivedTime': '2022-08-03T18:44:38Z', 'threatId': '123456789-3'}] ","def test_get_a_list_of_threats_command(): """""" When: - Retrieving list of cases identified Then - Assert the context data is as expected. 
- Assert output prefix data is as expected """""" client = Client(base_url=""url"") messages, last_run = get_threats(client, after='2022-04-02T18:44:38Z') assert messages == [{'abxMessageId': 3, 'receivedTime': '2022-06-01T18:44:38Z', 'threatId': '123456789-1'}, {'abxMessageId': 3, 'receivedTime': '2022-06-02T18:44:38Z', 'threatId': '123456789-2'}, {'abxMessageId': 3, 'receivedTime': '2022-06-03T18:44:38Z', 'threatId': '123456789-3'}, {'abxMessageId': 2, 'receivedTime': '2022-08-01T18:44:38Z', 'threatId': '123456789-1'}, {'abxMessageId': 2, 'receivedTime': '2022-08-02T18:44:38Z', 'threatId': '123456789-2'}, {'abxMessageId': 2, 'receivedTime': '2022-08-03T18:44:38Z', 'threatId': '123456789-3'}] " 1420,"def generate_table(contributors): lines = [ ("".. raw :: html\n""), ("" ""), (""
""), ("" ""), ] for contributor in group_iterable(contributors): lines.append(""
"") lines.append( ""
"" % (contributor[""html_url""], contributor[""avatar_url""])) lines.append(""
%s "" % (contributor[""name""], )) lines.append("" "") lines.append(""
"") return '\n'.join(lines) ","def generate_table(contributors): lines = [ ("".. raw :: html\n""), ("" ""), (""
""), ("" ""), ] for contributor in group_iterable(contributors): lines.append(""
"") lines.append( ""
"" % (contributor[""html_url""], contributor[""avatar_url""])) lines.append(""
%s "" % (contributor[""name""], )) lines.append("" "") lines.append(""
"") return '\n'.join(lines) " 3108,"def test_categorical_transform_aggregate(): # GH 29037 df = pd.DataFrame( { ""package_id"": [1, 1, 1, 2, 2, 3], ""status"": [ ""Waiting"", ""OnTheWay"", ""Delivered"", ""Waiting"", ""OnTheWay"", ""Waiting"", ], } ) delivery_status_type = pd.CategoricalDtype( categories=[""Waiting"", ""OnTheWay"", ""Delivered""], ordered=True ) df[""status""] = df[""status""].astype(delivery_status_type) df[""last_status""] = df.groupby(""package_id"")[""status""].transform(max) result = df.copy() expected = pd.DataFrame( { ""package_id"": [1, 1, 1, 2, 2, 3], ""status"": [ ""Waiting"", ""OnTheWay"", ""Delivered"", ""Waiting"", ""OnTheWay"", ""Waiting"", ], ""last_status"": [ ""Delivered"", ""Delivered"", ""Delivered"", ""OnTheWay"", ""OnTheWay"", ""Waiting"", ], } ) expected[""status""] = expected[""status""].astype(delivery_status_type) tm.assert_frame_equal(result, expected) ","def test_categorical_transform(): # GH 29037 df = pd.DataFrame( { ""package_id"": [1, 1, 1, 2, 2, 3], ""status"": [ ""Waiting"", ""OnTheWay"", ""Delivered"", ""Waiting"", ""OnTheWay"", ""Waiting"", ], } ) delivery_status_type = pd.CategoricalDtype( categories=[""Waiting"", ""OnTheWay"", ""Delivered""], ordered=True ) df[""status""] = df[""status""].astype(delivery_status_type) df[""last_status""] = df.groupby(""package_id"")[""status""].transform(max) result = df.copy() expected = pd.DataFrame( { ""package_id"": [1, 1, 1, 2, 2, 3], ""status"": [ ""Waiting"", ""OnTheWay"", ""Delivered"", ""Waiting"", ""OnTheWay"", ""Waiting"", ], ""last_status"": [ ""Delivered"", ""Delivered"", ""Delivered"", ""OnTheWay"", ""OnTheWay"", ""Waiting"", ], } ) expected[""status""] = expected[""status""].astype(delivery_status_type) tm.assert_frame_equal(result, expected) " 1793,"def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None, sample_weight=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, default=None Pseudo-random number generator to control the permutations of each feature. 
Pass an int to get reproducible results across function calls. See :term: `Glossary `. sample_weight : array-like of shape (n_samples,), optional Sample weights. Returns ------- result : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9], ... [0, 9, 9],[0, 9, 9],[0, 9, 9]] >>> y = [1, 1, 1, 0, 0, 0] >>> clf = LogisticRegression().fit(X, y) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) >>> result.importances_mean array([0.4666..., 0. , 0. ]) >>> result.importances_std array([0.2211..., 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y, sample_weight) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, sample_weight, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) ","def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None, sample_weight=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. 
n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, default=None Pseudo-random number generator to control the permutations of each feature. Pass an int to get reproducible results across function calls. See :term: `Glossary `. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- result : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9], ... [0, 9, 9],[0, 9, 9],[0, 9, 9]] >>> y = [1, 1, 1, 0, 0, 0] >>> clf = LogisticRegression().fit(X, y) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) >>> result.importances_mean array([0.4666..., 0. , 0. ]) >>> result.importances_std array([0.2211..., 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). 
random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y, sample_weight) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, sample_weight, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) " 28178,"def test_get_varlen_array_in_scalar_param_data(varlen_array_in_scalar_dataset): input_names = ['testparameter'] expected_names = {} expected_names['testparameter'] = ['testparameter', 'scalarparam', 'this_setpoint'] expected_shapes = {} n = 9 n_points = (n*(n+1))//2 scalar_param_values = [] setpoint_param_values = [] for i in range(1, 10): for j in range(i): setpoint_param_values.append(j) scalar_param_values.append(i) np.random.seed(0) test_parameter_values = np.random.rand(n_points) scalar_param_values = np.array(scalar_param_values) setpoint_param_values = np.array(setpoint_param_values) expected_shapes['testparameter'] = {} expected_shapes['testparameter'] = [(n_points,), (n_points,)] expected_values = {} expected_values['testparameter'] = [ test_parameter_values.ravel(), scalar_param_values.ravel(), setpoint_param_values.ravel()] parameter_test_helper(varlen_array_in_scalar_dataset, input_names, expected_names, expected_shapes, expected_values) ","def test_get_varlen_array_in_scalar_param_data(varlen_array_in_scalar_dataset): input_names = ['testparameter'] expected_names = {} expected_names['testparameter'] = ['testparameter', 'scalarparam', 'this_setpoint'] expected_shapes = {} n = 9 n_points = (n*(n+1))//2 scalar_param_values = [] setpoint_param_values = [] for i in range(1, n + 1): for j in range(i): setpoint_param_values.append(j) scalar_param_values.append(i) np.random.seed(0) test_parameter_values = np.random.rand(n_points) scalar_param_values = np.array(scalar_param_values) setpoint_param_values = np.array(setpoint_param_values) expected_shapes['testparameter'] = {} expected_shapes['testparameter'] = [(n_points,), (n_points,)] expected_values = {} expected_values['testparameter'] = [ test_parameter_values.ravel(), scalar_param_values.ravel(), setpoint_param_values.ravel()] parameter_test_helper(varlen_array_in_scalar_dataset, input_names, expected_names, expected_shapes, expected_values) " 42157,"def test_roles_info_command(cog, ctx): everyone_role = MagicMock() everyone_role.name = '@everyone' # should be excluded in the output ctx.author.roles.append(everyone_role) ctx.guild.roles = ctx.author.roles cog.roles_info.can_run = AsyncMock() cog.roles_info.can_run.return_value = True coroutine = cog.roles_info.invoke(ctx) expected = Embed( title=""Role information"", colour=Colour.blurple(), description=f""`{ctx.guild.roles[0].id}` - {ctx.guild.roles[0].mention}\n"" ) expected.set_footer(text=""Total roles: 1"") assert asyncio.run(coroutine) is None # no rval ctx.send.assert_called_once() (_, kwargs) = ctx.send.call_args embed = kwargs.pop('embed') assert embed.title == expected.title assert embed.colour == expected.colour assert embed.description == expected.description assert embed.footer.text == expected.footer.text ","def test_roles_info_command(cog, ctx): everyone_role = MagicMock() everyone_role.name = '@everyone' # should be excluded in the output ctx.author.roles.append(everyone_role) 
ctx.guild.roles = ctx.author.roles cog.roles_info.can_run = AsyncMock() cog.roles_info.can_run.return_value = True coroutine = cog.roles_info.invoke(ctx) expected = Embed( title=""Role information"", colour=Colour.blurple(), description=f""`{ctx.guild.roles[0].id}` - {ctx.guild.roles[0].mention}\n"" ) expected.set_footer(text=""Total roles: 1"") assert asyncio.run(coroutine) is None # no rval ctx.send.assert_called_once() _, kwargs = ctx.send.call_args embed = kwargs.pop('embed') assert embed.title == expected.title assert embed.colour == expected.colour assert embed.description == expected.description assert embed.footer.text == expected.footer.text " 47888,"def main(): parser = argparse.ArgumentParser() parser.add_argument('--upgrade', action='store_true', help='Bump package versions') args = parser.parse_args() if sys.version_info[:2] != EXPECTED_PYTHON_VERSION: sys.exit(""run this with Python {}"".format('.'.join(map(str, EXPECTED_PYTHON_VERSION)))) if 'INTEL_OPENVINO_DIR' not in os.environ: sys.exit(""run OpenVINO toolkit's setupvars.sh before this"") openvino_dir = Path(os.environ['INTEL_OPENVINO_DIR']) pc = functools.partial(pip_compile, upgrade=args.upgrade) pc('ci/requirements-ac.txt', 'tools/accuracy_checker/requirements_core.in', 'tools/accuracy_checker/requirements.in') pc('ci/requirements-ac-test.txt', 'tools/accuracy_checker/requirements.in', 'tools/accuracy_checker/requirements-test.in', 'tools/accuracy_checker/requirements_core.in') pc('ci/requirements-conversion.txt', 'tools/downloader/requirements-pytorch.in', 'tools/downloader/requirements-caffe2.in', openvino_dir / 'deployment_tools/model_optimizer/requirements.txt') pc('ci/requirements-demos.txt', 'demos/python_demos/requirements.txt', openvino_dir / 'python/requirements.txt') pc('ci/requirements-downloader.txt', 'tools/downloader/requirements.in') ","def main(): parser = argparse.ArgumentParser() parser.add_argument('--upgrade', action='store_true', help='Bump package versions') args = parser.parse_args() if sys.version_info[:2] != EXPECTED_PYTHON_VERSION: sys.exit(""run this with Python {}"".format('.'.join(map(str, EXPECTED_PYTHON_VERSION)))) if 'INTEL_OPENVINO_DIR' not in os.environ: sys.exit(""run OpenVINO toolkit's setupvars.sh before this"") openvino_dir = Path(os.environ['INTEL_OPENVINO_DIR']) pc = functools.partial(pip_compile, upgrade=args.upgrade) pc('ci/requirements-ac.txt', 'tools/accuracy_checker/requirements-core.in', 'tools/accuracy_checker/requirements.in') pc('ci/requirements-ac-test.txt', 'tools/accuracy_checker/requirements.in', 'tools/accuracy_checker/requirements-test.in', 'tools/accuracy_checker/requirements_core.in') pc('ci/requirements-conversion.txt', 'tools/downloader/requirements-pytorch.in', 'tools/downloader/requirements-caffe2.in', openvino_dir / 'deployment_tools/model_optimizer/requirements.txt') pc('ci/requirements-demos.txt', 'demos/python_demos/requirements.txt', openvino_dir / 'python/requirements.txt') pc('ci/requirements-downloader.txt', 'tools/downloader/requirements.in') " 56232,"def convert_to_onnx(model, input_shapes, output_file, input_names, output_names): """"""Convert PyTorch model to ONNX and check the resulting onnx model"""""" output_file.parent.mkdir(parents=True, exist_ok=True) model.eval() dummy_inputs = [torch.randn(input_shape) for input_shape in input_shapes] model(*dummy_inputs) torch.onnx.export(model, tuple(dummy_inputs), str(output_file), verbose=False, opset_version=11, input_names=input_names.split(','), output_names=output_names.split(',')) model = 
onnx.load(str(output_file)) # Model Optimizer takes output names from ONNX node names if they exist. # However, the names PyTorch assigns to the ONNX nodes are generic and # non-descriptive (e.g. ""Gemm_151""). By deleting these names, we make # MO fall back to the ONNX output names, which we can set to whatever we want. for node in model.graph.node: node.ClearField('name') try: onnx.checker.check_model(model) print('ONNX check passed successfully.') except onnx.onnx_cpp2py_export.checker.ValidationError as exc: sys.exit('ONNX check failed with error: ' + str(exc)) onnx.save(model, str(output_file)) ","def convert_to_onnx(model, input_shapes, output_file, input_names, output_names): """"""Convert PyTorch model to ONNX and check the resulting onnx model"""""" output_file.parent.mkdir(parents=True, exist_ok=True) model.eval() dummy_inputs = tuple(torch.randn(input_shape) for input_shape in input_shapes) model(*dummy_inputs) torch.onnx.export(model, dummy_inputs, str(output_file), verbose=False, opset_version=11, input_names=input_names.split(','), output_names=output_names.split(',')) model = onnx.load(str(output_file)) # Model Optimizer takes output names from ONNX node names if they exist. # However, the names PyTorch assigns to the ONNX nodes are generic and # non-descriptive (e.g. ""Gemm_151""). By deleting these names, we make # MO fall back to the ONNX output names, which we can set to whatever we want. for node in model.graph.node: node.ClearField('name') try: onnx.checker.check_model(model) print('ONNX check passed successfully.') except onnx.onnx_cpp2py_export.checker.ValidationError as exc: sys.exit('ONNX check failed with error: ' + str(exc)) onnx.save(model, str(output_file)) " 40589,"def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None, line_length_factor=1.25, potential_mode='simple', solver_name=""cbc"", algorithm=""kmeans"", extended_link_costs=0, focus_weights=None): if potential_mode == 'simple': p_nom_max_strategy = np.sum elif potential_mode == 'conservative': p_nom_max_strategy = np.min else: raise AttributeError(f""potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'"") if custom_busmap is False: busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm) else: busmap = custom_busmap clustering = get_clustering_from_busmap( n, busmap, bus_strategies=dict(country=_make_consense(""Bus"", ""country"")), aggregate_generators_weighted=True, aggregate_generators_carriers=aggregate_carriers, aggregate_one_ports=[""Load"", ""StorageUnit""], line_length_factor=line_length_factor, generator_strategies={'p_nom_max': p_nom_max_strategy, 'p_nom_min': np.sum}, scale_link_capital_costs=False) if not n.links.empty: nc = clustering.network nc.links['underwater_fraction'] = (n.links.eval('underwater_fraction * length') .div(nc.links.length).dropna()) nc.links['capital_cost'] = (nc.links['capital_cost'] .add((nc.links.length - n.links.length) .clip(lower=0).mul(extended_link_costs), fill_value=0)) return clustering ","def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None, line_length_factor=1.25, potential_mode='simple', solver_name=""cbc"", algorithm=""kmeans"", extended_link_costs=0, focus_weights=None): if potential_mode == 'simple': p_nom_max_strategy = np.sum elif potential_mode == 'conservative': p_nom_max_strategy = np.min else: raise AttributeError(f""potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'"") if not 
custom_busmap: busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm) else: busmap = custom_busmap clustering = get_clustering_from_busmap( n, busmap, bus_strategies=dict(country=_make_consense(""Bus"", ""country"")), aggregate_generators_weighted=True, aggregate_generators_carriers=aggregate_carriers, aggregate_one_ports=[""Load"", ""StorageUnit""], line_length_factor=line_length_factor, generator_strategies={'p_nom_max': p_nom_max_strategy, 'p_nom_min': np.sum}, scale_link_capital_costs=False) if not n.links.empty: nc = clustering.network nc.links['underwater_fraction'] = (n.links.eval('underwater_fraction * length') .div(nc.links.length).dropna()) nc.links['capital_cost'] = (nc.links['capital_cost'] .add((nc.links.length - n.links.length) .clip(lower=0).mul(extended_link_costs), fill_value=0)) return clustering " 36285,"def get_json(session, url, params: dict = None): """""" Get JSON from a Forest endpoint. """""" logger.debug(f""Sending request to {url}: {params}"") res = session.get(url, params=params) if res.status_code >= 400: raise parse_error(res) return res.json() ","def get_json(session, url, params: dict = None): """""" Get JSON from a Forest endpoint. """""" logger.debug(f""Sending GET request to {url}: {params}"") res = session.get(url, params=params) if res.status_code >= 400: raise parse_error(res) return res.json() " 13315,"def _analyze_vulnerability(request, vulnerability_record, origin): metrics = request.find_service(IMetricsService, context=None) metrics.increment(f""warehouse.vulnerabilities.{origin}.received"") try: report = VulnerabilityReport.from_api_record(record=vulnerability_record) except InvalidVulnerabilityReport as exc: metrics.increment(f""warehouse.token_leak.{origin}.error.{exc.reason}"") raise metrics.increment(f""warehouse.vulnerabilities.{origin}.valid"") try: vulnerability = ( request.db.query(Vulnerability) .filter(Vulnerability.id == report.vulnerability_id) .one() ) if not report.versions: # No versions indicates the vulnerability is no longer considered # valid, so delete it. request.db.delete(vulnerability) return except NoResultFound: if not report.versions: return vulnerability = Vulnerability( id=report.vulnerability_id, link=report.advisory_link, source=origin ) request.db.add(vulnerability) try: project = ( request.db.query(Project) .filter( Project.normalized_name == func.normalize_pep426_name(report.project) ) .one() ) except NoResultFound: # TODO: metric return for version in report.versions: try: release = ( request.db.query(Release) .filter( (Release.project == project) & (Release.canonical_version == version) ) .one() ) except NoResultFound: # TODO: metric continue if release not in vulnerability.releases: vulnerability.releases.append(version) # Delete any releases that no longer apply. 
for release in list(vulnerability.releases): if release.canonical_version not in report.versions: vulnerability.releases.remove(release) metrics.increment(f""warehouse.token_leak.{origin}.processed"") ","def _analyze_vulnerability(request, vulnerability_record, origin): metrics = request.find_service(IMetricsService, context=None) metrics.increment(f""warehouse.vulnerabilities.{origin}.received"") try: report = VulnerabilityReport.from_api_record(record=vulnerability_record) except InvalidVulnerabilityReport as exc: metrics.increment(f""warehouse.token_leak.{origin}.error.{exc.reason}"") raise metrics.increment(f""warehouse.vulnerabilities.{origin}.valid"") try: vulnerability = ( request.db.query(Vulnerability) .filter(Vulnerability.id == report.vulnerability_id) .one() ) if not report.versions: # No versions indicates the vulnerability is no longer considered # valid, so delete it. request.db.delete(vulnerability) return except NoResultFound: if not report.versions: return vulnerability = Vulnerability( id=report.vulnerability_id, link=report.advisory_link, source=origin ) request.db.add(vulnerability) try: project = ( request.db.query(Project) .filter( Project.normalized_name == func.normalize_pep426_name(report.project) ) .one() ) except NoResultFound: # TODO: metric return for version in report.versions: try: release = ( request.db.query(Release) .filter( (Release.project == project) & (Release.canonical_version == version) ) .one() ) except NoResultFound: # TODO: metric continue if release not in vulnerability.releases: vulnerability.releases.append(version) # Unassociate any releases that no longer apply. for release in list(vulnerability.releases): if release.canonical_version not in report.versions: vulnerability.releases.remove(release) metrics.increment(f""warehouse.token_leak.{origin}.processed"") " 9853,"def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict( cluster_name=dict(type='str', required=True), evc_mode=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['cluster_name', 'evc_mode']] ] ) state = module.params['state'] cluster_name = module.params['cluster_name'] evc_mode = module.params['evc_mode'] content = connect_to_api(module, False) results = dict(changed=False, result=dict()) cluster = find_cluster_by_name(content, cluster_name) evcm = cluster.EvcManager() evc_state = evcm.evcState current_evc_mode = evc_state.currentEVCModeKey supported_evc_modes = evc_state.supportedEVCMode if state == 'present' and current_evc_mode != evc_mode: try: if not module.check_mode: evc_task = evcm.ConfigureEvcMode_Task(evc_mode) wait_for_task(evc_task) results['changed'] = True results['result'] = ""EVC Mode for '%s' has been enabled."" % (evc_mode) except TaskError as invalid_argument: module.fail_json(msg=""Failed to update EVC mode: %s"" % to_native(invalid_argument)) elif state == 'present' and current_evc_mode == evc_mode: results['changed'] = False results['result'] = ""EVC Mode for '%s' is already enabled."" % (evc_mode) elif state == 'absent' and not current_evc_mode: results['changed'] = False results['result'] = ""EVC Mode is already disabled."" elif state == 'absent': try: if not module.check_mode: evc_disable_task = evcm.DisableEvcMode_Task() wait_for_task(evc_disable_task) results['changed'] = True results['result'] = ""EVC Mode has been disabled."" except TaskError as 
invalid_argument: module.fail_json(msg=""Failed to disable EVC mode: %s"" % to_native(invalid_argument)) module.exit_json(**results) ","def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict( cluster_name=dict(type='str', required=True), evc_mode=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'present', ['cluster_name', 'evc_mode']] ] ) state = module.params['state'] cluster_name = module.params['cluster_name'] evc_mode = module.params['evc_mode'] content = connect_to_api(module, False) results = dict(changed=False, result=dict()) cluster = find_cluster_by_name(content, cluster_name) evcm = cluster.EvcManager() evc_state = evcm.evcState current_evc_mode = evc_state.currentEVCModeKey supported_evc_modes = evc_state.supportedEVCMode if state == 'present' and current_evc_mode != evc_mode: try: if not module.check_mode: evc_task = evcm.ConfigureEvcMode_Task(evc_mode) wait_for_task(evc_task) results['changed'] = True results['result'] = ""EVC Mode for '%s' has been enabled."" % (evc_mode) except TaskError as invalid_argument: module.fail_json(msg=""Failed to update EVC mode: %s"" % to_native(invalid_argument)) elif state == 'present' and current_evc_mode == evc_mode: results['changed'] = False results['result'] = ""EVC Mode for '%s' is already enabled."" % (evc_mode) elif state == 'absent' and not current_evc_mode: results['changed'] = False results['result'] = ""EVC Mode is already disabled."" elif state == 'absent': try: if not module.check_mode: evc_disable_task = evcm.DisableEvcMode_Task() changed, result = wait_for_task(evc_disable_task) results['changed'] = True results['result'] = ""EVC Mode has been disabled."" except TaskError as invalid_argument: module.fail_json(msg=""Failed to disable EVC mode: %s"" % to_native(invalid_argument)) module.exit_json(**results) " 8398,"def template_correlate(observed_spectrum, template_spectrum): """""" Compute cross-correlation of the observed and template spectra Parameters ---------- observed_spectrum : :class:`~specutils.Spectrum1D` The observed spectrum. template_spectrum : :class:`~specutils.Spectrum1D` The template spectrum, which will be correlated with the observed spectrum. Returns ------- tuple : (`~astropy.units.Quantity`, `~astropy.units.Quantity`) Arrays with correlation values and lags in km/s """""" # Normalize template normalization = _normalize(observed_spectrum, template_spectrum) # Correlation corr = np.correlate(observed_spectrum.flux.value, (template_spectrum.flux.value * normalization), mode='full') # Lag in km/s equiv = getattr(u.equivalencies, 'doppler_{0}'.format( observed_spectrum.velocity_convention))(observed_spectrum.rest_value) lag = observed_spectrum.spectral_axis.to(u.km / u.s, equivalencies=equiv) return (corr * u.dimensionless_unscaled, lag) ","def template_correlate(observed_spectrum, template_spectrum): """""" Compute cross-correlation of the observed and template spectra Parameters ---------- observed_spectrum : :class:`~specutils.Spectrum1D` The observed spectrum. template_spectrum : :class:`~specutils.Spectrum1D` The template spectrum, which will be correlated with the observed spectrum. 
Returns ------- (`~astropy.units.Quantity`, `~astropy.units.Quantity`) Arrays with correlation values and lags in km/s """""" # Normalize template normalization = _normalize(observed_spectrum, template_spectrum) # Correlation corr = np.correlate(observed_spectrum.flux.value, (template_spectrum.flux.value * normalization), mode='full') # Lag in km/s equiv = getattr(u.equivalencies, 'doppler_{0}'.format( observed_spectrum.velocity_convention))(observed_spectrum.rest_value) lag = observed_spectrum.spectral_axis.to(u.km / u.s, equivalencies=equiv) return (corr * u.dimensionless_unscaled, lag) " 11009,"def action_method(*, permissions=None, description=None): """""" Conveniently add attributes to an action method:: @admin.action_method( permissions=['publish'], description='Mark selected stories as published', ) def make_published(self, request, queryset): queryset.update(status='p') This is equivalent to setting the attributes on the method directly:: def make_published(self, request, queryset): queryset.update(status='p') make_published.allowed_permissions = ['publish'] make_published.short_description = 'Mark selected stories as published' """""" def decorator(f): if permissions is not None: f.allowed_permissions = permissions if description is not None: f.short_description = description return f return decorator ","def action_method(*, permissions=None, description=None): """""" Conveniently add attributes to an action function:: @admin.action_method( permissions=['publish'], description='Mark selected stories as published', ) def make_published(self, request, queryset): queryset.update(status='p') This is equivalent to setting the attributes on the method directly:: def make_published(self, request, queryset): queryset.update(status='p') make_published.allowed_permissions = ['publish'] make_published.short_description = 'Mark selected stories as published' """""" def decorator(f): if permissions is not None: f.allowed_permissions = permissions if description is not None: f.short_description = description return f return decorator " 8907,"def parse_insta_json(json): # Parse JSON content needed = _get_json_data(json) dimensions = needed.get('dimensions', {}) owner = needed.get('owner', {}) # Build bot response parts = [] # Title if needed.get('is_video'): title = ""Video by "" else: title = ""Photo by "" # Author iuser = owner.get('username') ifname = owner.get('full_name') if ifname and iuser: parts.append('%s %s (@%s)' % (title, ifname, iuser)) elif iuser: parts.append('%s @%s' % (title, iuser)) elif ifname: parts.append('%s %s' % (title, ifname)) else: parts.append('%s unknown user' % title) # Media caption try: icap = needed['edge_media_to_caption']['edges'][0]['node']['text'] # Strip newlines icap = icap.replace('\n', ' ') # Truncate caption icap = (icap[:256] + '…') if len(icap) > 256 else icap except (KeyError, IndexError): icap = None if icap: parts.append(icap) # Media width and height iwidth = dimensions.get('width') or None iheight = dimensions.get('height') or None if iwidth and iheight: parts.append('%sx%s' % (iwidth, iheight)) # Likes ilikes = str(needed.get('edge_media_preview_like', {}).get('count')) if ilikes and ilikes.isdigit(): parts.append( _format_counter(int(ilikes), 'No ♥ yet', '1 ♥', '{number} ♥s')) # Comments icomms = str(needed.get('edge_media_to_parent_comment', {}).get('count')) if icomms and icomms.isdigit(): parts.append(_format_counter(int(icomms), 'No comment', '1 comment', '{number} comments')) # Publishing date idate = needed.get('taken_at_timestamp') if 
idate: dateformat = '%Y-%m-%d %H:%M:%S' pubdate = datetime.utcfromtimestamp(idate).strftime(dateformat) parts.append('Uploaded: %s' % pubdate) # Build the message return ' | '.join(parts) ","def parse_insta_json(json): # Parse JSON content needed = _get_json_data(json) dimensions = needed.get('dimensions', {}) owner = needed.get('owner', {}) # Build bot response parts = [] # Title if needed.get('is_video'): title = ""Video by "" else: title = ""Photo by "" # Author iuser = owner.get('username') ifname = owner.get('full_name') if ifname and iuser: parts.append('%s %s (@%s)' % (title, ifname, iuser)) elif iuser: parts.append('%s @%s' % (title, iuser)) elif ifname: parts.append('%s %s' % (title, ifname)) else: parts.append('%s unknown user' % title) # Media caption try: icap = needed['edge_media_to_caption']['edges'][0]['node']['text'] # Strip newlines icap = icap.replace('\n', ' ') # Truncate caption icap = (icap[:256] + '…') if len(icap) > 256 else icap except (KeyError, IndexError): icap = None if icap: parts.append(icap) # Media width and height iwidth = dimensions.get('width') or None iheight = dimensions.get('height') or None if iwidth and iheight: parts.append('%sx%s' % (iwidth, iheight)) # Likes ilikes = str(needed.get('edge_media_preview_like', {}).get('count')) if ilikes and ilikes.isdigit(): parts.append( _format_counter(int(ilikes), 'No ♥s yet', '1 ♥', '{number} ♥s')) # Comments icomms = str(needed.get('edge_media_to_parent_comment', {}).get('count')) if icomms and icomms.isdigit(): parts.append(_format_counter(int(icomms), 'No comment', '1 comment', '{number} comments')) # Publishing date idate = needed.get('taken_at_timestamp') if idate: dateformat = '%Y-%m-%d %H:%M:%S' pubdate = datetime.utcfromtimestamp(idate).strftime(dateformat) parts.append('Uploaded: %s' % pubdate) # Build the message return ' | '.join(parts) " 39672,"def main(): module = ForemanEntityAnsibleModule( argument_spec=dict( updated_name=dict(), ), entity_spec=dict( name=dict(required=True), admin=dict(required=False, type='bool', default=False), users=dict(required=False, type='entity_list', flat_name='user_ids'), usergroups=dict(required=False, type='entity_list', flat_name='usergroup_ids'), roles=dict(required=False, type='entity_list', flat_name='role_ids'), ), ) entity_dict = module.clean_params() module.connect() entity = module.find_resource_by_name('usergroups', entity_dict['name'], failsafe=True) if not module.desired_absent: if entity and 'updated_name' in entity_dict: entity_dict['name'] = entity_dict.pop('updated_name') if 'roles' in entity_dict: entity_dict['roles'] = module.find_resources_by_name('roles', entity_dict['roles'], thin=True) if 'users' in entity_dict: entity_dict['users'] = module.find_resources('users', ['login=""{0}""'.format(login) for login in entity_dict['users']], thin=True) if 'usergroups' in entity_dict: entity_dict['usergroups'] = module.find_resources_by_name('usergroups', entity_dict['usergroups'], thin=True) changed = module.ensure_entity_state('usergroups', entity_dict, entity) module.exit_json(changed=changed, entity_dict=entity_dict) ","def main(): module = ForemanEntityAnsibleModule( argument_spec=dict( updated_name=dict(), ), entity_spec=dict( name=dict(required=True), admin=dict(required=False, type='bool', default=False), users=dict(required=False, type='entity_list', flat_name='user_ids'), usergroups=dict(required=False, type='entity_list', flat_name='usergroup_ids'), roles=dict(required=False, type='entity_list', flat_name='role_ids'), ), ) entity_dict = 
module.clean_params() module.connect() entity = module.find_resource_by_name('usergroups', entity_dict['name'], failsafe=True) if not module.desired_absent: if entity and 'updated_name' in entity_dict: entity_dict['name'] = entity_dict.pop('updated_name') if 'roles' in entity_dict: entity_dict['roles'] = module.find_resources_by_name('roles', entity_dict['roles'], thin=True) if 'users' in entity_dict: entity_dict['users'] = module.find_resources('users', ['login=""{0}""'.format(login) for login in entity_dict['users']], thin=True) if 'usergroups' in entity_dict: entity_dict['usergroups'] = module.find_resources_by_name('usergroups', entity_dict['usergroups'], thin=True) changed = module.ensure_entity_state('usergroups', entity_dict, entity) module.exit_json(changed=changed) " 29772,"def get_formatted_scans_key_row(dcm_fn): """""" Parameters ---------- item Returns ------- row: list [ISO acquisition time, performing physician name, random string] """""" dcm_data = dcm.read_file(dcm_fn, stop_before_pixels=True, force=True) # we need to store filenames and acquisition times # parse date and time and get it into isoformat try: date = dcm_data.ContentDate time = dcm_data.ContentTime.split('.')[0] td = time + date acq_time = datetime.strptime(td, '%H%M%S%Y%m%d').isoformat() except AttributeError as exc: lgr.warning(""Failed to get date/time for the content: %s"", str(exc)) acq_time = None # add random string # But let's make it reproducible by using all UIDs # (might change across versions?) randcontent = u''.join( [getattr(dcm_data, f) for f in sorted(dir(dcm_data)) if f.endswith('UID')] ) randstr = hashlib.md5(randcontent.encode()).hexdigest()[:8] try: perfphys = dcm_data.PerformingPhysicianName except AttributeError: perfphys = '' row = [acq_time, perfphys, randstr] # empty entries should be 'n/a' # https://github.com/dartmouth-pbs/heudiconv/issues/32 row = ['n/a' if not str(e) else e for e in row] return row ","def get_formatted_scans_key_row(dcm_fn): """""" Parameters ---------- item Returns ------- row: list [ISO acquisition time, performing physician name, random string] """""" dcm_data = dcm.read_file(dcm_fn, stop_before_pixels=True, force=True) # we need to store filenames and acquisition times # parse date and time and get it into isoformat try: date = dcm_data.ContentDate time = dcm_data.ContentTime.split('.')[0] td = time + date acq_time = datetime.strptime(td, '%H%M%S%Y%m%d').isoformat() except AttributeError as exc: lgr.warning(""Failed to get date/time for the content: %s"", str(exc)) acq_time = None # add random string # But let's make it reproducible by using all UIDs # (might change across versions?) randcontent = u''.join( [getattr(dcm_data, f) or '' for f in sorted(dir(dcm_data)) if f.endswith('UID')] ) randstr = hashlib.md5(randcontent.encode()).hexdigest()[:8] try: perfphys = dcm_data.PerformingPhysicianName except AttributeError: perfphys = '' row = [acq_time, perfphys, randstr] # empty entries should be 'n/a' # https://github.com/dartmouth-pbs/heudiconv/issues/32 row = ['n/a' if not str(e) else e for e in row] return row " 45796,"def normalize( data: torch.Tensor, mean: Union[torch.Tensor, float], std: Union[torch.Tensor, float] ) -> torch.Tensor: r""""""Normalize a tensor image with mean and standard deviation. .. math:: \text{input[channel] = (input[channel] - mean[channel]) / std[channel]} Where `mean` is :math:`(M_1, ..., M_n)` and `std` :math:`(S_1, ..., S_n)` for `n` channels, Args: data (torch.Tensor): Image tensor of size :math:`(*, C, ...)`. 
mean (Union[torch.Tensor, Tuple[float], float]): Mean for each channel. std (Union[torch.Tensor, Tuple[float], float]): Standard deviations for each channel. Return: torch.Tensor: Normalised tensor with same size as input :math:`(*, C, ...)`. Examples: >>> x = torch.rand(1, 4, 3, 3) >>> out = normalize(x, 0.0, 255.) >>> out.shape torch.Size([1, 4, 3, 3]) >>> x = torch.rand(1, 4, 3, 3) >>> mean = torch.zeros(1, 4) >>> std = 255. * torch.ones(1, 4) >>> out = normalize(x, mean, std) >>> out.shape torch.Size([1, 4, 3, 3]) """""" shape = data.shape if isinstance(mean, float): mean = torch.tensor([mean] * shape[1], device=data.device, dtype=data.dtype) if isinstance(std, float): std = torch.tensor([std] * shape[1], device=data.device, dtype=data.dtype) if isinstance(mean, tuple): assert len(mean) == len(shape) mean = torch.tensor(mean, device=data.device, dtype=data.dtype) if isinstance(std, tuple): assert len(std) == len(shape) std = torch.tensor(std, device=data.device, dtype=data.dtype) if not isinstance(data, torch.Tensor): raise TypeError(""data should be a tensor. Got {}"".format(type(data))) if not isinstance(mean, torch.Tensor): raise TypeError(""mean should be a tensor or a float. Got {}"".format(type(mean))) if not isinstance(std, torch.Tensor): raise TypeError(""std should be a tensor or float. Got {}"".format(type(std))) # Allow broadcast on channel dimension if mean.shape and mean.shape[0] != 1: if mean.shape[0] != data.shape[-3] and mean.shape[:2] != data.shape[:2]: raise ValueError(f""mean length and number of channels do not match. Got {mean.shape} and {data.shape}."") # Allow broadcast on channel dimension if std.shape and std.shape[0] != 1: if std.shape[0] != data.shape[-3] and std.shape[:2] != data.shape[:2]: raise ValueError(f""std length and number of channels do not match. Got {std.shape} and {data.shape}."") mean = torch.as_tensor(mean, device=data.device, dtype=data.dtype) std = torch.as_tensor(std, device=data.device, dtype=data.dtype) if mean.shape: mean = mean[..., :, None] if std.shape: std = std[..., :, None] out: torch.Tensor = (data.view(shape[0], shape[1], -1) - mean) / std return out.view(shape) ","def normalize( data: torch.Tensor, mean: Union[torch.Tensor, float], std: Union[torch.Tensor, float] ) -> torch.Tensor: r""""""Normalize a tensor image with mean and standard deviation. .. math:: \text{input[channel] = (input[channel] - mean[channel]) / std[channel]} Where `mean` is :math:`(M_1, ..., M_n)` and `std` :math:`(S_1, ..., S_n)` for `n` channels, Args: data (torch.Tensor): Image tensor of size :math:`(*, C, ...)`. mean (Union[torch.Tensor, Tuple[float, ...], float]): Mean for each channel. std (Union[torch.Tensor, Tuple[float], float]): Standard deviations for each channel. Return: torch.Tensor: Normalised tensor with same size as input :math:`(*, C, ...)`. Examples: >>> x = torch.rand(1, 4, 3, 3) >>> out = normalize(x, 0.0, 255.) >>> out.shape torch.Size([1, 4, 3, 3]) >>> x = torch.rand(1, 4, 3, 3) >>> mean = torch.zeros(1, 4) >>> std = 255. 
* torch.ones(1, 4) >>> out = normalize(x, mean, std) >>> out.shape torch.Size([1, 4, 3, 3]) """""" shape = data.shape if isinstance(mean, float): mean = torch.tensor([mean] * shape[1], device=data.device, dtype=data.dtype) if isinstance(std, float): std = torch.tensor([std] * shape[1], device=data.device, dtype=data.dtype) if isinstance(mean, tuple): assert len(mean) == len(shape) mean = torch.tensor(mean, device=data.device, dtype=data.dtype) if isinstance(std, tuple): assert len(std) == len(shape) std = torch.tensor(std, device=data.device, dtype=data.dtype) if not isinstance(data, torch.Tensor): raise TypeError(""data should be a tensor. Got {}"".format(type(data))) if not isinstance(mean, torch.Tensor): raise TypeError(""mean should be a tensor or a float. Got {}"".format(type(mean))) if not isinstance(std, torch.Tensor): raise TypeError(""std should be a tensor or float. Got {}"".format(type(std))) # Allow broadcast on channel dimension if mean.shape and mean.shape[0] != 1: if mean.shape[0] != data.shape[-3] and mean.shape[:2] != data.shape[:2]: raise ValueError(f""mean length and number of channels do not match. Got {mean.shape} and {data.shape}."") # Allow broadcast on channel dimension if std.shape and std.shape[0] != 1: if std.shape[0] != data.shape[-3] and std.shape[:2] != data.shape[:2]: raise ValueError(f""std length and number of channels do not match. Got {std.shape} and {data.shape}."") mean = torch.as_tensor(mean, device=data.device, dtype=data.dtype) std = torch.as_tensor(std, device=data.device, dtype=data.dtype) if mean.shape: mean = mean[..., :, None] if std.shape: std = std[..., :, None] out: torch.Tensor = (data.view(shape[0], shape[1], -1) - mean) / std return out.view(shape) " 51023,"def _get_timeout(path: str) -> ClientTimeout: """"""Return timeout for a URL path."""""" if NO_TIMEOUT.match(path): return ClientTimeout(connect=5) return ClientTimeout(connect=5, total=300) ","def _get_timeout(path: str) -> ClientTimeout: """"""Return timeout for a URL path."""""" if NO_TIMEOUT.match(path): return ClientTimeout(connect=10) return ClientTimeout(connect=10, total=300) " 30614,"def main(): """""" Main """""" commands = { 'test-module': test_module, 'fetch-incidents': fetch_incidents, 'digitalguardian-add-watchlist-entry': add_entry_to_watchlist, 'digitalguardian-check-watchlist-entry': check_watchlist_entry, 'digitalguardian-remove-watchlist-entry': rm_entry_from_watchlist, 'digitalguardian-add-componentlist-entry': add_entry_to_componentlist, 'digitalguardian-check-componentlist-entry': check_componentlist_entry, 'digitalguardian-remove-componentlist-entry': rm_entry_from_componentlist, } try: required_params = {'auth_url', 'arc_url', 'client_id', 'client_secret'} if demisto.params().keys() < required_params: missing_params = required_params - demisto.params().keys() return_error('Missing required parameter(s): %s' % missing_params) init_globals() command = demisto.command() LOG('Command being called is %s' % command) if command not in commands: return_error('Command ""%s"" not implemented' % command) command_fn = commands[command] command_fn() except Exception as e: return_error(e) ","def main(): """""" Main """""" commands = { 'test-module': test_module, 'fetch-incidents': fetch_incidents, 'digitalguardian-add-watchlist-entry': add_entry_to_watchlist, 'digitalguardian-check-watchlist-entry': check_watchlist_entry, 'digitalguardian-remove-watchlist-entry': rm_entry_from_watchlist, 'digitalguardian-add-componentlist-entry': add_entry_to_componentlist, 
'digitalguardian-check-componentlist-entry': check_componentlist_entry, 'digitalguardian-remove-componentlist-entry': rm_entry_from_componentlist, } try: required_params = {'auth_url', 'arc_url', 'client_id', 'client_secret'} if demisto.params().keys() < required_params: missing_params = required_params - demisto.params().keys() return_error('Missing required parameter(s): %s' % missing_params) init_globals() command = demisto.command() LOG('Command being called is %s' % command) if command not in commands: return_error(f'Command ""{command}"" not implemented') command_fn = commands[command] command_fn() except Exception as e: return_error(e) " 52779,"def create_thumbnail(sourcename, size): source = default_storage.open(sourcename) image = Image.open(BytesIO(source.read())) try: image.load() except: raise ThumbnailError('Could not load image') frames = [resize_image(frame, size) for frame in ImageSequence.Iterator(image)] image_out = frames[0] save_kwargs = {} if source.name.lower().endswith('.jpg') or source.name.lower().endswith('.jpeg'): # Yields better file sizes for photos target_ext = 'jpeg' quality = 95 elif source.name.lower().endswith('.gif'): target_ext = 'gif' quality = None image_out.info = image.info save_kwargs = { 'append_images': frames[1:] if len(frames) > 1 else [], 'loop': image.info.get('loop', 0), 'save_all': True, } else: target_ext = 'png' quality = None checksum = hashlib.md5(image.tobytes()).hexdigest() name = checksum + '.' + size.replace('^', 'c') + '.' + target_ext buffer = BytesIO() if image_out.mode == ""P"" and source.name.lower().endswith('.png'): image_out = image_out.convert('RGBA') if image_out.mode not in (""1"", ""L"", ""RGB"", ""RGBA""): image_out = image_out.convert('RGB') image_out.save(fp=buffer, format=target_ext.upper(), quality=quality, **save_kwargs) imgfile = ContentFile(buffer.getvalue()) t = Thumbnail.objects.create(source=sourcename, size=size) t.thumb.save(name, imgfile) return t ","def create_thumbnail(sourcename, size): source = default_storage.open(sourcename) image = Image.open(BytesIO(source.read())) try: image.load() except: raise ThumbnailError('Could not load image') frames = [resize_image(frame, size) for frame in ImageSequence.Iterator(image)] image_out = frames[0] save_kwargs = {} if source.name.lower().endswith('.jpg') or source.name.lower().endswith('.jpeg'): # Yields better file sizes for photos target_ext = 'jpeg' quality = 95 elif source.name.lower().endswith('.gif') or source.name.lower().endswith('.png'): target_ext = source.name.lower()[-3:] quality = None image_out.info = image.info save_kwargs = { 'append_images': frames[1:] if len(frames) > 1 else [], 'loop': image.info.get('loop', 0), 'save_all': True, } else: target_ext = 'png' quality = None checksum = hashlib.md5(image.tobytes()).hexdigest() name = checksum + '.' + size.replace('^', 'c') + '.' 
+ target_ext buffer = BytesIO() if image_out.mode == ""P"" and source.name.lower().endswith('.png'): image_out = image_out.convert('RGBA') if image_out.mode not in (""1"", ""L"", ""RGB"", ""RGBA""): image_out = image_out.convert('RGB') image_out.save(fp=buffer, format=target_ext.upper(), quality=quality, **save_kwargs) imgfile = ContentFile(buffer.getvalue()) t = Thumbnail.objects.create(source=sourcename, size=size) t.thumb.save(name, imgfile) return t " 4470,"def test_compute_bridged_electrodes(): """"""Test computing bridged electrodes."""""" # test I/O raw = read_raw_fif(raw_fname).load_data() raw.pick_types(meg=True) with pytest.raises(RuntimeError, match='No EEG channels found'): bridged_idx, ed_matrix = compute_bridged_electrodes(raw) # test output raw = read_raw_fif(raw_fname).load_data() idx0 = raw.ch_names.index('EEG 001') idx1 = raw.ch_names.index('EEG 002') raw._data[idx1] = raw._data[idx0] bridged_idx, ed_matrix = compute_bridged_electrodes(raw) assert bridged_idx == [(idx0, idx1)] picks = pick_types(raw.info, meg=False, eeg=True) assert ed_matrix.shape == (raw.times.size // (2 * raw.info['sfreq']), picks.size, picks.size) picks = list(picks) assert np.all(ed_matrix[:, picks.index(idx0), picks.index(idx1)] == 0) assert np.all(np.isnan(ed_matrix[0][np.tril_indices(len(picks), -1)])) ","def test_compute_bridged_electrodes(): """"""Test computing bridged electrodes."""""" # test EEG channels are present raw = read_raw_fif(raw_fname).load_data() raw.pick_types(meg=True) with pytest.raises(RuntimeError, match='No EEG channels found'): bridged_idx, ed_matrix = compute_bridged_electrodes(raw) # test output raw = read_raw_fif(raw_fname).load_data() idx0 = raw.ch_names.index('EEG 001') idx1 = raw.ch_names.index('EEG 002') raw._data[idx1] = raw._data[idx0] bridged_idx, ed_matrix = compute_bridged_electrodes(raw) assert bridged_idx == [(idx0, idx1)] picks = pick_types(raw.info, meg=False, eeg=True) assert ed_matrix.shape == (raw.times.size // (2 * raw.info['sfreq']), picks.size, picks.size) picks = list(picks) assert np.all(ed_matrix[:, picks.index(idx0), picks.index(idx1)] == 0) assert np.all(np.isnan(ed_matrix[0][np.tril_indices(len(picks), -1)])) " 814,"def kurtosis(X, condition=None, **kwargs): """""" Measure of the tailedness (outliers) of the probability distribution. Kurtosis of any univariate normal distribution is 3. Kurtosis less than 3 means that the distribution produces fewer and less extreme outliers than the normal distribution. kurtosis(X) = E(((X - E(X))/sigma)**4) Examples ======== >>> from sympy.stats import kurtosis, Exponential, Normal >>> from sympy import Symbol >>> X = Normal('X', 0, 1) >>> kurtosis(X) 3 >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> kurtosis(Y) 9 References ========== .. [1] https://en.wikipedia.org/wiki/Kurtosis """""" return smoment(X, 4, condition=condition, **kwargs) ","def kurtosis(X, condition=None, **kwargs): """""" Characterizes the tails/outliers of a probability distribution. Kurtosis of any univariate normal distribution is 3. Kurtosis less than 3 means that the distribution produces fewer and less extreme outliers than the normal distribution. kurtosis(X) = E(((X - E(X))/sigma)**4) Examples ======== >>> from sympy.stats import kurtosis, Exponential, Normal >>> from sympy import Symbol >>> X = Normal('X', 0, 1) >>> kurtosis(X) 3 >>> rate = Symbol('lambda', positive=True, real=True, finite=True) >>> Y = Exponential('Y', rate) >>> kurtosis(Y) 9 References ========== .. 
[1] https://en.wikipedia.org/wiki/Kurtosis """""" return smoment(X, 4, condition=condition, **kwargs) " 45425,"def test_loc_4358(): arrays = [ [""bar"", ""bar"", ""baz"", ""baz"", ""foo"", ""foo"", ""qux"", ""qux""], [""one"", ""two"", ""one"", ""two"", ""one"", ""two"", ""one"", ""two""], ] columns = pd.MultiIndex.from_tuples(zip(*arrays), names=[""a"", ""b""]) data = [np.random.randint(10, 50, len(columns)) for _ in range(30)] modin_df = pd.DataFrame(data, columns=columns) pandas_df = pandas.DataFrame(data, columns=columns) df_equals(modin_df.loc[:, (""bar"", ""two"")], pandas_df.loc[:, (""bar"", ""two"")]) ","def test_loc_mutli_index_with_tuples(): arrays = [ [""bar"", ""bar"", ""baz"", ""baz"", ""foo"", ""foo"", ""qux"", ""qux""], [""one"", ""two"", ""one"", ""two"", ""one"", ""two"", ""one"", ""two""], ] columns = pd.MultiIndex.from_tuples(zip(*arrays), names=[""a"", ""b""]) data = [np.random.randint(10, 50, len(columns)) for _ in range(30)] modin_df = pd.DataFrame(data, columns=columns) pandas_df = pandas.DataFrame(data, columns=columns) df_equals(modin_df.loc[:, (""bar"", ""two"")], pandas_df.loc[:, (""bar"", ""two"")]) " 51457,"def guess_engine(store_spec): engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) compatible = [] for engine, backend_cls in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible.append(engine) except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) installed = [k for k in engines if k != ""store""] if not compatible: if installed: error_msg = ( ""did not find a match in any of xarray's currently installed IO "" f""backends {installed}. Consider explicitly selecting one of the "" ""installed engines via the ``engine`` parameter, or installing "" ""additional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html"" ) else: error_msg = ( ""xarray is unable to open this file because it has no currently "" ""installed IO backends. Xarray's read/write support requires "" ""installing optional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io"" ) else: error_msg = ( ""found the following matches with the input file in xarray's IO "" f""backends: {compatible}. But their dependencies may not be installed, see:\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html \n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html"" ) raise ValueError(error_msg) ","def guess_engine(store_spec): engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) # no engine found, find out why compatible_engines = [] for engine, backend_cls in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible.append(engine) except Exception: warnings.warn(f""{engine!r} fails while guessing"", RuntimeWarning) installed = [k for k in engines if k != ""store""] if not compatible: if installed: error_msg = ( ""did not find a match in any of xarray's currently installed IO "" f""backends {installed}. 
Consider explicitly selecting one of the "" ""installed engines via the ``engine`` parameter, or installing "" ""additional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html"" ) else: error_msg = ( ""xarray is unable to open this file because it has no currently "" ""installed IO backends. Xarray's read/write support requires "" ""installing optional IO dependencies, see:\n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html\n"" ""http://xarray.pydata.org/en/stable/user-guide/io"" ) else: error_msg = ( ""found the following matches with the input file in xarray's IO "" f""backends: {compatible}. But their dependencies may not be installed, see:\n"" ""http://xarray.pydata.org/en/stable/user-guide/io.html \n"" ""http://xarray.pydata.org/en/stable/getting-started-guide/installing.html"" ) raise ValueError(error_msg) " 4047,"def do_urlencode(value: Union[str, Iterable, Sequence]): """"""Quote data for use in a URL path or query using UTF-8. Basic wrapper around :func:`urllib.parse.quote` when given a string, or :func:`urllib.parse.urlencode` for a dict or iterable. :param value: Data to quote. A string will be quoted directly. A dict or iterable of ``(key, value)`` pairs will be joined as a query string. When given a string, ""/"" is not quoted. HTTP servers treat ""/"" and ""%2F"" equivalently in paths. If you need quoted slashes, use the ``|replace(""/"", ""%2F"")`` filter. .. versionadded:: 2.7 """""" if isinstance(value, str) or not isinstance(value, abc.Iterable): return url_quote(value) if isinstance(value, dict): items = value.items() else: items = iter(value) return ""&"".join( f""{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}"" for k, v in items ) ","def do_urlencode(value: Union[str, Iterable, Sequence]) -> str: """"""Quote data for use in a URL path or query using UTF-8. Basic wrapper around :func:`urllib.parse.quote` when given a string, or :func:`urllib.parse.urlencode` for a dict or iterable. :param value: Data to quote. A string will be quoted directly. A dict or iterable of ``(key, value)`` pairs will be joined as a query string. When given a string, ""/"" is not quoted. HTTP servers treat ""/"" and ""%2F"" equivalently in paths. If you need quoted slashes, use the ``|replace(""/"", ""%2F"")`` filter. .. 
versionadded:: 2.7 """""" if isinstance(value, str) or not isinstance(value, abc.Iterable): return url_quote(value) if isinstance(value, dict): items = value.items() else: items = iter(value) return ""&"".join( f""{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}"" for k, v in items ) " 35489,"def thermald_thread(end_event, hw_queue): pm = messaging.PubMaster(['deviceState']) sm = messaging.SubMaster([""peripheralState"", ""gpsLocationExternal"", ""controlsState"", ""pandaStates""], poll=[""pandaStates""]) fan_speed = 0 count = 0 onroad_conditions: Dict[str, bool] = { ""ignition"": False, } startup_conditions: Dict[str, bool] = {} startup_conditions_prev: Dict[str, bool] = {} off_ts = None started_ts = None started_seen = False thermal_status = ThermalStatus.green usb_power = True last_hw_state = HardwareState( network_type=NetworkType.none, network_strength=NetworkStrength.unknown, network_info=None, nvme_temps=[], modem_temps=[], ) current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML) temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML) should_start_prev = False in_car = False handle_fan = None is_uno = False engaged_prev = False params = Params() power_monitor = PowerMonitoring() HARDWARE.initialize_hardware() thermal_config = HARDWARE.get_thermal_config() # TODO: use PI controller for UNO controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML)) while not end_event.is_set(): sm.update(PANDA_STATES_TIMEOUT) pandaStates = sm['pandaStates'] peripheralState = sm['peripheralState'] msg = read_thermal(thermal_config) if sm.updated['pandaStates'] and len(pandaStates) > 0: # Set ignition based on any panda connected onroad_conditions[""ignition""] = False for pandaState in pandaStates: if pandaState.pandaType != log.PandaState.PandaType.unknown: onroad_conditions[""ignition""] |= pandaState.ignitionLine or pandaState.ignitionCan pandaState = pandaStates[0] in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client # Setup fan handler on first connect to panda if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown: is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno if TICI: cloudlog.info(""Setting up TICI fan handler"") handle_fan = handle_fan_tici elif is_uno or PC: cloudlog.info(""Setting up UNO fan handler"") handle_fan = handle_fan_uno else: cloudlog.info(""Setting up EON fan handler"") setup_eon_fan() handle_fan = handle_fan_eon try: last_hw_state = hw_queue.get_nowait() except queue.Empty: pass msg.deviceState.freeSpacePercent = get_available_percent(default=100.0) msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent)) msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)] msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent())) msg.deviceState.networkType = last_hw_state.network_type msg.deviceState.networkStrength = last_hw_state.network_strength if last_hw_state.network_info is not None: msg.deviceState.networkInfo = last_hw_state.network_info msg.deviceState.nvmeTempC = last_hw_state.nvme_temps msg.deviceState.modemTempC = last_hw_state.modem_temps msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness() msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity() msg.deviceState.batteryCurrent = HARDWARE.get_battery_current() msg.deviceState.usbOnline = HARDWARE.get_usb_present() 
current_filter.update(msg.deviceState.batteryCurrent / 1e6) max_comp_temp = temp_filter.update( max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC)) ) if handle_fan is not None: fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions[""ignition""]) msg.deviceState.fanSpeedPercentDesired = fan_speed is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5)) if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP: # If device is offroad we want to cool down before going onroad # since going onroad increases load and can make temps go over 107 thermal_status = ThermalStatus.danger else: current_band = THERMAL_BANDS[thermal_status] band_idx = list(THERMAL_BANDS.keys()).index(thermal_status) if current_band.min_temp is not None and max_comp_temp < current_band.min_temp: thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1] elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp: thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1] # **** starting logic **** # Ensure date/time are valid now = datetime.datetime.utcnow() startup_conditions[""time_valid""] = (now.year > 2020) or (now.year == 2020 and now.month >= 10) set_offroad_alert_if_changed(""Offroad_InvalidTime"", (not startup_conditions[""time_valid""])) startup_conditions[""up_to_date""] = params.get(""Offroad_ConnectivityNeeded"") is None or params.get_bool(""DisableUpdates"") or params.get_bool(""SnoozeUpdate"") startup_conditions[""not_uninstalling""] = not params.get_bool(""DoUninstall"") startup_conditions[""accepted_terms""] = params.get(""HasAcceptedTerms"") == terms_version # with 2% left, we killall, otherwise the phone will take a long time to boot startup_conditions[""free_space""] = msg.deviceState.freeSpacePercent > 2 startup_conditions[""completed_training""] = params.get(""CompletedTrainingVersion"") == training_version or \ params.get_bool(""Passive"") startup_conditions[""not_driver_view""] = not params.get_bool(""IsDriverViewEnabled"") startup_conditions[""not_taking_snapshot""] = not params.get_bool(""IsTakingSnapshot"") # if any CPU gets above 107 or the battery gets above 63, kill all processes # controls will warn with CPU above 95 or battery above 60 onroad_conditions[""device_temp_good""] = thermal_status < ThermalStatus.danger set_offroad_alert_if_changed(""Offroad_TemperatureTooHigh"", (not onroad_conditions[""device_temp_good""])) if TICI: missing = (not Path(""/data/media"").is_mount()) and (not os.path.isfile(""/persist/comma/living-in-the-moment"")) set_offroad_alert_if_changed(""Offroad_StorageMissing"", missing) # Handle offroad/onroad transition should_start = all(onroad_conditions.values()) if started_ts is None: should_start = should_start and all(startup_conditions.values()) if should_start != should_start_prev or (count == 0): params.put_bool(""IsOnroad"", should_start) params.put_bool(""IsOffroad"", not should_start) params.put_bool(""IsEngaged"", False) engaged_prev = False HARDWARE.set_power_save(not should_start) if sm.updated['controlsState']: engaged = sm['controlsState'].enabled if engaged != engaged_prev: params.put_bool(""IsEngaged"", engaged) engaged_prev = engaged try: with open('/dev/kmsg', 'w') as kmsg: kmsg.write(f""<3>[thermald] engaged: {engaged}\n"") except Exception: pass if should_start: off_ts = None if started_ts is None: started_ts = sec_since_boot() started_seen = True else: if 
onroad_conditions[""ignition""] and (startup_conditions != startup_conditions_prev): cloudlog.event(""Startup blocked"", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions) started_ts = None if off_ts is None: off_ts = sec_since_boot() # Offroad power monitoring power_monitor.calculate(peripheralState, onroad_conditions[""ignition""]) msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used() msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity()) current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0 # Check if we need to disable charging (handled by boardd) msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions[""ignition""], in_car, off_ts) # Check if we need to shut down if power_monitor.should_shutdown(peripheralState, onroad_conditions[""ignition""], in_car, off_ts, started_seen): cloudlog.warning(f""shutting device down, offroad since {off_ts}"") params.put_bool(""DoShutdown"", True) msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged msg.deviceState.started = started_ts is not None msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0)) last_ping = params.get(""LastAthenaPingTime"") if last_ping is not None: msg.deviceState.lastAthenaPingTime = int(last_ping) msg.deviceState.thermalStatus = thermal_status pm.send(""deviceState"", msg) if EON and not is_uno: set_offroad_alert_if_changed(""Offroad_ChargeDisabled"", (not usb_power)) should_start_prev = should_start startup_conditions_prev = startup_conditions.copy() # Log to statsd statlog.gauge(""free_space_percent"", msg.deviceState.freeSpacePercent) statlog.gauge(""gpu_usage_percent"", msg.deviceState.gpuUsagePercent) statlog.gauge(""memory_usage_percent"", msg.deviceState.memoryUsagePercent) for i, usage in enumerate(msg.deviceState.cpuUsagePercent): statlog.gauge(f""cpu{i}_usage_percent"", usage) for i, temp in enumerate(msg.deviceState.cpuTempC): statlog.gauge(f""cpu{i}_temperature"", temp) for i, temp in enumerate(msg.deviceState.gpuTempC): statlog.gauge(f""gpu{i}_temperature"", temp) statlog.gauge(""memory_temperature"", msg.deviceState.memoryTempC) statlog.gauge(""ambient_temperature"", msg.deviceState.ambientTempC) for i, temp in enumerate(msg.deviceState.pmicTempC): statlog.gauge(f""pmic{i}_temperature"", temp) for i, temp in enumerate(last_hw_state.nvme_temps): statlog.gauge(f""nvme_temperature{i}"", temp) for i, temp in enumerate(last_hw_state.modem_temps): statlog.gauge(f""modem_temperature{i}"", temp) statlog.gauge(""fan_speed_percent_desired"", msg.deviceState.fanSpeedPercentDesired) statlog.gauge(""screen_brightness_percent"", msg.deviceState.screenBrightnessPercent) # report to server once every 10 minutes if (count % int(600. 
/ DT_TRML)) == 0: if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40: cloudlog.event(""High offroad memory usage"", mem=msg.deviceState.memoryUsagePercent) cloudlog.event(""STATUS_PACKET"", count=count, pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates], peripheralState=strip_deprecated_keys(peripheralState.to_dict()), location=(strip_deprecated_keys(sm[""gpsLocationExternal""].to_dict()) if sm.alive[""gpsLocationExternal""] else None), deviceState=strip_deprecated_keys(msg.to_dict())) count += 1 ","def thermald_thread(end_event, hw_queue): pm = messaging.PubMaster(['deviceState']) sm = messaging.SubMaster([""peripheralState"", ""gpsLocationExternal"", ""controlsState"", ""pandaStates""], poll=[""pandaStates""]) fan_speed = 0 count = 0 onroad_conditions: Dict[str, bool] = { ""ignition"": False, } startup_conditions: Dict[str, bool] = {} startup_conditions_prev: Dict[str, bool] = {} off_ts = None started_ts = None started_seen = False thermal_status = ThermalStatus.green usb_power = True last_hw_state = HardwareState( network_type=NetworkType.none, network_strength=NetworkStrength.unknown, network_info=None, nvme_temps=[], modem_temps=[], ) current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML) temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML) should_start_prev = False in_car = False handle_fan = None is_uno = False engaged_prev = False params = Params() power_monitor = PowerMonitoring() HARDWARE.initialize_hardware() thermal_config = HARDWARE.get_thermal_config() # TODO: use PI controller for UNO controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML)) while not end_event.is_set(): sm.update(PANDA_STATES_TIMEOUT) pandaStates = sm['pandaStates'] peripheralState = sm['peripheralState'] msg = read_thermal(thermal_config) if sm.updated['pandaStates'] and len(pandaStates) > 0: # Set ignition based on any panda connected onroad_conditions[""ignition""] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps != pandaType != log.PandaState.PandaType.unknown) pandaState = pandaStates[0] in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client # Setup fan handler on first connect to panda if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown: is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno if TICI: cloudlog.info(""Setting up TICI fan handler"") handle_fan = handle_fan_tici elif is_uno or PC: cloudlog.info(""Setting up UNO fan handler"") handle_fan = handle_fan_uno else: cloudlog.info(""Setting up EON fan handler"") setup_eon_fan() handle_fan = handle_fan_eon try: last_hw_state = hw_queue.get_nowait() except queue.Empty: pass msg.deviceState.freeSpacePercent = get_available_percent(default=100.0) msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent)) msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)] msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent())) msg.deviceState.networkType = last_hw_state.network_type msg.deviceState.networkStrength = last_hw_state.network_strength if last_hw_state.network_info is not None: msg.deviceState.networkInfo = last_hw_state.network_info msg.deviceState.nvmeTempC = last_hw_state.nvme_temps msg.deviceState.modemTempC = last_hw_state.modem_temps msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness() 
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity() msg.deviceState.batteryCurrent = HARDWARE.get_battery_current() msg.deviceState.usbOnline = HARDWARE.get_usb_present() current_filter.update(msg.deviceState.batteryCurrent / 1e6) max_comp_temp = temp_filter.update( max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC)) ) if handle_fan is not None: fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions[""ignition""]) msg.deviceState.fanSpeedPercentDesired = fan_speed is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5)) if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP: # If device is offroad we want to cool down before going onroad # since going onroad increases load and can make temps go over 107 thermal_status = ThermalStatus.danger else: current_band = THERMAL_BANDS[thermal_status] band_idx = list(THERMAL_BANDS.keys()).index(thermal_status) if current_band.min_temp is not None and max_comp_temp < current_band.min_temp: thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1] elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp: thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1] # **** starting logic **** # Ensure date/time are valid now = datetime.datetime.utcnow() startup_conditions[""time_valid""] = (now.year > 2020) or (now.year == 2020 and now.month >= 10) set_offroad_alert_if_changed(""Offroad_InvalidTime"", (not startup_conditions[""time_valid""])) startup_conditions[""up_to_date""] = params.get(""Offroad_ConnectivityNeeded"") is None or params.get_bool(""DisableUpdates"") or params.get_bool(""SnoozeUpdate"") startup_conditions[""not_uninstalling""] = not params.get_bool(""DoUninstall"") startup_conditions[""accepted_terms""] = params.get(""HasAcceptedTerms"") == terms_version # with 2% left, we killall, otherwise the phone will take a long time to boot startup_conditions[""free_space""] = msg.deviceState.freeSpacePercent > 2 startup_conditions[""completed_training""] = params.get(""CompletedTrainingVersion"") == training_version or \ params.get_bool(""Passive"") startup_conditions[""not_driver_view""] = not params.get_bool(""IsDriverViewEnabled"") startup_conditions[""not_taking_snapshot""] = not params.get_bool(""IsTakingSnapshot"") # if any CPU gets above 107 or the battery gets above 63, kill all processes # controls will warn with CPU above 95 or battery above 60 onroad_conditions[""device_temp_good""] = thermal_status < ThermalStatus.danger set_offroad_alert_if_changed(""Offroad_TemperatureTooHigh"", (not onroad_conditions[""device_temp_good""])) if TICI: missing = (not Path(""/data/media"").is_mount()) and (not os.path.isfile(""/persist/comma/living-in-the-moment"")) set_offroad_alert_if_changed(""Offroad_StorageMissing"", missing) # Handle offroad/onroad transition should_start = all(onroad_conditions.values()) if started_ts is None: should_start = should_start and all(startup_conditions.values()) if should_start != should_start_prev or (count == 0): params.put_bool(""IsOnroad"", should_start) params.put_bool(""IsOffroad"", not should_start) params.put_bool(""IsEngaged"", False) engaged_prev = False HARDWARE.set_power_save(not should_start) if sm.updated['controlsState']: engaged = sm['controlsState'].enabled if engaged != engaged_prev: params.put_bool(""IsEngaged"", engaged) engaged_prev = engaged try: with open('/dev/kmsg', 'w') as kmsg: kmsg.write(f""<3>[thermald] 
engaged: {engaged}\n"") except Exception: pass if should_start: off_ts = None if started_ts is None: started_ts = sec_since_boot() started_seen = True else: if onroad_conditions[""ignition""] and (startup_conditions != startup_conditions_prev): cloudlog.event(""Startup blocked"", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions) started_ts = None if off_ts is None: off_ts = sec_since_boot() # Offroad power monitoring power_monitor.calculate(peripheralState, onroad_conditions[""ignition""]) msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used() msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity()) current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0 # Check if we need to disable charging (handled by boardd) msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions[""ignition""], in_car, off_ts) # Check if we need to shut down if power_monitor.should_shutdown(peripheralState, onroad_conditions[""ignition""], in_car, off_ts, started_seen): cloudlog.warning(f""shutting device down, offroad since {off_ts}"") params.put_bool(""DoShutdown"", True) msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged msg.deviceState.started = started_ts is not None msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0)) last_ping = params.get(""LastAthenaPingTime"") if last_ping is not None: msg.deviceState.lastAthenaPingTime = int(last_ping) msg.deviceState.thermalStatus = thermal_status pm.send(""deviceState"", msg) if EON and not is_uno: set_offroad_alert_if_changed(""Offroad_ChargeDisabled"", (not usb_power)) should_start_prev = should_start startup_conditions_prev = startup_conditions.copy() # Log to statsd statlog.gauge(""free_space_percent"", msg.deviceState.freeSpacePercent) statlog.gauge(""gpu_usage_percent"", msg.deviceState.gpuUsagePercent) statlog.gauge(""memory_usage_percent"", msg.deviceState.memoryUsagePercent) for i, usage in enumerate(msg.deviceState.cpuUsagePercent): statlog.gauge(f""cpu{i}_usage_percent"", usage) for i, temp in enumerate(msg.deviceState.cpuTempC): statlog.gauge(f""cpu{i}_temperature"", temp) for i, temp in enumerate(msg.deviceState.gpuTempC): statlog.gauge(f""gpu{i}_temperature"", temp) statlog.gauge(""memory_temperature"", msg.deviceState.memoryTempC) statlog.gauge(""ambient_temperature"", msg.deviceState.ambientTempC) for i, temp in enumerate(msg.deviceState.pmicTempC): statlog.gauge(f""pmic{i}_temperature"", temp) for i, temp in enumerate(last_hw_state.nvme_temps): statlog.gauge(f""nvme_temperature{i}"", temp) for i, temp in enumerate(last_hw_state.modem_temps): statlog.gauge(f""modem_temperature{i}"", temp) statlog.gauge(""fan_speed_percent_desired"", msg.deviceState.fanSpeedPercentDesired) statlog.gauge(""screen_brightness_percent"", msg.deviceState.screenBrightnessPercent) # report to server once every 10 minutes if (count % int(600. 
/ DT_TRML)) == 0: if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40: cloudlog.event(""High offroad memory usage"", mem=msg.deviceState.memoryUsagePercent) cloudlog.event(""STATUS_PACKET"", count=count, pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates], peripheralState=strip_deprecated_keys(peripheralState.to_dict()), location=(strip_deprecated_keys(sm[""gpsLocationExternal""].to_dict()) if sm.alive[""gpsLocationExternal""] else None), deviceState=strip_deprecated_keys(msg.to_dict())) count += 1 " 30677,"def set_marketplace_gcp_bucket_for_build(client, prints_manager, branch_name, ci_build_number): """"""Sets custom marketplace GCP bucket based on branch name and build number Args: client (demisto_client): The configured client to use. prints_manager (ParallelPrintsManager): Print manager object branch_name (str): GitHub branch name ci_build_number (str): CI build number Returns: None """""" host = client.api_client.configuration.host installed_content_message = \ '\nMaking ""POST"" request to server - ""{}"" to set GCP bucket server configuration.'.format(host) prints_manager.add_print_job(installed_content_message, print_color, 0, LOG_COLORS.GREEN) # make request to update server configs server_configuration = { 'content.pack.verify': 'false', 'marketplace.initial.sync.delay': '0', 'content.pack.ignore.missing.warnings.contentpack': 'true', 'marketplace.bootstrap.bypass.url': 'https://storage.googleapis.com/marketplace-ci-build/content/builds/{}/{}'.format( branch_name, ci_build_number) } error_msg = ""Failed to set GCP bucket server config - with status code "" response_data, status_code, _ = update_server_configuration(client, server_configuration, error_msg) ","def set_marketplace_gcp_bucket_for_build(client, prints_manager, branch_name, ci_build_number): """"""Sets custom marketplace GCP bucket based on branch name and build number Args: client (demisto_client): The configured client to use. 
prints_manager (ParallelPrintsManager): Print manager object branch_name (str): GitHub branch name ci_build_number (str): CI build number Returns: None """""" host = client.api_client.configuration.host installed_content_message = \ '\nMaking ""POST"" request to server - ""{}"" to set GCP bucket server configuration.'.format(host) prints_manager.add_print_job(installed_content_message, print_color, 0, LOG_COLORS.GREEN) # make request to update server configs server_configuration = { 'content.pack.verify': 'false', 'marketplace.initial.sync.delay': '0', 'content.pack.ignore.missing.warnings.contentpack': 'true', 'marketplace.bootstrap.bypass.url': 'https://storage.googleapis.com/marketplace-ci-build/content/builds/{}/{}'.format( branch_name, ci_build_number) } error_msg = ""Failed to set GCP bucket server config - with status code "" return update_server_configuration(client, server_configuration, error_msg) " 32041,"def get_all_lists(args: dict, sg): params = {} pageSize = args.get('page_size') if pageSize: params['page_size'] = int(pageSize) pageToken = args.get('page_token') if pageToken: params['page_token'] = pageToken headers = args.get('headers') response = sg.client.marketing.lists.get(query_params=params) if response.status_code == 200: rBody = response.body body = json.loads(rBody.decode(""utf-8"")) ec = {'Sendgrid.Lists.Result': body['result'], 'Sendgrid.Lists.Metadata': body['_metadata']} if headers: if isinstance(headers, str): headers = headers.split("","") md = tableToMarkdown('All Lists details fetched successfully: ', body['result'], headers) return { 'ContentsFormat': formats['json'], 'Type': entryTypes['note'], 'Contents': body, 'HumanReadable': md, 'EntryContext': ec } else: return 'All lists fetch failed: ' + str(response.body) ","def get_all_lists(args: dict, sg): params = {} pageSize = args.get('page_size') if pageSize: params['page_size'] = int(pageSize) pageToken = args.get('page_token') if pageToken: params['page_token'] = pageToken headers = args.get('headers') response = sg.client.marketing.lists.get(query_params=params) if response.status_code == 200: rBody = response.body body = json.loads(rBody.decode(""utf-8"")) ec = {'Sendgrid.Lists.Result': body['result'], 'Sendgrid.Lists.Metadata': body['_metadata']} if headers: if isinstance(headers, str): headers = headers.split("","") md = tableToMarkdown('Lists information was fetched successfully: ', body['result'], headers) return { 'ContentsFormat': formats['json'], 'Type': entryTypes['note'], 'Contents': body, 'HumanReadable': md, 'EntryContext': ec } else: return 'All lists fetch failed: ' + str(response.body) " 38666,"def test_jsonext_dump(tmp_path): json_dump = tmp_path / 'test.json' json_dump_indent = tmp_path / 'test_indent.json' with open(json_dump, 'w') as f: jsonext.dump({'foo': sn.defer(['bar'])}, f) with open(json_dump, 'r') as f: assert '{""foo"": [""bar""]}' == f.read() with open(json_dump_indent, 'w') as f: jsonext.dump({'foo': sn.defer(['bar'])}, f, indent=2) with open(json_dump_indent, 'r') as f: assert '{\n ""foo"": [\n ""bar""\n ]\n}' == f.read() ","def test_jsonext_dump(tmp_path): json_dump = tmp_path / 'test.json' json_dump_indent = tmp_path / 'test_indent.json' with open(json_dump, 'w') as fp: jsonext.dump({'foo': sn.defer(['bar'])}, f) with open(json_dump, 'r') as f: assert '{""foo"": [""bar""]}' == f.read() with open(json_dump_indent, 'w') as f: jsonext.dump({'foo': sn.defer(['bar'])}, f, indent=2) with open(json_dump_indent, 'r') as f: assert '{\n ""foo"": [\n ""bar""\n ]\n}' == f.read() " 
58062,"def get_conforming_spyware_profiles( topology: Topology, device_filter_string: str = None, minimum_block_severities: str = ""critical,high"", minimum_alert_severities: str = ""medium,low"" ) -> List[PanosObjectReference]: """""" Returns all Anti-spyware profiles that conform to best practices. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device :param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode. :param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode. """""" return HygieneLookups.get_all_conforming_spyware_profiles( topology, device_filter_str=device_filter_string, minimum_block_severities=minimum_block_severities.split("",""), minimum_alert_severities=minimum_alert_severities.split("","") ) ","def get_conforming_spyware_profiles( topology: Topology, device_filter_string: str = None, minimum_block_severities: str = ""critical,high"", minimum_alert_severities: str = ""medium,low"" ) -> List[PanosObjectReference]: """""" Returns all Anti-spyware profiles that conform to best practices. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device :param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode. :param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode. """""" return HygieneLookups.get_all_conforming_spyware_profiles( topology, device_filter_str=device_filter_string, minimum_block_severities=argToList(minimum_block_severities), minimum_alert_severities=argToList(minimum_alert_severities) ) " 31078,"def main(): install_logging('Validate Premium Packs.log') logging.info('Retrieving the index fle') options = options_handler() index_file_path = unzip_index_and_return_index_file(options.index_path) # Validate index.json file index_data = check_and_return_index_data(index_file_path) # Validate commit hash in master history check_commit_in_master_history(index_data[""commit""], options.master_history) # Get the host by the ami env hosts, _ = Build.get_servers(ami_env=options.ami_env) logging.info('Retrieving the credentials for Cortex XSOAR server') secret_conf_file = get_json_file(path=options.secret) username: str = secret_conf_file.get('username') password: str = secret_conf_file.get('userPassword') # Check the marketplace for host in hosts: server = Server(host=host, user_name=username, password=password) paid_packs = get_paid_packs(client=server.client) if paid_packs is not None: logging.info(f'Verifying premium packs in {host}') verify_server_paid_packs_by_index(paid_packs, index_data[""packs""]) logging.success(f'All premium packs in host: {host} are valid') else: os.remove(index_file_path) logging.error(f'Missing premium packs in host: {host}') sys.exit(1) os.remove(index_file_path) ","def main(): install_logging('Validate Premium Packs.log') logging.info('Retrieving the index file') options = options_handler() index_file_path = unzip_index_and_return_index_file(options.index_path) # Validate index.json file index_data = check_and_return_index_data(index_file_path) # Validate commit hash in master history check_commit_in_master_history(index_data[""commit""], options.master_history) # Get the host by the ami env hosts, _ = Build.get_servers(ami_env=options.ami_env) logging.info('Retrieving the credentials for Cortex XSOAR server') secret_conf_file = 
get_json_file(path=options.secret) username: str = secret_conf_file.get('username') password: str = secret_conf_file.get('userPassword') # Check the marketplace for host in hosts: server = Server(host=host, user_name=username, password=password) paid_packs = get_paid_packs(client=server.client) if paid_packs is not None: logging.info(f'Verifying premium packs in {host}') verify_server_paid_packs_by_index(paid_packs, index_data[""packs""]) logging.success(f'All premium packs in host: {host} are valid') else: os.remove(index_file_path) logging.error(f'Missing premium packs in host: {host}') sys.exit(1) os.remove(index_file_path) " 7184,"def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r""""""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions ` for more details. .. deprecated:: 0.16.0 Use ""rc"" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. 
Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. **local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. **weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. 
**weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. [4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.729879860483141, 81.912285234465827) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.729879860483141, 81.912285234465827) """""" if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): raise TypeError('Non-integer image types are ambiguous:' 'use ndimage.label to label the connected' 'components of the image,' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates=""rc""` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than ""rc"" for the ""coordinates"" argument ' 'to skimage.measure.regionprops are no longer supported. ' 'You should update your code to use ""rc"" coordinates and ' 'stop using the ""coordinates"" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions ","def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r""""""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions ` for more details. .. 
deprecated:: 0.16.0 Use ""rc"" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. **local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. 
**moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. **weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. 
[4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.729879860483141, 81.912285234465827) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.729879860483141, 81.912285234465827) """""" if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): raise TypeError('Non-integer image types are ambiguous:' 'use skimage.measure.label to label the connected ' 'components of the image,' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates=""rc""` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than ""rc"" for the ""coordinates"" argument ' 'to skimage.measure.regionprops are no longer supported. ' 'You should update your code to use ""rc"" coordinates and ' 'stop using the ""coordinates"" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions " 42723,"def test_edit_snapshot(rotkehlchen_api_server): db = rotkehlchen_api_server.rest_api.rotkehlchen.data.db ts = ts_now() with db.user_write() as cursor: _populate_db_with_balances(cursor, db, ts) _populate_db_with_location_data(cursor, db, ts) snapshot_payload = { 'balances_snapshot': [ { 'timestamp': ts, 'category': 'asset', 'asset_identifier': 'AVAX', 'amount': '1000.00', 'usd_value': '12929.00', }, { 'timestamp': ts, 'category': 'asset', 'asset_identifier': NFT_TOKEN_ID, 'amount': '1000.00', 'usd_value': '12929.00', }, ], 'location_data_snapshot': [ {'timestamp': ts, 'location': 'external', 'usd_value': '12929.00'}, {'timestamp': ts, 'location': 'total', 'usd_value': '12929.00'}, ], } response = requests.patch( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), json=snapshot_payload, ) assert_proper_response(response) # compare the updated snapshot with snapshot in db response = requests.get( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), ) result = assert_proper_response_with_result(response) assert len(result['balances_snapshot']) == 2 assert len(result['location_data_snapshot']) == 2 assert result == snapshot_payload # test that editing a snapshot with an invalid data rollbacks previous snapshot if failure. 
invalid_snapshot_payload = { 'balances_snapshot': [ { 'timestamp': ts, 'category': 'asset', 'asset_identifier': 'MEME', 'amount': '1000.00', 'usd_value': '100.00', }, { 'timestamp': ts, 'category': 'asset', 'asset_identifier': 'MEME', 'amount': '1000.00', 'usd_value': '100.00', }, ], 'location_data_snapshot': [ {'timestamp': ts, 'location': 'external', 'usd_value': '200.00'}, {'timestamp': ts, 'location': 'total', 'usd_value': '200.00'}, ], } response = requests.patch( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), json=invalid_snapshot_payload, ) assert_error_response( response=response, contained_in_msg='Adding timed_balance failed', status_code=HTTPStatus.CONFLICT, ) # check that the previous snapshot remains unchanged response = requests.get( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), ) result = assert_proper_response_with_result(response) assert len(result['balances_snapshot']) == 2 assert len(result['location_data_snapshot']) == 2 assert result == snapshot_payload ","def test_edit_snapshot(rotkehlchen_api_server): db = rotkehlchen_api_server.rest_api.rotkehlchen.data.db ts = ts_now() with db.user_write() as cursor: _populate_db_with_balances(cursor, db, ts) _populate_db_with_location_data(cursor, db, ts) snapshot_payload = { 'balances_snapshot': [ { 'timestamp': ts, 'category': 'asset', 'asset_identifier': 'AVAX', 'amount': '1000.00', 'usd_value': '12929.00', }, { 'timestamp': ts, 'category': 'asset', 'asset_identifier': NFT_TOKEN_ID, 'amount': '1000.00', 'usd_value': '12929.00', }, ], 'location_data_snapshot': [ {'timestamp': ts, 'location': 'external', 'usd_value': '12929.00'}, {'timestamp': ts, 'location': 'total', 'usd_value': '12929.00'}, ], } response = requests.patch( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), json=snapshot_payload, ) assert_proper_response(response) # compare the updated snapshot with snapshot in db response = requests.get( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), ) result = assert_proper_response_with_result(response) assert len(result['balances_snapshot']) == 2 assert len(result['location_data_snapshot']) == 2 assert result == snapshot_payload # test that editing a snapshot rollbacks previous snapshot if failure. 
invalid_snapshot_payload = { 'balances_snapshot': [ { 'timestamp': ts, 'category': 'asset', 'asset_identifier': 'MEME', 'amount': '1000.00', 'usd_value': '100.00', }, { 'timestamp': ts, 'category': 'asset', 'asset_identifier': 'MEME', 'amount': '1000.00', 'usd_value': '100.00', }, ], 'location_data_snapshot': [ {'timestamp': ts, 'location': 'external', 'usd_value': '200.00'}, {'timestamp': ts, 'location': 'total', 'usd_value': '200.00'}, ], } response = requests.patch( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), json=invalid_snapshot_payload, ) assert_error_response( response=response, contained_in_msg='Adding timed_balance failed', status_code=HTTPStatus.CONFLICT, ) # check that the previous snapshot remains unchanged response = requests.get( api_url_for( rotkehlchen_api_server, 'per_timestamp_db_snapshots_resource', timestamp=ts, ), ) result = assert_proper_response_with_result(response) assert len(result['balances_snapshot']) == 2 assert len(result['location_data_snapshot']) == 2 assert result == snapshot_payload " 841,"def _inv_LU(M, iszerofunc=_iszero): """"""Calculates the inverse using LU decomposition. See Also ======== inv inverse_ADJ inverse_GE inverse_CH inverse_LDL """""" if not M.is_square: raise NonSquareMatrixError(""A Matrix must be square to invert."") if M.free_symbols != set(): _verify_invertible(M, iszerofunc=iszerofunc) return M.LUsolve(M.eye(M.rows), iszerofunc=_iszero) ","def _inv_LU(M, iszerofunc=_iszero): """"""Calculates the inverse using LU decomposition. See Also ======== inv inverse_ADJ inverse_GE inverse_CH inverse_LDL """""" if not M.is_square: raise NonSquareMatrixError(""A Matrix must be square to invert."") if M.free_symbols: _verify_invertible(M, iszerofunc=iszerofunc) return M.LUsolve(M.eye(M.rows), iszerofunc=_iszero) " 47111,"def parse_args(): parser = argparse.ArgumentParser(description=""Finetune a transformers model on a text classification task"") parser.add_argument( ""--dataset_name"", type=str, default=None, help=""The name of the dataset to use (via the datasets library)."", ) parser.add_argument( ""--dataset_config_name"", type=str, default=None, help=""The configuration name of the dataset to use (via the datasets library)."", ) parser.add_argument( ""--train_file"", type=str, default=None, help=""A csv or a json file containing the training data."" ) parser.add_argument( ""--validation_file"", type=str, default=None, help=""A csv or a json file containing the validation data."" ) parser.add_argument( ""--validation_split_percentage"", default=5, help=""The percentage of the train set used as validation set in case there's no validation split"", ) parser.add_argument( ""--model_name_or_path"", type=str, help=""Path to pretrained model or model identifier from huggingface.co/models."", required=True, ) parser.add_argument( ""--config_name"", type=str, default=None, help=""Pretrained config name or path if not the same as model_name"", ) parser.add_argument( ""--tokenizer_name"", type=str, default=None, help=""Pretrained tokenizer name or path if not the same as model_name"", ) parser.add_argument( ""--use_slow_tokenizer"", action=""store_true"", help=""If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library)."", ) parser.add_argument( ""--per_device_train_batch_size"", type=int, default=8, help=""Batch size (per device) for the training dataloader."", ) parser.add_argument( ""--per_device_eval_batch_size"", type=int, default=8, help=""Batch size (per device) for the 
evaluation dataloader."", ) parser.add_argument( ""--learning_rate"", type=float, default=5e-5, help=""Initial learning rate (after the potential warmup period) to use."", ) parser.add_argument(""--weight_decay"", type=float, default=0.0, help=""Weight decay to use."") parser.add_argument(""--num_train_epochs"", type=int, default=3, help=""Total number of training epochs to perform."") parser.add_argument( ""--max_train_steps"", type=int, default=None, help=""Total number of training steps to perform. If provided, overrides num_train_epochs."", ) parser.add_argument( ""--gradient_accumulation_steps"", type=int, default=1, help=""Number of updates steps to accumulate before performing a backward/update pass."", ) parser.add_argument( ""--lr_scheduler_type"", type=SchedulerType, default=""linear"", help=""The scheduler type to use."", choices=[""linear"", ""cosine"", ""cosine_with_restarts"", ""polynomial"", ""constant"", ""constant_with_warmup""], ) parser.add_argument( ""--num_warmup_steps"", type=int, default=0, help=""Number of steps for the warmup in the lr scheduler."" ) parser.add_argument(""--output_dir"", type=str, default=None, help=""Where to store the final model."") parser.add_argument(""--seed"", type=int, default=None, help=""A seed for reproducible training."") parser.add_argument( ""--model_type"", type=str, default=None, help=""Model type to use if training from scratch."", choices=MODEL_TYPES, ) parser.add_argument( ""--block_size"", type=int, default=None, help=""Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training. Default to the model max input length for single sentence inputs (take into account special tokens)."", ) parser.add_argument( ""--preprocessing_num_workers"", type=int, default=None, help=""The number of processes to use for the preprocessing."", ) parser.add_argument( ""--overwrite_cache"", type=bool, default=False, help=""Overwrite the cached training and evaluation sets"" ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError(""Need either a dataset name or a training/validation file."") else: if args.train_file is not None: extension = args.train_file.split(""."")[-1] assert extension in [""csv"", ""json"", ""txt""], ""`train_file` should be a csv, json or txt file."" if args.validation_file is not None: extension = args.validation_file.split(""."")[-1] assert extension in [""csv"", ""json"", ""txt""], ""`validation_file` should be a csv, json or txt file."" if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args ","def parse_args(): parser = argparse.ArgumentParser(description=""Finetune a transformers model on a causal language modeling task"") parser.add_argument( ""--dataset_name"", type=str, default=None, help=""The name of the dataset to use (via the datasets library)."", ) parser.add_argument( ""--dataset_config_name"", type=str, default=None, help=""The configuration name of the dataset to use (via the datasets library)."", ) parser.add_argument( ""--train_file"", type=str, default=None, help=""A csv or a json file containing the training data."" ) parser.add_argument( ""--validation_file"", type=str, default=None, help=""A csv or a json file containing the validation data."" ) parser.add_argument( ""--validation_split_percentage"", default=5, help=""The percentage of the train set used as validation set in case there's no validation split"", ) 
parser.add_argument( ""--model_name_or_path"", type=str, help=""Path to pretrained model or model identifier from huggingface.co/models."", required=True, ) parser.add_argument( ""--config_name"", type=str, default=None, help=""Pretrained config name or path if not the same as model_name"", ) parser.add_argument( ""--tokenizer_name"", type=str, default=None, help=""Pretrained tokenizer name or path if not the same as model_name"", ) parser.add_argument( ""--use_slow_tokenizer"", action=""store_true"", help=""If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library)."", ) parser.add_argument( ""--per_device_train_batch_size"", type=int, default=8, help=""Batch size (per device) for the training dataloader."", ) parser.add_argument( ""--per_device_eval_batch_size"", type=int, default=8, help=""Batch size (per device) for the evaluation dataloader."", ) parser.add_argument( ""--learning_rate"", type=float, default=5e-5, help=""Initial learning rate (after the potential warmup period) to use."", ) parser.add_argument(""--weight_decay"", type=float, default=0.0, help=""Weight decay to use."") parser.add_argument(""--num_train_epochs"", type=int, default=3, help=""Total number of training epochs to perform."") parser.add_argument( ""--max_train_steps"", type=int, default=None, help=""Total number of training steps to perform. If provided, overrides num_train_epochs."", ) parser.add_argument( ""--gradient_accumulation_steps"", type=int, default=1, help=""Number of updates steps to accumulate before performing a backward/update pass."", ) parser.add_argument( ""--lr_scheduler_type"", type=SchedulerType, default=""linear"", help=""The scheduler type to use."", choices=[""linear"", ""cosine"", ""cosine_with_restarts"", ""polynomial"", ""constant"", ""constant_with_warmup""], ) parser.add_argument( ""--num_warmup_steps"", type=int, default=0, help=""Number of steps for the warmup in the lr scheduler."" ) parser.add_argument(""--output_dir"", type=str, default=None, help=""Where to store the final model."") parser.add_argument(""--seed"", type=int, default=None, help=""A seed for reproducible training."") parser.add_argument( ""--model_type"", type=str, default=None, help=""Model type to use if training from scratch."", choices=MODEL_TYPES, ) parser.add_argument( ""--block_size"", type=int, default=None, help=""Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training. 
Default to the model max input length for single sentence inputs (take into account special tokens)."", ) parser.add_argument( ""--preprocessing_num_workers"", type=int, default=None, help=""The number of processes to use for the preprocessing."", ) parser.add_argument( ""--overwrite_cache"", type=bool, default=False, help=""Overwrite the cached training and evaluation sets"" ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError(""Need either a dataset name or a training/validation file."") else: if args.train_file is not None: extension = args.train_file.split(""."")[-1] assert extension in [""csv"", ""json"", ""txt""], ""`train_file` should be a csv, json or txt file."" if args.validation_file is not None: extension = args.validation_file.split(""."")[-1] assert extension in [""csv"", ""json"", ""txt""], ""`validation_file` should be a csv, json or txt file."" if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args " 31826,"def filter_vendor_fields(alert: dict): """"""Remove non relevant fields from the alert event (filter by vendor: Amazon/google/Microsoft) Args: alert (dict): The alert to filter Returns: dict: The filtered alert """""" vendor_mapper = { 'Amazon': ALERT_EVENT_AWS_FIELDS, 'Google': ALERT_EVENT_GCP_FIELDS, 'MSFT': ALERT_EVENT_AZURE_FIELDS, } event = alert.get('event', {}) vendor = event.get('vendor') if vendor and vendor in vendor_mapper: raw_log = event.get('raw_log', {}) if raw_log and isinstance(raw_log, dict): for key in list(raw_log): if key not in vendor_mapper[vendor]: raw_log.pop(key) ","def filter_vendor_fields(alert: dict): """"""Remove non relevant fields from the alert event (filter by vendor: Amazon/google/Microsoft) Args: alert (dict): The alert to filter Returns: dict: The filtered alert """""" vendor_mapper = { 'Amazon': ALERT_EVENT_AWS_FIELDS, 'Google': ALERT_EVENT_GCP_FIELDS, 'MSFT': ALERT_EVENT_AZURE_FIELDS, } event = alert.get('event', {}) vendor = event.get('vendor') if vendor and vendor in vendor_mapper: raw_log = event.get('raw_log', {}) if raw_log and isinstance(raw_log, dict): for key in raw_log: if key not in vendor_mapper[vendor]: raw_log.pop(key) " 46895,"def write_model_card( hf_model_name: str, repo_path=""OPUS-MT-train/models/"", dry_run=False, model_card_dir=Path(""marian_converted/model_cards/Helsinki-NLP/""), ) -> str: """"""Copy the most recent model's readme section from opus, and add metadata. upload command: s3cmd sync --recursive model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ """""" hf_model_name = remove_prefix(hf_model_name, ORG_NAME) opus_name: str = convert_hf_name_to_opus_name(hf_model_name) opus_src, opus_tgt = [x.split(""+"") for x in opus_name.split(""-"")] readme_url = OPUS_GITHUB_URL + f""{opus_name}/README.md"" s, t = "","".join(opus_src), "","".join(opus_tgt) extra_markdown = f""### {hf_model_name}\n\n* source languages: {s}\n* target languages: {t}\n* OPUS readme: [{opus_name}]({readme_url})\n"" # combine with opus markdown opus_readme_path = Path(f""{repo_path}{opus_name}/README.md"") assert opus_readme_path.exists(), f""Readme path {opus_readme_path} not found"" content = opus_readme_path.open().read() content = content.split(""\n# "")[-1] # Get the lowest level 1 header in the README -- the most recent model. 
content = ""*"".join(content.split(""*"")[1:]) content = extra_markdown + ""\n* "" + content.replace(""download"", ""download original weights"") if dry_run: return content # Save string to model_cards/hf_model_name/readme.md model_card_dir.mkdir(exist_ok=True) sub_dir = model_card_dir / hf_model_name sub_dir.mkdir(exist_ok=True) dest = sub_dir / ""README.md"" dest.open(""w"").write(content) return content ","def write_model_card( hf_model_name: str, repo_path=""OPUS-MT-train/models/"", dry_run=False, model_card_dir=Path(""marian_converted/model_cards/Helsinki-NLP/""), ) -> str: """"""Copy the most recent model's readme section from opus, and add metadata. upload command: s3cmd sync --recursive model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ """""" hf_model_name = remove_prefix(hf_model_name, ORG_NAME) opus_name: str = convert_hf_name_to_opus_name(hf_model_name) opus_src, opus_tgt = [x.split(""+"") for x in opus_name.split(""-"")] readme_url = OPUS_GITHUB_URL + f""{opus_name}/README.md"" s, t = "","".join(opus_src), "","".join(opus_tgt) extra_markdown = f""### {hf_model_name}\n\n* source languages: {s}\n* target languages: {t}\n* OPUS readme: [{opus_name}]({readme_url})\n"" # combine with opus markdown opus_readme_path = Path(f""{repo_path}{opus_name}/README.md"") assert opus_readme_path.exists(), f""Readme file {opus_readme_path} not found"" content = opus_readme_path.open().read() content = content.split(""\n# "")[-1] # Get the lowest level 1 header in the README -- the most recent model. content = ""*"".join(content.split(""*"")[1:]) content = extra_markdown + ""\n* "" + content.replace(""download"", ""download original weights"") if dry_run: return content # Save string to model_cards/hf_model_name/readme.md model_card_dir.mkdir(exist_ok=True) sub_dir = model_card_dir / hf_model_name sub_dir.mkdir(exist_ok=True) dest = sub_dir / ""README.md"" dest.open(""w"").write(content) return content " 7457,"def adapted_rand_error(image_true=None, image_test=None, *, table=None, ignore_labels=(0,), alpha=0.5): r""""""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_ Parameters ---------- image_true : ndarray of int Ground-truth label image, same shape as im_test. image_test : ndarray of int Test image. table : scipy.sparse array in crs format, optional A contingency table built with skimage.evaluate.contingency_table. If None, it will be computed on the fly. ignore_labels : sequence of int, optional Labels to ignore. Any part of the true image labeled with any of these values will not be counted in the score. alpha : float, optional A float value bounded [0,1] controlling the relative weight given to precision and recall in the adapted Rand error calculation. Default is to weight precision and recall equally. When alpha = 0, adapted Rand error = recall. When alpha = 1, adapted Rand error = precision. Returns ------- are : float The adapted Rand error; equal to :math:`1 - \frac{\sum_{ij} p_{ij}^{2}}{\alpha \sum_{k} s_{k}^{2} + (1-\alpha)\sum_{k} t_{k}^{2}}`, where :math:`p_{ij}` is the probability that a pixel has the same label in the test image *and* in the true image, :math:`t_{k}` is the probability that a pixel has label :math:`k` in the true image, and :math:`s_{k}` is the probability that a pixel has label :math:`k` in the test image. prec : float The adapted Rand precision: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the test image. 
rec : float The adapted Rand recall: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the true image. Notes ----- Pixels with label 0 in the true segmentation are ignored in the score. References ---------- .. [1] Arganda-Carreras I, Turaga SC, Berger DR, et al. (2015) Crowdsourcing the creation of image segmentation algorithms for connectomics. Front. Neuroanat. 9:142. :DOI:`10.3389/fnana.2015.00142` """""" if image_test is not None and image_true is not None: check_shape_equality(image_true, image_test) if table is None: p_ij = contingency_table(image_true, image_test, ignore_labels=ignore_labels, normalize=False) else: p_ij = table if alpha < 0.0 or alpha > 1.0: raise ValueError('alpha must be between 0 and 1') # Sum of the joint distribution squared sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum() a_i = p_ij.sum(axis=1).A.ravel() b_i = p_ij.sum(axis=0).A.ravel() # Sum of squares of the test segment sizes (this is 2x the number of pairs # of pixels with the same label in im_test) sum_a2 = a_i @ a_i - a_i.sum() # Same for im_true sum_b2 = b_i @ b_i - b_i.sum() precision = sum_p_ij2 / sum_a2 recall = sum_p_ij2 / sum_b2 fscore = sum_p_ij2 / (alpha * sum_a2 + (1 - alpha) * sum_b2) are = 1. - fscore return are, precision, recall ","def adapted_rand_error(image_true=None, image_test=None, *, table=None, ignore_labels=(0,), alpha=0.5): r""""""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_ Parameters ---------- image_true : ndarray of int Ground-truth label image, same shape as im_test. image_test : ndarray of int Test image. table : scipy.sparse array in crs format, optional A contingency table built with skimage.evaluate.contingency_table. If None, it will be computed on the fly. ignore_labels : sequence of int, optional Labels to ignore. Any part of the true image labeled with any of these values will not be counted in the score. alpha : float, optional A float value bounded [0,1] controlling the relative weight given to precision and recall in the adapted Rand error calculation. Default is to weight precision and recall equally. When alpha = 0, adapted Rand error = recall. When alpha = 1, adapted Rand error = precision. Returns ------- are : float The adapted Rand error. prec : float The adapted Rand precision: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the test image. rec : float The adapted Rand recall: this is the number of pairs of pixels that have the same label in the test label image *and* in the true image, divided by the number in the true image. Notes ----- Pixels with label 0 in the true segmentation are ignored in the score. References ---------- .. [1] Arganda-Carreras I, Turaga SC, Berger DR, et al. (2015) Crowdsourcing the creation of image segmentation algorithms for connectomics. Front. Neuroanat. 9:142. 
:DOI:`10.3389/fnana.2015.00142` """""" if image_test is not None and image_true is not None: check_shape_equality(image_true, image_test) if table is None: p_ij = contingency_table(image_true, image_test, ignore_labels=ignore_labels, normalize=False) else: p_ij = table if alpha < 0.0 or alpha > 1.0: raise ValueError('alpha must be between 0 and 1') # Sum of the joint distribution squared sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum() a_i = p_ij.sum(axis=1).A.ravel() b_i = p_ij.sum(axis=0).A.ravel() # Sum of squares of the test segment sizes (this is 2x the number of pairs # of pixels with the same label in im_test) sum_a2 = a_i @ a_i - a_i.sum() # Same for im_true sum_b2 = b_i @ b_i - b_i.sum() precision = sum_p_ij2 / sum_a2 recall = sum_p_ij2 / sum_b2 fscore = sum_p_ij2 / (alpha * sum_a2 + (1 - alpha) * sum_b2) are = 1. - fscore return are, precision, recall " 3737,"def eq(v, w, msg=''): result = allclose(v, w) if not result: print(f'Not eq:{msg}\n{str(v)}\n----{str(w)}') return result ","def eq(v, w, msg=''): result = allclose(v, w) if not result: print(f'Not eq:{msg}\n{v}\n----{w}') return result " 8417,"def linear_exciser(spectrum, region): """""" Basic spectral excise method where the spectral region defined by the parameter ``region`` (a `~specutils.SpectralRegion`) will result in the flux between those regions set to a linear ramp of the two points immediately before and after the start and end of the region. Other methods could be defined by the user to do other types of excision. Parameters ---------- spectrum : `~specutils.Spectrum1D` The `~specutils.Spectrum1D` object to which the excision will be applied. region : `~specutils.SpectralRegion` The region of the spectrum to replace. Returns ------- spectrum : `~specutils.Spectrum1D` Output `~specutils.Spectrum1D` with the region excised. Raises ------ ValueError In the case that ``spectrum`` and ``region`` are not the correct types. """""" wavelengths = spectrum.spectral_axis.copy() flux = spectrum.flux.copy() modified_flux = flux if spectrum.uncertainty is not None: new_uncertainty = spectrum.uncertainty.copy() else: new_uncertainty = None # Need to add a check that the subregions don't overlap, since that could # cause undesired results. For now warn if there is more than one subregion if len(region) > 1: # Raise a warning if the SpectralRegion has more than one subregion, since # the handling for this is perhaps unexpected warnings.warn(""A SpectralRegion with multiple subregions was provided as "" ""input. This may lead to undesired behavior with linear_exciser if "" ""the subregions overlap."", AstropyUserWarning) for subregion in region: # Find the indices of the spectral_axis array corresponding to the subregion wavelengths_in = (wavelengths >= subregion.lower) & (wavelengths < subregion.upper) inclusive_indices = np.nonzero(wavelengths_in)[0] # Now set the flux values for these indices to be a # linear range s, e = max(inclusive_indices[0]-1, 0), min(inclusive_indices[-1]+1, wavelengths.size-1) modified_flux[s:e+1] = np.linspace(flux[s], flux[e], modified_flux[s:e+1].size) # Add the uncertainty of the two linear interpolation endpoints in # quadrature and apply to the excised region. if new_uncertainty is not None: new_uncertainty[s:e] = np.sqrt(spectrum.uncertainty[s]**2 + spectrum.uncertainty[e]**2) # Return a new object with the regions excised. 
return Spectrum1D(flux=modified_flux, spectral_axis=wavelengths, uncertainty=new_uncertainty, wcs=spectrum.wcs, mask = spectrum.mask, velocity_convention=spectrum.velocity_convention, rest_value=spectrum.rest_value, radial_velocity = spectrum.radial_velocity) ","def linear_exciser(spectrum, region): """""" Basic spectral excise method where the spectral region defined by the parameter ``region`` (a `~specutils.SpectralRegion`) will result in the flux between those regions set to a linear ramp of the two points immediately before and after the start and end of the region. Other methods could be defined by the user to do other types of excision. Parameters ---------- spectrum : `~specutils.Spectrum1D` The `~specutils.Spectrum1D` object to which the excision will be applied. region : `~specutils.SpectralRegion` The region of the spectrum to replace. Returns ------- spectrum : `~specutils.Spectrum1D` Output `~specutils.Spectrum1D` with the region excised. Raises ------ ValueError In the case that ``spectrum`` and ``region`` are not the correct types. """""" wavelengths = spectrum.spectral_axis.copy() flux = spectrum.flux.copy() modified_flux = flux if spectrum.uncertainty is not None: new_uncertainty = spectrum.uncertainty.copy() else: new_uncertainty = None # Need to add a check that the subregions don't overlap, since that could # cause undesired results. For now warn if there is more than one subregion if len(region) > 1: # Raise a warning if the SpectralRegion has more than one subregion, since # the handling for this is perhaps unexpected warnings.warn(""A SpectralRegion with multiple subregions was provided as "" ""input. This may lead to undesired behavior with linear_exciser if "" ""the subregions overlap."", AstropyUserWarning) for subregion in region: # Find the indices of the spectral_axis array corresponding to the subregion wavelengths_in = (wavelengths >= subregion.lower) & (wavelengths < subregion.upper) inclusive_indices = np.nonzero(wavelengths_in)[0] # Now set the flux values for these indices to be a # linear range s, e = max(inclusive_indices[0]-1, 0), min(inclusive_indices[-1]+1, wavelengths.size-1) modified_flux[s:e+1] = np.linspace(flux[s], flux[e], modified_flux[s:e+1].size) # Add the uncertainty of the two linear interpolation endpoints in # quadrature and apply to the excised region. if new_uncertainty is not None: new_uncertainty[s:e] = np.sqrt(spectrum.uncertainty[s]**2 + spectrum.uncertainty[e]**2) # Return a new object with the regions excised. return Spectrum1D(flux=modified_flux, spectral_axis=spectral_axis, uncertainty=new_uncertainty, wcs=spectrum.wcs, mask = spectrum.mask, velocity_convention=spectrum.velocity_convention, rest_value=spectrum.rest_value, radial_velocity = spectrum.radial_velocity) " 54219,"def list_modules( search_dir: str = os.path.join(os.path.dirname(__file__), '..'), include_parent: bool = False ) -> List[Module]: """"""Returns a list of python modules based defined by setup.py files. Args: include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding module will be included. search_dir: the search directory for modules, by default the repo root. 
Returns: a list of `Module`s that were found, where each module `m` is initialized with `m.root` relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the keyword args passed to the `setuptools.setup` method in setup.py """""" relative_folders = sorted( [ f for f in os.listdir(search_dir) if os.path.isdir(os.path.join(search_dir, f)) and os.path.isfile(os.path.join(search_dir, f, ""setup.py"")) ] ) if include_parent: parent_setup_py = os.path.join(search_dir, ""setup.py"") assert os.path.isfile( parent_setup_py ), f""include_parent=True, but {parent_setup_py} does not exist."" relative_folders.append(""."") result = [ Module(root=folder, raw_setup=_parse_module(os.path.join(search_dir, folder))) for folder in relative_folders ] return result ","def list_modules( search_dir: str = os.path.join(os.path.dirname(__file__), '..'), include_parent: bool = False ) -> List[Module]: """"""Returns a list of python modules based defined by setup.py files. Args: include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding module will be included. search_dir: the search directory for modules, by default the repo root. Returns: a list of `Module`s that were found, where each module `m` is initialized with `m.root` relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the keyword args passed to the `setuptools.setup` method in setup.py """""" relative_folders = sorted( f for f in os.listdir(search_dir) if os.path.isdir(os.path.join(search_dir, f)) and os.path.isfile(os.path.join(search_dir, f, ""setup.py"")) ) relative_folders = sorted( f for f in search_dir.iterdir() if f.is_dir() and f.joinpath(""setup.py"").is_file() ) if include_parent: parent_setup_py = os.path.join(search_dir, ""setup.py"") assert os.path.isfile( parent_setup_py ), f""include_parent=True, but {parent_setup_py} does not exist."" relative_folders.append(""."") result = [ Module(root=folder, raw_setup=_parse_module(os.path.join(search_dir, folder))) for folder in relative_folders ] return result " 20456,"def fill_mail_tracking_value_track_sequence(env): env.cr.execute( """""" SELECT DISTINCT mtv.field, mm.model FROM mail_tracking_value mtv INNER JOIN mail_message mm ON mtv.mail_message_id = mm.id INNER JOIN ir_model_fields imf ON ( imf.name = mtv.field AND mm.model = imf.model) WHERE mm.model IS NOT NULL AND imf.track_visibility IS NOT NULL AND imf.track_visibility != 'false' """""" ) for field, model in env.cr.fetchall(): if field in list(env[model]._fields.keys()): sequence = getattr(env[model]._fields[field], 'track_sequence', 100) if sequence != 100: env.cr.execute( """""" UPDATE mail_tracking_value mtv2 SET track_sequence = %s FROM mail_tracking_value mtv INNER JOIN mail_message mm ON mtv.mail_message_id = mm.id WHERE mtv2.id = mtv.id AND mtv.field = %s AND mm.model = %s """""" % (sequence, field, model) ) ","def fill_mail_tracking_value_track_sequence(env): env.cr.execute( """""" SELECT DISTINCT mtv.field, mm.model FROM mail_tracking_value mtv INNER JOIN mail_message mm ON mtv.mail_message_id = mm.id INNER JOIN ir_model_fields imf ON ( imf.name = mtv.field AND mm.model = imf.model) WHERE mm.model IS NOT NULL AND imf.track_visibility IS NOT NULL AND imf.track_visibility != 'false' """""" ) for field, model in env.cr.fetchall(): if env.get(model) and field in list(env[model]._fields.keys()): sequence = getattr(env[model]._fields[field], 'track_sequence', 100) if sequence != 100: env.cr.execute( """""" UPDATE mail_tracking_value mtv2 SET track_sequence = %s 
FROM mail_tracking_value mtv INNER JOIN mail_message mm ON mtv.mail_message_id = mm.id WHERE mtv2.id = mtv.id AND mtv.field = %s AND mm.model = %s """""" % (sequence, field, model) ) " 41714,"def get_extras_require(): # type: () -> Dict[str, List[str]] extras_require = { 'checking': ['autopep8', 'hacking'], 'testing': [ 'pytest', 'mock', 'bokeh', 'plotly', 'chainer>=5.0.0', 'xgboost', 'mpi4py', 'lightgbm', 'keras', 'mxnet', 'scikit-optimize', 'tensorflow', 'cma', 'dask-ml', 'dask[dataframe]' ], 'document': ['sphinx', 'sphinx_rtd_theme'], 'codecov': ['pytest-cov', 'codecov'], } if sys.version_info >= (3, 5): # mypy does not support Python 2.x. extras_require['checking'].append('mypy') return extras_require ","def get_extras_require(): # type: () -> Dict[str, List[str]] extras_require = { 'checking': ['autopep8', 'hacking'], 'testing': [ 'pytest', 'mock', 'bokeh', 'plotly', 'chainer>=5.0.0', 'xgboost', 'mpi4py', 'lightgbm', 'keras', 'mxnet', 'scikit-optimize', 'tensorflow', 'cma' ], 'document': ['sphinx', 'sphinx_rtd_theme'], 'codecov': ['pytest-cov', 'codecov'], } if sys.version_info >= (3, 5): # mypy does not support Python 2.x. extras_require['checking'].append('mypy') return extras_require " 39111,"def run(argv): """"""Main entry point; defines and runs the wordcount pipeline."""""" opts = PipelineOptions() opts.view_as(SetupOptions).save_main_session = True o = opts.view_as(MyOptions) with beam.Pipeline(options=opts) as p: # Read the text file[pattern] into a PCollection. (p | 'read' >> ReadFromText(o.input) | 'Get DLP Findings' >> beam.ParDo(DlpFindingDoFn(o.project_id)) | 'Determine if client SSN' >> beam.ParDo(ExistingSSNsDoFn(o.project_id, o.salt, o.secret_name, o.collection_name)) | 'Count findings' >> beam.combiners.Count.Globally() | 'Write to Pubsub' >> beam.ParDo(WriteToPubsub(o.project_id, o.topic, o.input))) ","def run(argv): """"""Main entry point; defines and runs the wordcount pipeline."""""" opts = PipelineOptions() opts.view_as(SetupOptions).save_main_session = True o = opts.view_as(MyOptions) with beam.Pipeline(options=opts) as p: # Read the text file[pattern] into a PCollection. 
(p | 'read gcs source' >> ReadFromText(o.input) | 'Get DLP Findings' >> beam.ParDo(DlpFindingDoFn(o.project_id)) | 'Determine if client SSN' >> beam.ParDo(ExistingSSNsDoFn(o.project_id, o.salt, o.secret_name, o.collection_name)) | 'Count findings' >> beam.combiners.Count.Globally() | 'Write to Pubsub' >> beam.ParDo(WriteToPubsub(o.project_id, o.topic, o.input))) " 28340,"def test_deprecated_driver_keyword(): st = station_from_config_str("""""" instruments: mock: driver: qcodes.tests.instrument_mocks type: DummyChannelInstrument """""") message = deprecation_message( 'use of the ""driver""-keyword in the station configuration file', alternative='the ""type""-keyword instead, prepending the driver value' "" to it"", ) with pytest.warns(QCoDeSDeprecationWarning, match=message): st.load_instrument('mock') ","def test_deprecated_driver_keyword(): st = station_from_config_str("""""" instruments: mock: driver: qcodes.tests.instrument_mocks type: DummyChannelInstrument """""") message = deprecation_message( 'use of the ""driver""-keyword in the station configuration file', alternative='the ""type""-keyword instead, prepending the driver value to it', ) with pytest.warns(QCoDeSDeprecationWarning, match=message): st.load_instrument('mock') " 51554,"def gen4_query_aggregated_metadata(reporton: str, ds: Dataset, aps: List[Dict], recursive: bool = False, **kwargs): """"""Query metadata in a metadata store Query paths (`aps[""path""]`) have to be contained in the poth of the ds. This requirement is due to the colling conventions of the legacy implementation. This function doesn't cache anything, hence the caller must make sure to only call this once per dataset to avoid waste. Parameters ---------- reporton : {None, 'none', 'datasets', 'files', 'all'} If `None`, reporting will be based on the `type` property of the incoming annotated paths. ds : Dataset Dataset to query aps : list Sequence of annotated paths to query metadata for. recursive : bool Whether or not to report metadata underneath all query paths recursively. **kwargs Any other argument will be passed on to the query result dictionary. Returns ------- generator Of result dictionaries. 
"""""" annotated_paths = aps dataset = ds matching_types = { None: None, ""files"": (""file"",), ""datasets"": (""dataset"",), ""all"": (""dataset"", ""file"") }[reporton] for annotated_path in annotated_paths: relative_path = Path(annotated_path[""path""]).relative_to(dataset.pathobj) if matching_types is None: matching_types = (annotated_path[""type""],) try: for dump_result in Dump()(dataset=dataset.pathobj, path=str(relative_path), recursive=recursive, result_renderer=""disabled"", return_type=""generator""): if dump_result[""status""] != ""ok"": continue metadata = dump_result[""metadata""] if metadata[""type""] not in matching_types: continue yield { **kwargs, ""status"": ""ok"", ""type"": metadata[""type""], ""path"": str(dump_result[""path""]), ""dsid"": metadata[""dataset_id""], ""refcommit"": metadata[""dataset_version""], ""metadata"": { metadata[""extractor_name""]: metadata[""extracted_metadata""] } } except NoMetadataStoreFound: lgr.warning(f""Found no gen4-metadata in dataset {dataset.pathobj}."") if len(matching_types) == 2: matching_type = ""all"" elif len(matching_types) == 0: matching_type = ""none"" else: matching_type = matching_types[0] yield { **kwargs, 'path': str(ds.pathobj / relative_path), 'status': 'impossible', 'message': f'Dataset at {ds.pathobj} does not contain gen4 ' f'metadata', 'type': matching_type } return None ","def gen4_query_aggregated_metadata(reporton: str, ds: Dataset, aps: List[Dict], recursive: bool = False, **kwargs): """"""Query metadata in a metadata store Query paths (`aps[""path""]`) have to be contained in the poth of the ds. This requirement is due to the colling conventions of the legacy implementation. This function doesn't cache anything, hence the caller must make sure to only call this once per dataset to avoid waste. Parameters ---------- reporton : {None, 'none', 'datasets', 'files', 'all'} If `None`, reporting will be based on the `type` property of the incoming annotated paths. ds : Dataset Dataset to query aps : list Sequence of annotated paths to query metadata for. recursive : bool Whether or not to report metadata underneath all query paths recursively. **kwargs Any other argument will be passed on to the query result dictionary. Returns ------- generator Of result dictionaries. 
"""""" annotated_paths = aps dataset = ds matching_types = { None: None, ""files"": (""file"",), ""datasets"": (""dataset"",), ""all"": (""dataset"", ""file"") }[reporton] for annotated_path in annotated_paths: relative_path = Path(annotated_path[""path""]).relative_to(dataset.pathobj) if matching_types is None: matching_types = (annotated_path[""type""],) try: for dump_result in Dump()(dataset=dataset.pathobj, path=str(relative_path), recursive=recursive, result_renderer=""disabled"", return_type=""generator""): if dump_result[""status""] != ""ok"": continue metadata = dump_result[""metadata""] if metadata[""type""] not in matching_types: continue yield { **kwargs, ""status"": ""ok"", ""type"": metadata[""type""], ""path"": str(dump_result[""path""]), ""dsid"": metadata[""dataset_id""], ""refcommit"": metadata[""dataset_version""], ""metadata"": { metadata[""extractor_name""]: metadata[""extracted_metadata""] } } except NoMetadataStoreFound: lgr.warning(f""Found no gen4-metadata in dataset {dataset.pathobj}."") if len(matching_types) == 2: matching_type = ""all"" elif len(matching_types) == 0: matching_type = ""none"" elif len(matching_types) == 1: matching_type = matching_types[0] else: raise RuntimeError(f""Was expecting matching_types with 1 element, got {matching_types}"") yield { **kwargs, 'path': str(ds.pathobj / relative_path), 'status': 'impossible', 'message': f'Dataset at {ds.pathobj} does not contain gen4 ' f'metadata', 'type': matching_type } return None " 30448,"def get_poll_minutes(current_time: datetime, received: Optional[str]): """""" Get the interval to wait before polling again in minutes. :param current_time: The current time. :param received: The time when the polling request was received. :return: Total minutes to wait before polling. """""" poll_time_minutes = 1 if received: received_time = datetime.strptime(received, DATE_FORMAT) total_delta = current_time - received_time total_minutes = total_delta.total_seconds() / 60 for minute_range, interval in POLL_INTERVAL_MINUTES.items(): if len(minute_range) > 1 and total_minutes > minute_range[1]: continue poll_time_minutes = interval break return poll_time_minutes ","def get_poll_minutes(current_time: datetime, received: Optional[str]) -> int: """""" Get the interval to wait before polling again in minutes. :param current_time: The current time. :param received: The time when the polling request was received. :return: Total minutes to wait before polling. """""" poll_time_minutes = 1 if received: received_time = datetime.strptime(received, DATE_FORMAT) total_delta = current_time - received_time total_minutes = total_delta.total_seconds() / 60 for minute_range, interval in POLL_INTERVAL_MINUTES.items(): if len(minute_range) > 1 and total_minutes > minute_range[1]: continue poll_time_minutes = interval break return poll_time_minutes " 53598,"def create(shape, chunks=True, dtype=None, compressor='default', fill_value=0, order='C', store=None, synchronizer=None, overwrite=False, path=None, chunk_store=None, filters=None, cache_metadata=True, cache_attrs=True, read_only=False, object_codec=None, dimension_separator=None, write_empty_chunks=True, storage_transformers=None, *, zarr_version=None, **kwargs): """"""Create an array. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. If True, will be guessed from `shape` and `dtype`. If False, will be set to `shape`, i.e., single chunk for the whole array. 
If an int, the chunk size in each dimension will be given by the value of `chunks`. Default is True. dtype : string or dtype, optional NumPy dtype. compressor : Codec, optional Primary compressor. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Memory layout to be used within each chunk. store : MutableMapping or string Store or path to directory in file system or name of zip file. synchronizer : object, optional Array synchronizer. overwrite : bool, optional If True, delete all pre-existing data in `store` at `path` before creating the array. path : string, optional Path under which array is stored. chunk_store : MutableMapping, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. filters : sequence of Codecs, optional Sequence of filters to use to encode chunk data prior to compression. cache_metadata : bool, optional If True, array configuration metadata will be cached for the lifetime of the object. If False, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. read_only : bool, optional True if array should be protected against modification. object_codec : Codec, optional A codec to encode object arrays, only needed if dtype=object. dimension_separator : {'.', '/'}, optional Separator placed between the dimensions of a chunk. .. versionadded:: 2.8 write_empty_chunks : bool, optional If True (default), all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not be stored, and the store entry for that chunk's key is deleted. This setting enables sparser storage, as only chunks with non-fill-value data are stored, at the expense of overhead associated with checking the data of each chunk. .. versionadded:: 2.11 storage_transformers : sequence of StorageTransformers, optional May only be set when using zarr_version 3. Setting storage transformers, changing the storage structure and behaviour of data coming in the underlying store. The transformers are applied in the order of the given sequence. .. versionadded:: 2.13 zarr_version : {None, 2, 3}, optional The zarr protocol version of the created array. If None, it will be inferred from ``store`` or ``chunk_store`` if they are provided, otherwise defaulting to 2. Returns ------- z : zarr.core.Array Examples -------- Create an array with default settings:: >>> import zarr >>> z = zarr.create((10000, 10000), chunks=(1000, 1000)) >>> z Create an array with different some different configuration options:: >>> from numcodecs import Blosc >>> compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE) >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='i1', order='F', ... compressor=compressor) >>> z To create an array with object dtype requires a filter that can handle Python object encoding, e.g., `MsgPack` or `Pickle` from `numcodecs`:: >>> from numcodecs import MsgPack >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype=object, ... 
object_codec=MsgPack()) >>> z Example with some filters, and also storing chunks separately from metadata:: >>> from numcodecs import Quantize, Adler32 >>> store, chunk_store = dict(), dict() >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='f8', ... filters=[Quantize(digits=2, dtype='f8'), Adler32()], ... store=store, chunk_store=chunk_store) >>> z """""" if zarr_version is None and store is None: zarr_version = getattr(chunk_store, '_store_version', DEFAULT_ZARR_VERSION) # handle polymorphic store arg store = normalize_store_arg(store, zarr_version=zarr_version) zarr_version = getattr(store, '_store_version', DEFAULT_ZARR_VERSION) # API compatibility with h5py compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs) # optional array metadata if dimension_separator is None: dimension_separator = getattr(store, ""_dimension_separator"", None) else: store_separator = getattr(store, ""_dimension_separator"", None) if store_separator not in (None, dimension_separator): raise ValueError( f""Specified dimension_separator: {dimension_separator}"" f""conflicts with store's separator: "" f""{store_separator}"") dimension_separator = normalize_dimension_separator(dimension_separator) if zarr_version > 2 and path is None: raise ValueError(""path must be supplied to initialize a zarr v3 array"") # initialize array metadata init_array(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, fill_value=fill_value, order=order, overwrite=overwrite, path=path, chunk_store=chunk_store, filters=filters, object_codec=object_codec, dimension_separator=dimension_separator, storage_transformers=storage_transformers) # instantiate array z = Array(store, path=path, chunk_store=chunk_store, synchronizer=synchronizer, cache_metadata=cache_metadata, cache_attrs=cache_attrs, read_only=read_only, write_empty_chunks=write_empty_chunks) return z ","def create(shape, chunks=True, dtype=None, compressor='default', fill_value=0, order='C', store=None, synchronizer=None, overwrite=False, path=None, chunk_store=None, filters=None, cache_metadata=True, cache_attrs=True, read_only=False, object_codec=None, dimension_separator=None, write_empty_chunks=True, storage_transformers=None, *, zarr_version=None, **kwargs): """"""Create an array. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. If True, will be guessed from `shape` and `dtype`. If False, will be set to `shape`, i.e., single chunk for the whole array. If an int, the chunk size in each dimension will be given by the value of `chunks`. Default is True. dtype : string or dtype, optional NumPy dtype. compressor : Codec, optional Primary compressor. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Memory layout to be used within each chunk. store : MutableMapping or string Store or path to directory in file system or name of zip file. synchronizer : object, optional Array synchronizer. overwrite : bool, optional If True, delete all pre-existing data in `store` at `path` before creating the array. path : string, optional Path under which array is stored. chunk_store : MutableMapping, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. filters : sequence of Codecs, optional Sequence of filters to use to encode chunk data prior to compression. 
cache_metadata : bool, optional If True, array configuration metadata will be cached for the lifetime of the object. If False, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. read_only : bool, optional True if array should be protected against modification. object_codec : Codec, optional A codec to encode object arrays, only needed if dtype=object. dimension_separator : {'.', '/'}, optional Separator placed between the dimensions of a chunk. .. versionadded:: 2.8 write_empty_chunks : bool, optional If True (default), all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not be stored, and the store entry for that chunk's key is deleted. This setting enables sparser storage, as only chunks with non-fill-value data are stored, at the expense of overhead associated with checking the data of each chunk. .. versionadded:: 2.11 storage_transformers : sequence of StorageTransformers, optional Setting storage transformers, changes the storage structure and behaviour of data coming from the underlying store. The transformers are applied in the order of the given sequence. May only be set when using zarr_version 3. .. versionadded:: 2.13 zarr_version : {None, 2, 3}, optional The zarr protocol version of the created array. If None, it will be inferred from ``store`` or ``chunk_store`` if they are provided, otherwise defaulting to 2. Returns ------- z : zarr.core.Array Examples -------- Create an array with default settings:: >>> import zarr >>> z = zarr.create((10000, 10000), chunks=(1000, 1000)) >>> z Create an array with different some different configuration options:: >>> from numcodecs import Blosc >>> compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE) >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='i1', order='F', ... compressor=compressor) >>> z To create an array with object dtype requires a filter that can handle Python object encoding, e.g., `MsgPack` or `Pickle` from `numcodecs`:: >>> from numcodecs import MsgPack >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype=object, ... object_codec=MsgPack()) >>> z Example with some filters, and also storing chunks separately from metadata:: >>> from numcodecs import Quantize, Adler32 >>> store, chunk_store = dict(), dict() >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='f8', ... filters=[Quantize(digits=2, dtype='f8'), Adler32()], ... 
store=store, chunk_store=chunk_store) >>> z """""" if zarr_version is None and store is None: zarr_version = getattr(chunk_store, '_store_version', DEFAULT_ZARR_VERSION) # handle polymorphic store arg store = normalize_store_arg(store, zarr_version=zarr_version) zarr_version = getattr(store, '_store_version', DEFAULT_ZARR_VERSION) # API compatibility with h5py compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs) # optional array metadata if dimension_separator is None: dimension_separator = getattr(store, ""_dimension_separator"", None) else: store_separator = getattr(store, ""_dimension_separator"", None) if store_separator not in (None, dimension_separator): raise ValueError( f""Specified dimension_separator: {dimension_separator}"" f""conflicts with store's separator: "" f""{store_separator}"") dimension_separator = normalize_dimension_separator(dimension_separator) if zarr_version > 2 and path is None: raise ValueError(""path must be supplied to initialize a zarr v3 array"") # initialize array metadata init_array(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, fill_value=fill_value, order=order, overwrite=overwrite, path=path, chunk_store=chunk_store, filters=filters, object_codec=object_codec, dimension_separator=dimension_separator, storage_transformers=storage_transformers) # instantiate array z = Array(store, path=path, chunk_store=chunk_store, synchronizer=synchronizer, cache_metadata=cache_metadata, cache_attrs=cache_attrs, read_only=read_only, write_empty_chunks=write_empty_chunks) return z " 32079,"def url_create_relationships(uri, host, files, **kwargs): """""" Return relationships list if create_relationships is on (limited to max_num_of_relationships). Args: uri(str)- queried url, host/ip(str), file(list)- file associated with url, params(dict). Returns: Relationships(list). """""" relationships = [] if kwargs.get('create_relationships'): if host: parsed_host = parse_host(host) if parsed_host == 'domain': relationships.append(EntityRelationship( name=EntityRelationship.Relationships.RELATIONSHIPS_NAMES.get('hosts'), entity_a=uri, entity_a_type=FeedIndicatorType.URL, entity_b=host, entity_b_type=FeedIndicatorType.Domain, reverse_name=EntityRelationship.Relationships.HOSTS)) if parsed_host == 'ip': relationships.append(EntityRelationship( name=EntityRelationship.Relationships.RELATED_TO, entity_a=uri, entity_a_type=FeedIndicatorType.URL, entity_b=host, entity_b_type=FeedIndicatorType.IP, reverse_name=EntityRelationship.Relationships.RELATED_TO)) if files: for file in files: if len(relationships) >= kwargs.get('max_num_of_relationships', 1): break file_sh256 = file.get('SHA256') if file_sh256: relationships.append(EntityRelationship( name=EntityRelationship.Relationships.RELATIONSHIPS_NAMES.get('related-to'), entity_a=uri, entity_a_type=FeedIndicatorType.URL, entity_b=file_sh256, entity_b_type=FeedIndicatorType.File, reverse_name=EntityRelationship.Relationships.RELATED_TO)) return relationships ","def url_create_relationships(uri, host, files, **kwargs): """""" Return relationships list if create_relationships is on (limited to max_num_of_relationships). Args: uri(str)- queried url, host/ip(str), file(list)- file associated with url, params(dict). Returns: Relationships(list). 
"""""" relationships = [] if kwargs.get('create_relationships'): if host: parsed_host = parse_host(host) if parsed_host == 'domain': relationships.append(EntityRelationship( name=EntityRelationship.Relationships.RELATIONSHIPS_NAMES.get('hosts'), entity_a=uri, entity_a_type=FeedIndicatorType.URL, entity_b=host, entity_b_type=FeedIndicatorType.Domain, reverse_name=EntityRelationship.Relationships.HOSTS)) if parsed_host == 'ip': relationships.append(EntityRelationship( name=EntityRelationship.Relationships.RELATED_TO, entity_a=uri, entity_a_type=FeedIndicatorType.URL, entity_b=host, entity_b_type=FeedIndicatorType.IP, reverse_name=EntityRelationship.Relationships.RELATED_TO)) if files: for file in files: if len(relationships) >= kwargs.get('max_num_of_relationships'): break file_sh256 = file.get('SHA256') if file_sh256: relationships.append(EntityRelationship( name=EntityRelationship.Relationships.RELATIONSHIPS_NAMES.get('related-to'), entity_a=uri, entity_a_type=FeedIndicatorType.URL, entity_b=file_sh256, entity_b_type=FeedIndicatorType.File, reverse_name=EntityRelationship.Relationships.RELATED_TO)) return relationships " 4282,"def vertex_depths(inst, info=None, picks=None, trans=None, mode='dist', verbose=None): """"""Compute source depths as distances between vertices and nearest sensor. Parameters ---------- inst : instance of Forward | instance of SourceSpaces The object to select vertices from. info : instance of Info | None The info structure that contains information about the channels with respect to which to compute distances. picks : array-like of int | None Indices of sensors to include in distance calculations. If `None`` (default) then only MEG channels are used. trans : str | instance of Transform | None Either the full path to the head<->MRI transform ``*-trans.fif`` file produced during coregistration, or the Transformation itself. If trans is None, an identity matrix is assumed. Only needed when ``inst`` is a source space in MRI coordinates. mode : str How to compute source depth. 'dist' computes Euclidean distance between vertices and nearest sensors. verbose : bool | str | int | None If not None, override default verbose level (see :func:`mne.verbose` and :ref:`Logging documentation ` for more). Returns ------- depth : array of shape (,n_vertices) The depths of source space vertices with respect to sensors. """""" from .forward import Forward if isinstance(inst, Forward): info = inst['info'] src = inst['src'] elif isinstance(inst, SourceSpaces): src = inst if info is None: raise ValueError('You need to specify an Info object with ' 'information about the channels.') src = inst # Load the head<->MRI transform if necessary if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI: if trans is None: raise ValueError('Source space is in MRI coordinates, but no ' 'head<->MRI transform was given. 
Please specify' 'the full path to the appropriate *-trans.fif ' 'file as the ""trans"" parameter.') if isinstance(trans, string_types): trans = read_trans(trans, return_all=True) last_exp = None for trans in trans: # we got at least 1 try: trans = _ensure_trans(trans, 'head', 'mri') except Exception as exp: last_exp = exp else: break else: raise last_exp src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri')) print('Transform!') else: src_trans = Transform('head', 'head') # Identity transform dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head') # Select channels to be used for distance calculations if picks is None: picks = pick_types(info, meg=True) if len(picks) > 0: logger.info('Using MEG channels') else: logger.info('Using EEG channels') picks = pick_types(info, eeg=True) # get vertex position in same coordinates as for sensors below src_pos = np.vstack([ apply_trans(src_trans, s['rr'][s['inuse'].astype(np.bool)]) for s in src ]) # get sensor positions sensor_pos = [] for ch in picks: # MEG channels are in device coordinates, translate them to head if channel_type(info, ch) in ['mag', 'grad']: sensor_pos.append(apply_trans(dev_to_head, info['chs'][ch]['loc'][:3])) else: sensor_pos.append(info['chs'][ch]['loc'][:3]) sensor_pos = np.array(sensor_pos) # minimum distances per vertex depths = distance.cdist(sensor_pos, src_pos).min(axis=0) return depths ","def vertex_depths(inst, info=None, picks=None, trans=None, mode='dist', verbose=None): """"""Compute source depths as distances between vertices and nearest sensor. Parameters ---------- inst : instance of Forward | instance of SourceSpaces The object to select vertices from. info : instance of Info | None The info structure that contains information about the channels with respect to which to compute distances. picks : array-like of int | None Indices of sensors to include in distance calculations. If `None`` (default) then only MEG channels are used. trans : str | instance of Transform | None Either the full path to the head<->MRI transform ``*-trans.fif`` file produced during coregistration, or the Transformation itself. If trans is None, an identity matrix is assumed. Only needed when ``inst`` is a source space in MRI coordinates. mode : str How to compute source depth. 'dist' computes Euclidean distance between vertices and nearest sensors. %(verbose)s Returns ------- depth : array of shape (,n_vertices) The depths of source space vertices with respect to sensors. """""" from .forward import Forward if isinstance(inst, Forward): info = inst['info'] src = inst['src'] elif isinstance(inst, SourceSpaces): src = inst if info is None: raise ValueError('You need to specify an Info object with ' 'information about the channels.') src = inst # Load the head<->MRI transform if necessary if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI: if trans is None: raise ValueError('Source space is in MRI coordinates, but no ' 'head<->MRI transform was given. 
Please specify' 'the full path to the appropriate *-trans.fif ' 'file as the ""trans"" parameter.') if isinstance(trans, string_types): trans = read_trans(trans, return_all=True) last_exp = None for trans in trans: # we got at least 1 try: trans = _ensure_trans(trans, 'head', 'mri') except Exception as exp: last_exp = exp else: break else: raise last_exp src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri')) print('Transform!') else: src_trans = Transform('head', 'head') # Identity transform dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head') # Select channels to be used for distance calculations if picks is None: picks = pick_types(info, meg=True) if len(picks) > 0: logger.info('Using MEG channels') else: logger.info('Using EEG channels') picks = pick_types(info, eeg=True) # get vertex position in same coordinates as for sensors below src_pos = np.vstack([ apply_trans(src_trans, s['rr'][s['inuse'].astype(np.bool)]) for s in src ]) # get sensor positions sensor_pos = [] for ch in picks: # MEG channels are in device coordinates, translate them to head if channel_type(info, ch) in ['mag', 'grad']: sensor_pos.append(apply_trans(dev_to_head, info['chs'][ch]['loc'][:3])) else: sensor_pos.append(info['chs'][ch]['loc'][:3]) sensor_pos = np.array(sensor_pos) # minimum distances per vertex depths = distance.cdist(sensor_pos, src_pos).min(axis=0) return depths " 32507,"def set_empty_value_args_policy_update(policy_obj, option, policy_id): """""" The function use the get policy request function to fill the empty arguments in the policy Args: policy_obj (Dict): Dict of policy details option: (str) Policy option policy_id: (str) Policy ID Returns: Tuple. Policy object, the option to configure on the policy, policy id. """""" empty_args_list = [] # Add the empty arguments to empty args list for arg, value in list(policy_obj.items()): if value == '': empty_args_list.append(arg) if option == '': empty_args_list.append(""option"") # Check if there are any empty arguments if len(empty_args_list) > 0: # Fill the empty arguments with the current data using get policy request function policy_details = get_policy_request(policy_id)[0] for arg in empty_args_list: if arg == ""option"": option = policy_details[""option""] else: policy_obj[arg] = policy_details[""policy""][arg] return policy_obj, option, policy_id ","def set_empty_value_args_policy_update(policy_obj, option, policy_id): """""" The function use the get policy request function to fill the empty arguments in the policy Args: policy_obj (Dict): Dict of policy details option: (str) Policy option policy_id: (str) Policy ID Returns: Tuple. Policy object, the option to configure on the policy, policy id. 
"""""" empty_args_list = [] # Add the empty arguments to empty args list for arg, value in policy_obj.items(): if value == '': empty_args_list.append(arg) if option == '': empty_args_list.append(""option"") # Check if there are any empty arguments if len(empty_args_list) > 0: # Fill the empty arguments with the current data using get policy request function policy_details = get_policy_request(policy_id)[0] for arg in empty_args_list: if arg == ""option"": option = policy_details[""option""] else: policy_obj[arg] = policy_details[""policy""][arg] return policy_obj, option, policy_id " 605,"def find_es_docs_for_deleted_domains(): es_doc_counts_by_deleted_domain = defaultdict(dict) for domain in Domain.get_deleted_domain_names(): for hqESQuery in [AppES, CaseES, CaseSearchES, FormES, GroupES, UserES]: query = hqESQuery().domain(domain) count = query.count() if count != 0: es_doc_counts_by_deleted_domain[domain][hqESQuery.index] = count return es_doc_counts_by_deleted_domain ","def find_es_docs_for_deleted_domains(): es_doc_counts_by_deleted_domain = defaultdict(dict) for domain in Domain.get_deleted_domain_names(): for hqESQuery in [AppES, CaseES, CaseSearchES, FormES, GroupES, UserES]: query = hqESQuery().domain(domain) count = query.count() if count: es_doc_counts_by_deleted_domain[domain][hqESQuery.index] = count return es_doc_counts_by_deleted_domain " 28156,"def _extract_single_dataset_into_db(dataset: DataSet, target_conn: SomeConnection, target_exp_id: int) -> None: """""" NB: This function should only be called from within :meth:extract_runs_into_db Insert the given dataset into the specified database file as the latest run. Trying to insert a run already in the DB is a NOOP. Args: dataset: A dataset representing the run to be copied target_conn: connection to the DB. Must be atomically guarded target_exp_id: The exp_id of the (target DB) experiment in which to insert the run """""" if not dataset.completed: raise ValueError('Dataset not completed. An incomplete dataset ' 'can not be copied.') source_conn = dataset.conn run_id = get_runid_from_guid(target_conn, dataset.guid) if run_id != -1: return parspecs = dataset.paramspecs.values() #metadata = dataset.get_metadata() _, target_run_id, target_table_name = create_run(target_conn, target_exp_id, name=dataset.name, guid=dataset.guid, parameters=list(parspecs)) _populate_results_table(source_conn, target_conn, dataset.table_name, target_table_name) mark_run_complete(target_conn, target_run_id) _rewrite_timestamps(target_conn, target_run_id, dataset.run_timestamp_raw, dataset.completed_timestamp_raw) ","def _extract_single_dataset_into_db(dataset: DataSet, target_conn: SomeConnection, target_exp_id: int) -> None: """""" NB: This function should only be called from within :meth:extract_runs_into_db Insert the given dataset into the specified database file as the latest run. Trying to insert a run already in the DB is a NOOP. Args: dataset: A dataset representing the run to be copied target_conn: connection to the DB. Must be atomically guarded target_exp_id: The exp_id of the (target DB) experiment in which to insert the run """""" if not dataset.completed: raise ValueError(f'Dataset with run_id={dataset.run_id} is not completed. 
An incomplete dataset ' 'can not be copied.') source_conn = dataset.conn run_id = get_runid_from_guid(target_conn, dataset.guid) if run_id != -1: return parspecs = dataset.paramspecs.values() #metadata = dataset.get_metadata() _, target_run_id, target_table_name = create_run(target_conn, target_exp_id, name=dataset.name, guid=dataset.guid, parameters=list(parspecs)) _populate_results_table(source_conn, target_conn, dataset.table_name, target_table_name) mark_run_complete(target_conn, target_run_id) _rewrite_timestamps(target_conn, target_run_id, dataset.run_timestamp_raw, dataset.completed_timestamp_raw) " 31876,"def main() -> None: params: Dict[str, Any] = demisto.params() args: Dict[str, Any] = demisto.args() url = params.get('url') api_key = params.get('api_key') if not params.get('api_key') == '' else None verify_certificate: bool = params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() client: Client = Client(urljoin(url, ''), verify_certificate, proxy, headers={}, api_key=api_key) commands = { 'file': file_command, 'malwarebazaar-download-sample': malwarebazaar_download_sample_command, 'malwarebazaar-comment-add': malwarebazaar_comment_add_command, 'malwarebazaar-samples-list': malwarebazaar_samples_list_command, } if command == 'test-module': test_module(client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) ","def main() -> None: params: Dict[str, Any] = demisto.params() args: Dict[str, Any] = demisto.args() url = params.get('url') api_key = params.get('api_key') or None verify_certificate: bool = params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() client: Client = Client(urljoin(url, ''), verify_certificate, proxy, headers={}, api_key=api_key) commands = { 'file': file_command, 'malwarebazaar-download-sample': malwarebazaar_download_sample_command, 'malwarebazaar-comment-add': malwarebazaar_comment_add_command, 'malwarebazaar-samples-list': malwarebazaar_samples_list_command, } if command == 'test-module': test_module(client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) " 32523,"def scan_url_command(client: Client, args: dict) -> CommandResults: """""" 1 API Call """""" url = args['url'] raw_response: Dict[str, Any] = {} data: Dict[str, Any] = {} context: Dict[str, Any] = {} headers = ['id', 'url'] try: raw_response = client.url_scan(url) data = raw_response['data'] data['url'] = url context = { f'{INTEGRATION_ENTRY_CONTEXT}.Submission(val.id && val.id === obj.id)': data, 'vtScanID': data.get('id') # BC preservation } except DemistoException as ex: error = ex.res.json().get('error') # Invalid url, probably due to an unknown TLD if error['code'] == 'InvalidArgumentError': data = {'url': url, 'id': '', 'error': error['message']} headers.append('error') else: raise ex return CommandResults( readable_output=tableToMarkdown( 'New url submission:', data, headers=headers ), outputs=context, raw_response=raw_response ) ","def scan_url_command(client: Client, args: dict) -> CommandResults: """""" 1 API Call """""" url = 
args['url'] raw_response: Dict[str, Any] = {} data: Dict[str, Any] = {} context: Dict[str, Any] = {} headers = ['id', 'url'] try: raw_response = client.url_scan(url) data = raw_response['data'] data['url'] = url context = { f'{INTEGRATION_ENTRY_CONTEXT}.Submission(val.id && val.id === obj.id)': data, 'vtScanID': data.get('id') # BC preservation } except DemistoException as ex: error = ex.res.json().get('error') # Invalid url, probably due to an unknown TLD if error['code'] == 'InvalidArgumentError': data = {'url': url, 'id': '', 'error': error['message']} headers.append('error') else: raise return CommandResults( readable_output=tableToMarkdown( 'New url submission:', data, headers=headers ), outputs=context, raw_response=raw_response ) " 31489,"def get_file_command(credentials: Dict, sensor_id: int, source_path: str, timeout: int = None, delay: float = None): api = CBCloudAPI(**credentials) session = api.select(endpoint_standard.Device, sensor_id).lr_session() if timeout: timeout = int(timeout) if delay: delay = float(delay) file_data = session.get_file(file_name=source_path, timeout=timeout, delay=delay) file_name = ntpath.split(source_path)[1] return fileResult(file_name, file_data) ","def get_file_command(credentials: Dict, sensor_id: int, source_path: str, timeout: int = None, delay: Union[float, str] = None): api = CBCloudAPI(**credentials) session = api.select(endpoint_standard.Device, sensor_id).lr_session() if timeout: timeout = int(timeout) if delay: delay = float(delay) file_data = session.get_file(file_name=source_path, timeout=timeout, delay=delay) file_name = ntpath.split(source_path)[1] return fileResult(file_name, file_data) " 23266,"def html_visit_inheritance_diagram(self: HTMLTranslator, node: inheritance_diagram) -> None: """""" Output the graph for HTML. This will insert a PNG with clickable image map. """""" graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash # Create a mapping from fully-qualified class names to URLs. 
graphviz_output_format = self.builder.env.config.graphviz_output_format.upper() current_filename = self.builder.current_docname + self.builder.out_suffix current_dir = pathlib.PurePath(current_filename).parent urls = {} pending_xrefs = cast(Iterable[addnodes.pending_xref], node) for child in pending_xrefs: if child.get('refuri') is not None: # Construct the name from the URI if the reference is external via intersphinx if not child.get('internal', True): refname = child['refuri'].rsplit('#', 1)[-1] else: refname = child['reftitle'] # For SVG output, relative URIs need to be re-pathed to where the SVG file will be if graphviz_output_format == 'SVG' and '://' not in child['refuri']: # URI relative to src dir (typically equivalent to stripping all leading ../) uri_rel_to_srcdir = (current_dir / child['refuri']).as_posix() # URI relative to image dir (typically equivalent to prepending ../) uri_rel_to_imagedir = relpath(uri_rel_to_srcdir, self.builder.imagedir) urls[refname] = canon_path(uri_rel_to_imagedir) else: urls[refname] = child['refuri'] elif child.get('refid') is not None: if graphviz_output_format == 'SVG': # URI relative to image dir (typically equivalent to prepending ../) uri_rel_to_imagedir = relpath(current_filename, self.builder.imagedir) urls[child['reftitle']] = canon_path(uri_rel_to_imagedir) +\ '#' + child.get('refid') else: urls[child['reftitle']] = '#' + child.get('refid') dotcode = graph.generate_dot(name, urls, env=self.builder.env) render_dot_html(self, node, dotcode, {}, 'inheritance', 'inheritance', alt='Inheritance diagram of ' + node['content']) raise nodes.SkipNode ","def html_visit_inheritance_diagram(self: HTMLTranslator, node: inheritance_diagram) -> None: """""" Output the graph for HTML. This will insert a PNG with clickable image map. """""" graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash # Create a mapping from fully-qualified class names to URLs. 
graphviz_output_format = self.builder.env.config.graphviz_output_format.upper() current_filename = self.builder.current_docname + self.builder.out_suffix current_dir = dirname(current_filename) urls = {} pending_xrefs = cast(Iterable[addnodes.pending_xref], node) for child in pending_xrefs: if child.get('refuri') is not None: # Construct the name from the URI if the reference is external via intersphinx if not child.get('internal', True): refname = child['refuri'].rsplit('#', 1)[-1] else: refname = child['reftitle'] # For SVG output, relative URIs need to be re-pathed to where the SVG file will be if graphviz_output_format == 'SVG' and '://' not in child['refuri']: # URI relative to src dir (typically equivalent to stripping all leading ../) uri_rel_to_srcdir = (current_dir / child['refuri']).as_posix() # URI relative to image dir (typically equivalent to prepending ../) uri_rel_to_imagedir = relpath(uri_rel_to_srcdir, self.builder.imagedir) urls[refname] = canon_path(uri_rel_to_imagedir) else: urls[refname] = child['refuri'] elif child.get('refid') is not None: if graphviz_output_format == 'SVG': # URI relative to image dir (typically equivalent to prepending ../) uri_rel_to_imagedir = relpath(current_filename, self.builder.imagedir) urls[child['reftitle']] = canon_path(uri_rel_to_imagedir) +\ '#' + child.get('refid') else: urls[child['reftitle']] = '#' + child.get('refid') dotcode = graph.generate_dot(name, urls, env=self.builder.env) render_dot_html(self, node, dotcode, {}, 'inheritance', 'inheritance', alt='Inheritance diagram of ' + node['content']) raise nodes.SkipNode " 47816,"def test__templater_jinja_dynamic_variable_no_violations(): """"""Test no templater violation for variable defined within template."""""" t = JinjaTemplater(override_context=dict(blah=""foo"")) instr = """"""{% if True %} {% set some_var %}1{% endset %} SELECT {{some_var}} {% endif %} """""" outstr, vs = t.process(in_str=instr, fname=""test"", config=FluffConfig()) assert str(outstr) == ""\n \n SELECT 1\n\n"" # Check we have violations. assert not len(vs) > 0 ","def test__templater_jinja_dynamic_variable_no_violations(): """"""Test no templater violation for variable defined within template."""""" t = JinjaTemplater(override_context=dict(blah=""foo"")) instr = """"""{% if True %} {% set some_var %}1{% endset %} SELECT {{some_var}} {% endif %} """""" outstr, vs = t.process(in_str=instr, fname=""test"", config=FluffConfig()) assert str(outstr) == ""\n \n SELECT 1\n\n"" # Check we have violations. assert len(vs) == 0 " 57006,"def _get_all_test_targets_from_path(test_path=None, include_load_tests=True): """"""Returns a list of test targets for all classes under test_path containing tests. """""" def _get_test_target_classes(path): """"""Returns a list of all test classes in a given test file path. Args: path: str. The path of the test file from which all test classes are to be extracted. Returns: list. A list of all test classes in a given test file path. 
"""""" class_names = [] test_target_path = os.path.relpath( path, os.getcwd())[:-3].replace('/', '.') python_module = importlib.import_module(test_target_path) for name, clazz in inspect.getmembers( python_module, predicate=inspect.isclass): if unittest.TestCase in inspect.getmro(clazz): class_names.append(name) return [ '%s.%s' % (test_target_path, class_name) for class_name in class_names] base_path = os.path.join(os.getcwd(), test_path or '') result = [] excluded_dirs = ['.git', 'third_party', 'core/tests', 'node_modules', 'venv'] for root in os.listdir(base_path): if any([s in root for s in excluded_dirs]): continue if root.endswith('_test.py'): result = result + ( _get_test_target_classes(os.path.join(base_path, root))) for subroot, _, files in os.walk(os.path.join(base_path, root)): if _LOAD_TESTS_DIR in subroot and include_load_tests: for f in files: if f.endswith('_test.py'): result = result + ( _get_test_target_classes(os.path.join(subroot, f))) for f in files: if (f.endswith('_test.py') and os.path.join('core', 'tests') not in subroot): result = result + ( _get_test_target_classes(os.path.join(subroot, f))) return result ","def _get_all_test_targets_from_path(test_path=None, include_load_tests=True): """"""Returns a list of test targets for all classes under test_path containing tests. """""" def _get_test_target_classes(path): """"""Returns a list of all test classes in a given test file path. Args: path: str. The path of the test file from which all test classes are to be extracted. Returns: list. A list of all test classes in a given test file path. """""" class_names = [] test_target_path = os.path.relpath( path, os.getcwd())[:-3].replace('/', '.') python_module = importlib.import_module(test_target_path) for name, clazz in inspect.getmembers( python_module, predicate=inspect.isclass): if unittest.TestCase in inspect.getmro(clazz): class_names.append(name) return [ '%s.%s' % (test_target_path, class_name) for class_name in class_names] base_path = os.path.join(os.getcwd(), test_path or '') result = [] excluded_dirs = [ '.git', 'third_party', 'core/tests', 'node_modules', 'venv'] for root in os.listdir(base_path): if any([s in root for s in excluded_dirs]): continue if root.endswith('_test.py'): result = result + ( _get_test_target_classes(os.path.join(base_path, root))) for subroot, _, files in os.walk(os.path.join(base_path, root)): if _LOAD_TESTS_DIR in subroot and include_load_tests: for f in files: if f.endswith('_test.py'): result = result + ( _get_test_target_classes(os.path.join(subroot, f))) for f in files: if (f.endswith('_test.py') and os.path.join('core', 'tests') not in subroot): result = result + ( _get_test_target_classes(os.path.join(subroot, f))) return result " 39605,"def compile_enum_path( expr: qlast.Path, *, source: s_types.Type, ctx: context.ContextLevel) -> irast.Set: assert isinstance(source, s_scalars.ScalarType) enum_values = source.get_enum_values(ctx.env.schema) assert enum_values is not None nsteps = len(expr.steps) if nsteps == 1: raise errors.QueryError( f""'{source.get_displayname(ctx.env.schema)}' enum "" f""path expression lacks an enum member name, as in "" f""'{source.get_displayname(ctx.env.schema)}.{enum_values[0]}'"", context=expr.steps[0].context, ) step2 = expr.steps[1] if not isinstance(step2, qlast.Ptr): raise errors.QueryError( f""an enum member name must follow enum type name in the path, "" f""as in "" f""'{source.get_displayname(ctx.env.schema)}.{enum_values[0]}'"", context=step2.context, ) step2_direction = 
s_pointers.PointerDirection.Outbound if step2.direction is not None: step2_direction = s_pointers.PointerDirection(step2.direction) if step2_direction is not s_pointers.PointerDirection.Outbound: raise errors.QueryError( f""enum types do not support backward link navigation"", context=step2.context, ) if step2.type == 'property': raise errors.QueryError( f""enum types do not support link properties"", context=step2.context, ) if nsteps > 2: raise errors.QueryError( f""enum types only support two-step paths: "" f""`enum name`.`member name`"", context=expr.steps[2].context, ) return enum_indirection_set( source=source, ptr_name=step2.ptr.name, source_context=expr.context, ctx=ctx, ) ","def compile_enum_path( expr: qlast.Path, *, source: s_types.Type, ctx: context.ContextLevel) -> irast.Set: assert isinstance(source, s_scalars.ScalarType) enum_values = source.get_enum_values(ctx.env.schema) assert enum_values is not None nsteps = len(expr.steps) if nsteps == 1: raise errors.QueryError( f""'{source.get_displayname(ctx.env.schema)}' enum "" f""path expression lacks an enum member name, as in "" f""'{source.get_displayname(ctx.env.schema)}.{enum_values[0]}'"", context=expr.steps[0].context, ) step2 = expr.steps[1] if not isinstance(step2, qlast.Ptr): raise errors.QueryError( f""an enum member name must follow enum type name in the path, "" f""as in "" f""'{source.get_displayname(ctx.env.schema)}.{enum_values[0]}'"", context=step2.context, ) step2_direction = s_pointers.PointerDirection.Outbound if step2.direction is not None: step2_direction = s_pointers.PointerDirection(step2.direction) if step2_direction is not s_pointers.PointerDirection.Outbound: raise errors.QueryError( f""enum types do not support backlink navigation"", context=step2.context, ) if step2.type == 'property': raise errors.QueryError( f""enum types do not support link properties"", context=step2.context, ) if nsteps > 2: raise errors.QueryError( f""enum types only support two-step paths: "" f""`enum name`.`member name`"", context=expr.steps[2].context, ) return enum_indirection_set( source=source, ptr_name=step2.ptr.name, source_context=expr.context, ctx=ctx, ) " 57168,"def get_classroom_url_fragment_for_topic_id(topic_id: str) -> str: """"""Returns the classroom url fragment for the provided topic id. Args: topic_id: str. The topic id. Returns: str. Returns the classroom url fragment for a topic. """""" for classroom_dict in config_domain.CLASSROOM_PAGES_DATA.value: if topic_id in classroom_dict['topic_ids']: # As config_property in config domain is set to Any, we need type # casting to return a string value. return str(classroom_dict['url_fragment']) # As it is described in 'core/constants/parse_json_from_ts', we are # casting the type to change it from Any to string to return # string type value. return str(constants.CLASSROOM_URL_FRAGMENT_FOR_UNATTACHED_TOPICS) ","def get_classroom_url_fragment_for_topic_id(topic_id: str) -> str: """"""Returns the classroom url fragment for the provided topic id. Args: topic_id: str. The topic id. Returns: str. Returns the classroom url fragment for a topic. """""" for classroom_dict in config_domain.CLASSROOM_PAGES_DATA.value: if topic_id in classroom_dict['topic_ids']: # As config_property in config domain is set to Any, we need type # casting to return a string value. return str(classroom_dict['url_fragment']) # As it is described in 'core/constants/parse_json_from_ts', we are # casting the type to change it from Any to string to return # string type value. 
return str(constants.CLASSROOM_URL_FRAGMENT_FOR_UNATTACHED_TOPICS) " 35743,"def rotate_image_pil( img: PIL.Image.Image, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST, expand: bool = False, fill: Optional[List[float]] = None, center: Optional[List[float]] = None, ) -> PIL.Image.Image: if center is not None and expand: warnings.warn(""If provided center argument is ignored if expand is True"") center = None return _FP.rotate( img, angle, interpolation=pil_modes_mapping[interpolation], expand=expand, fill=fill, center=center ) ","def rotate_image_pil( img: PIL.Image.Image, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST, expand: bool = False, fill: Optional[List[float]] = None, center: Optional[List[float]] = None, ) -> PIL.Image.Image: if center is not None and expand: warnings.warn(""The provided center argument is ignored if expand is True"") center = None return _FP.rotate( img, angle, interpolation=pil_modes_mapping[interpolation], expand=expand, fill=fill, center=center ) " 31585,"def get_ip_neighbors_command(client, args): ipaddress = args.get('ipaddress') res = client.get_ip_neighbors(ipaddress=ipaddress) readable_output = tableToMarkdown( f""IP neighbors for {ipaddress}:"", [{ ""IP"": x.get('ip', ''), ""Hostnames"": x.get('hostnames', None), ""Sites"": x.get('sites', 0), ""Ports"": x.get('ports', None), ""Active Egress"": x.get('active_egress') } for x in res], [""IP"", ""Hostnames"", ""Sites"", ""Ports"", ""Active Egress""]) command_results = CommandResults( outputs_prefix=f""SecurityTrails.IP"", outputs_key_field=""ip"", outputs={ ""ip"": ipaddress, ""blocks"": res }, readable_output=readable_output ) return_results(command_results) create_standard_ip_context( ip_data=[{ ""Address"": x.get('ip').split(""/"")[0] } for x in res]) ","def get_ip_neighbors_command(client, args): ipaddress = args.get('ipaddress') res = client.get_ip_neighbors(ipaddress=ipaddress) readable_output = tableToMarkdown( f""IP neighbors for {ipaddress}:"", [{ ""IP"": x.get('ip', ''), ""Hostnames"": x.get('hostnames', None), ""Sites"": x.get('sites', 0), ""Ports"": x.get('ports', None), ""Active Egress"": x.get('active_egress') } for x in res], [""IP"", ""Hostnames"", ""Sites"", ""Ports"", ""Active Egress""]) command_results = CommandResults( outputs_prefix=""SecurityTrails.IP"", outputs_key_field=""ip"", outputs={ ""ip"": ipaddress, ""blocks"": res }, readable_output=readable_output ) return_results(command_results) create_standard_ip_context( ip_data=[{ ""Address"": x.get('ip').split(""/"")[0] } for x in res]) " 31280,"def parse_reports_relationships(reports: List, sub_reports: List, matched_relationships: Dict, id_to_object: Dict, courses_of_action_products: Dict) -> Tuple[list, list]: """"""Parse the relationships between reports' malware to attack-patterns and indicators. Args: reports: a list of reports. sub_reports: a list of sub-reports. matched_relationships (Dict): a dict of relationships in the form of `id: list(related_ids)`. id_to_object: a dict in the form of `id: stix_object`. courses_of_action_products (Dict): Returns: A list of processed reports. """""" indicators = [] for report in reports: related_ids = [] # Since main reports dont hold their own relationships theres a need to collect them. 
related_sub_reports = [object_id for object_id in report.get('rawJSON', {}).get('unit42_object_refs', []) if object_id.startswith('report')] report_malware_set = set() for sub_report in sub_reports: if sub_report.get('id') in related_sub_reports: # Indicators relationship only comes from being related to the malware objects of the report. related_ids += [id_ for id_ in matched_relationships.get(sub_report.get('id'), []) if not id_.startswith('indicator')] for object_id in sub_report.get('object_refs', []): if object_id.startswith('malware'): report_malware_set.add(object_id) elif object_id.startswith('attack-pattern'): related_ids.append(object_id) report['fields']['feedrelatedindicators'] = [] for malware_id in report_malware_set: related_ids += matched_relationships.get(malware_id, []) malware_object = id_to_object.get(malware_id) if malware_object: report['fields']['feedrelatedindicators'].extend([{ 'type': 'Malware', 'value': malware_object.get('name'), 'description': malware_object.get( 'description', ', '.join(malware_object.get('labels', ['No description provided.']))) }]) for relation in related_ids: relation_object = id_to_object.get(relation) if not relation_object: continue if relation.startswith('attack-pattern'): type_name = 'MITRE ATT&CK' relation_value_field = relation_object.get('external_references') elif relation.startswith('indicator'): # Need to create the connection only to file hashes if not relation_object.get('pattern').startswith('[file:'): continue type_name = 'Indicator' relation_value_field = relation_object.get('name') elif relation.startswith('malware'): type_name = 'Malware' relation_value_field = relation_object.get('name') else: continue if isinstance(relation_value_field, str): report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': relation_value_field, 'description': ', '.join(relation_object.get('labels', ['No description provided.'])) }]) indicator_val = relation_value_field else: all_urls = [] external_id = '' for item in relation_value_field: if 'url' in item: all_urls.append(item.get('url')) if 'external_id' in item: external_id = item.get('external_id') report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': external_id, 'description': ','.join(all_urls) }]) indicator_val = external_id # create MITRE ATT&CK indicator if indicator_val and type_name == 'MITRE ATT&CK': # populate mitre course of action data from the relevant relationships relationship = relation_object.get('id') courses_of_action: Dict[str, List] = {} if matched_relationships.get(relationship): for source in matched_relationships[relationship]: if source.startswith('course-of-action') and id_to_object.get(source): relationship_product = courses_of_action_products[source] if not courses_of_action.get(relationship_product): courses_of_action[relationship_product] = [] courses_of_action[relationship_product].append(id_to_object[source]) indicators.append({ ""value"": indicator_val, ""type"": 'MITRE ATT&CK', ""fields"": { ""firstseenbysource"": relation_object.get('created'), ""indicatoridentification"": relation_object.get('id'), ""tags"": [], ""modified"": relation_object.get('modified'), ""reportedby"": 'Unit42', ""mitrecourseofaction"": create_course_of_action_field(courses_of_action) } }) return reports, indicators ","def parse_reports_relationships(reports: List, sub_reports: List, matched_relationships: Dict, id_to_object: Dict, courses_of_action_products: Dict) -> Tuple[list, list]: """"""Parse the relationships between reports' 
malware to attack-patterns and indicators. Args: reports: a list of reports. sub_reports: a list of sub-reports. matched_relationships (Dict): a dict of relationships in the form of `id: list(related_ids)`. id_to_object: a dict in the form of `id: stix_object`. courses_of_action_products (Dict): Returns: A list of processed reports. """""" indicators = [] for report in reports: related_ids = [] # Since main reports dont hold their own relationships theres a need to collect them. related_sub_reports = [object_id for object_id in report.get('rawJSON', {}).get('unit42_object_refs', []) if object_id.startswith('report')] report_malware_set = set() for sub_report in sub_reports: if sub_report.get('id') in related_sub_reports: # Indicators relationship only comes from being related to the malware objects of the report. related_ids += [id_ for id_ in matched_relationships.get(sub_report.get('id'), []) if not id_.startswith('indicator')] for object_id in sub_report.get('object_refs', []): if object_id.startswith('malware'): report_malware_set.add(object_id) elif object_id.startswith('attack-pattern'): related_ids.append(object_id) report['fields']['feedrelatedindicators'] = [] for malware_id in report_malware_set: related_ids += matched_relationships.get(malware_id, []) malware_object = id_to_object.get(malware_id) if malware_object: report['fields']['feedrelatedindicators'].extend([{ 'type': 'Malware', 'value': malware_object.get('name'), 'description': malware_object.get( 'description', ', '.join(malware_object.get('labels', ['No description provided.']))) }]) for relation in related_ids: relation_object = id_to_object.get(relation) if not relation_object: continue if relation.startswith('attack-pattern'): type_name = 'MITRE ATT&CK' relation_value_field = relation_object.get('external_references') elif relation.startswith('indicator'): # Need to create the connection only to file hashes if not relation_object.get('pattern').startswith('[file:'): continue type_name = 'Indicator' relation_value_field = relation_object.get('name') elif relation.startswith('malware'): type_name = 'Malware' relation_value_field = relation_object.get('name') else: continue if isinstance(relation_value_field, str): report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': relation_value_field, 'description': ', '.join(relation_object.get('labels', ['No description provided.'])) }]) indicator_val = relation_value_field else: all_urls = [] external_id = '' for item in relation_value_field: if 'url' in item: all_urls.append(item.get('url')) if 'external_id' in item: external_id = item.get('external_id') report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': external_id, 'description': ','.join(all_urls) }]) indicator_val = external_id # create MITRE ATT&CK indicator if indicator_val and type_name == 'MITRE ATT&CK': # populate mitre course of action data from the relevant relationships relationship = relation_object.get('id') courses_of_action: Dict[str, List] = {} if relationship in matched_relationships: for source in matched_relationships[relationship]: if source.startswith('course-of-action') and id_to_object.get(source): relationship_product = courses_of_action_products[source] if not courses_of_action.get(relationship_product): courses_of_action[relationship_product] = [] courses_of_action[relationship_product].append(id_to_object[source]) indicators.append({ ""value"": indicator_val, ""type"": 'MITRE ATT&CK', ""fields"": { ""firstseenbysource"": 
relation_object.get('created'), ""indicatoridentification"": relation_object.get('id'), ""tags"": [], ""modified"": relation_object.get('modified'), ""reportedby"": 'Unit42', ""mitrecourseofaction"": create_course_of_action_field(courses_of_action) } }) return reports, indicators " 48876,"def task_group_to_grid(task_item_or_group, dag, dag_runs, session): """""" Create a nested dict representation of this TaskGroup and its children used to construct the Graph. """""" if isinstance(task_item_or_group, AbstractOperator): return { 'id': task_item_or_group.task_id, 'instances': [ ts for ts in [wwwutils.get_task_summary(dr, task_item_or_group, session) for dr in dag_runs] if ts is not None ], 'label': task_item_or_group.label, 'extra_links': task_item_or_group.extra_links, 'is_mapped': task_item_or_group.is_mapped, } # Task Group task_group = task_item_or_group children = [task_group_to_grid(child, dag, dag_runs, session) for child in task_group.topological_sort()] def get_summary(dag_run, children): child_instances = [child['instances'] for child in children if 'instances' in child] child_instances = [item for sublist in child_instances for item in sublist] children_start_dates = [item['start_date'] for item in child_instances if item] children_end_dates = [item['end_date'] for item in child_instances if item] children_states = [item['state'] for item in child_instances if item] group_state = None for state in wwwutils.priority: if state in children_states: group_state = state break group_start_date = wwwutils.datetime_to_string( min((timezone.parse(date) for date in children_start_dates if date), default=None) ) group_end_date = wwwutils.datetime_to_string( max((timezone.parse(date) for date in children_end_dates if date), default=None) ) return { 'task_id': task_group.group_id, 'run_id': dag_run.run_id, 'state': group_state, 'start_date': group_start_date, 'end_date': group_end_date, } group_summaries = [get_summary(dr, children) for dr in dag_runs] return { 'id': task_group.group_id, 'label': task_group.label, 'children': children, 'tooltip': task_group.tooltip, 'instances': group_summaries, } ","def task_group_to_grid(task_item_or_group, dag, dag_runs, session): """""" Create a nested dict representation of this TaskGroup and its children used to construct the Graph. 
"""""" if isinstance(task_item_or_group, AbstractOperator): return { 'id': task_item_or_group.task_id, 'instances': [ ts for ts in (wwwutils.get_task_summary(dr, task_item_or_group, session) for dr in dag_runs) if ts is not None ], 'label': task_item_or_group.label, 'extra_links': task_item_or_group.extra_links, 'is_mapped': task_item_or_group.is_mapped, } # Task Group task_group = task_item_or_group children = [task_group_to_grid(child, dag, dag_runs, session) for child in task_group.topological_sort()] def get_summary(dag_run, children): child_instances = [child['instances'] for child in children if 'instances' in child] child_instances = [item for sublist in child_instances for item in sublist] children_start_dates = [item['start_date'] for item in child_instances if item] children_end_dates = [item['end_date'] for item in child_instances if item] children_states = [item['state'] for item in child_instances if item] group_state = None for state in wwwutils.priority: if state in children_states: group_state = state break group_start_date = wwwutils.datetime_to_string( min((timezone.parse(date) for date in children_start_dates if date), default=None) ) group_end_date = wwwutils.datetime_to_string( max((timezone.parse(date) for date in children_end_dates if date), default=None) ) return { 'task_id': task_group.group_id, 'run_id': dag_run.run_id, 'state': group_state, 'start_date': group_start_date, 'end_date': group_end_date, } group_summaries = [get_summary(dr, children) for dr in dag_runs] return { 'id': task_group.group_id, 'label': task_group.label, 'children': children, 'tooltip': task_group.tooltip, 'instances': group_summaries, } " 7491,"def test_open_files(): for filename in get_pkg_data_filenames('data', pattern='*.xml'): if (filename.endswith('custom_datatype.xml') or filename.endswith('timesys_errors.xml')): continue parse(filename) ","def test_open_files(): for filename in get_pkg_data_filenames('data', pattern='*.xml'): if (filename.endswith('custom_datatype.xml') or filename.endswith('timesys_errors.xml')): continue parse(filename) " 48008,"def main(): args = build_argparser().parse_args() cap = open_images_capture(args.input, args.loop) log.info('OpenVINO Inference Engine') log.info('\tbuild: {}'.format(get_version())) ie = IECore() plugin_config = get_user_config(args.device, args.num_streams, args.num_threads) log.info('Reading model {}'.format(args.model)) model = models.Classification(ie, args.model, ntop=args.ntop, labels=args.labels, logger=log) log_blobs_info(model) gt_indices = None if args.ground_truth and isinstance(cap, DirReader): gt_indices = load_ground_truth(args.ground_truth, cap.names, len(model.labels)) async_pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests) log.info('The model {} is loaded to {}'.format(args.model, args.device)) log_runtime_settings(async_pipeline.exec_net, set(parse_devices(args.device))) next_frame_id = 0 next_frame_id_to_show = 0 metrics = PerformanceMetrics() render_metrics = PerformanceMetrics() presenter = None output_transform = None video_writer = cv2.VideoWriter() correct_predictions = 0 while True: if async_pipeline.callback_exceptions: raise async_pipeline.callback_exceptions[0] # Process all completed requests results = async_pipeline.get_result(next_frame_id_to_show) if results: classifications, frame_meta = results frame = frame_meta['frame'] start_time = frame_meta['start_time'] if args.ground_truth and gt_indices: if gt_indices[next_frame_id_to_show] in [cl[0] 
for cl in classifications]: correct_predictions += 1 gt_id = gt_indices[next_frame_id_to_show] if gt_indices else None if len(classifications) and args.raw_output_message: print_raw_results(classifications, next_frame_id_to_show, model.labels, gt_id) presenter.drawGraphs(frame) rendering_start_time = perf_counter() frame = draw_labels(frame, classifications, output_transform, model.labels, gt_id) render_metrics.update(rendering_start_time) metrics.update(start_time, frame) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(frame) next_frame_id_to_show += 1 if not args.no_show: cv2.imshow('Classification Results', frame) key = cv2.waitKey(args.pause) ESC_KEY = 27 # Quit. if key in {ord('q'), ord('Q'), ESC_KEY}: break presenter.handleKey(key) continue if async_pipeline.is_ready(): # Get new image/frame start_time = perf_counter() frame = cap.read() if frame is None: if next_frame_id == 0: raise ValueError(""Can't read an image from the input"") break if next_frame_id == 0: output_transform = models.OutputTransform(frame.shape[:2], args.output_resolution) if args.output_resolution: output_resolution = output_transform.new_resolution else: output_resolution = (frame.shape[1], frame.shape[0]) presenter = monitors.Presenter(args.utilization_monitors, 55, (round(output_resolution[0] / 4), round(output_resolution[1] / 8))) if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(), output_resolution): raise RuntimeError(""Can't open video writer"") # Submit for inference async_pipeline.submit_data(frame, next_frame_id, {'frame': frame, 'start_time': start_time}) next_frame_id += 1 else: # Wait for empty request async_pipeline.await_any() async_pipeline.await_all() # Process completed requests for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id): results = async_pipeline.get_result(next_frame_id_to_show) while results is None: results = async_pipeline.get_result(next_frame_id_to_show) classifications, frame_meta = results frame = frame_meta['frame'] start_time = frame_meta['start_time'] if args.ground_truth and gt_indices: if gt_indices[next_frame_id_to_show] in [cl[0] for cl in classifications]: correct_predictions += 1 gt_id = gt_indices[next_frame_id_to_show] if gt_indices else None if len(classifications) and args.raw_output_message: print_raw_results(classifications, next_frame_id_to_show, model.labels, gt_id) presenter.drawGraphs(frame) rendering_start_time = perf_counter() frame = draw_labels(frame, classifications, output_transform, model.labels, gt_id) render_metrics.update(rendering_start_time) metrics.update(start_time, frame) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(frame) if not args.no_show: cv2.imshow('Classification Results', frame) key = cv2.waitKey(args.pause) ESC_KEY = 27 # Quit. 
if key in {ord('q'), ord('Q'), ESC_KEY}: break presenter.handleKey(key) if args.ground_truth: log.info('Accuracy (top {}): {:.1%}'.format(args.ntop, correct_predictions / next_frame_id)) metrics.log_total() log_latency_per_stage(cap.reader_metrics.get_latency(), async_pipeline.preprocess_metrics.get_latency(), async_pipeline.inference_metrics.get_latency(), async_pipeline.postprocess_metrics.get_latency(), render_metrics.get_latency()) for rep in presenter.reportMeans(): log.info(rep) ","def main(): args = build_argparser().parse_args() cap = open_images_capture(args.input, args.loop) log.info('OpenVINO Inference Engine') log.info('\tbuild: {}'.format(get_version())) ie = IECore() plugin_config = get_user_config(args.device, args.num_streams, args.num_threads) log.info('Reading model {}'.format(args.model)) model = models.Classification(ie, args.model, ntop=args.ntop, labels=args.labels, logger=log) log_blobs_info(model) gt_indices = None if args.ground_truth and cap.get_type() == 'DIR': gt_indices = load_ground_truth(args.ground_truth, cap.names, len(model.labels)) async_pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests) log.info('The model {} is loaded to {}'.format(args.model, args.device)) log_runtime_settings(async_pipeline.exec_net, set(parse_devices(args.device))) next_frame_id = 0 next_frame_id_to_show = 0 metrics = PerformanceMetrics() render_metrics = PerformanceMetrics() presenter = None output_transform = None video_writer = cv2.VideoWriter() correct_predictions = 0 while True: if async_pipeline.callback_exceptions: raise async_pipeline.callback_exceptions[0] # Process all completed requests results = async_pipeline.get_result(next_frame_id_to_show) if results: classifications, frame_meta = results frame = frame_meta['frame'] start_time = frame_meta['start_time'] if args.ground_truth and gt_indices: if gt_indices[next_frame_id_to_show] in [cl[0] for cl in classifications]: correct_predictions += 1 gt_id = gt_indices[next_frame_id_to_show] if gt_indices else None if len(classifications) and args.raw_output_message: print_raw_results(classifications, next_frame_id_to_show, model.labels, gt_id) presenter.drawGraphs(frame) rendering_start_time = perf_counter() frame = draw_labels(frame, classifications, output_transform, model.labels, gt_id) render_metrics.update(rendering_start_time) metrics.update(start_time, frame) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(frame) next_frame_id_to_show += 1 if not args.no_show: cv2.imshow('Classification Results', frame) key = cv2.waitKey(args.pause) ESC_KEY = 27 # Quit. 
if key in {ord('q'), ord('Q'), ESC_KEY}: break presenter.handleKey(key) continue if async_pipeline.is_ready(): # Get new image/frame start_time = perf_counter() frame = cap.read() if frame is None: if next_frame_id == 0: raise ValueError(""Can't read an image from the input"") break if next_frame_id == 0: output_transform = models.OutputTransform(frame.shape[:2], args.output_resolution) if args.output_resolution: output_resolution = output_transform.new_resolution else: output_resolution = (frame.shape[1], frame.shape[0]) presenter = monitors.Presenter(args.utilization_monitors, 55, (round(output_resolution[0] / 4), round(output_resolution[1] / 8))) if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(), output_resolution): raise RuntimeError(""Can't open video writer"") # Submit for inference async_pipeline.submit_data(frame, next_frame_id, {'frame': frame, 'start_time': start_time}) next_frame_id += 1 else: # Wait for empty request async_pipeline.await_any() async_pipeline.await_all() # Process completed requests for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id): results = async_pipeline.get_result(next_frame_id_to_show) while results is None: results = async_pipeline.get_result(next_frame_id_to_show) classifications, frame_meta = results frame = frame_meta['frame'] start_time = frame_meta['start_time'] if args.ground_truth and gt_indices: if gt_indices[next_frame_id_to_show] in [cl[0] for cl in classifications]: correct_predictions += 1 gt_id = gt_indices[next_frame_id_to_show] if gt_indices else None if len(classifications) and args.raw_output_message: print_raw_results(classifications, next_frame_id_to_show, model.labels, gt_id) presenter.drawGraphs(frame) rendering_start_time = perf_counter() frame = draw_labels(frame, classifications, output_transform, model.labels, gt_id) render_metrics.update(rendering_start_time) metrics.update(start_time, frame) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(frame) if not args.no_show: cv2.imshow('Classification Results', frame) key = cv2.waitKey(args.pause) ESC_KEY = 27 # Quit. if key in {ord('q'), ord('Q'), ESC_KEY}: break presenter.handleKey(key) if args.ground_truth: log.info('Accuracy (top {}): {:.1%}'.format(args.ntop, correct_predictions / next_frame_id)) metrics.log_total() log_latency_per_stage(cap.reader_metrics.get_latency(), async_pipeline.preprocess_metrics.get_latency(), async_pipeline.inference_metrics.get_latency(), async_pipeline.postprocess_metrics.get_latency(), render_metrics.get_latency()) for rep in presenter.reportMeans(): log.info(rep) " 3279,"def find_measurements_histogram_params( measurements, num_buckets, min_value, max_value, precision, query, params ): multiplier = int(10 ** precision) # finding the bounds might result in None if there isn't sufficient data if min_value is None or max_value is None: return HistogramParams(1, 0, multiplier) # A single bucket in a histogram has contains values from [start, end), # meaning that start is inclusive and end is exclusive scaled_min = int(floor(multiplier * min_value)) scaled_max = int(ceil(multiplier * max_value)) # align the first bin with the minimum value start_offset = scaled_min bucket_size = int(ceil((scaled_max - scaled_min) / float(num_buckets))) if bucket_size == 0: bucket_size = 1 # Sometimes the max value lies on the bucket boundary, and since the end # of the bucket is exclusive, it gets excluded. 
To account for that, we # increase the width of the buckets to cover the max value. if start_offset + num_buckets * bucket_size <= max_value: bucket_size += 1 return HistogramParams(bucket_size, start_offset, multiplier) ","def find_measurements_histogram_params( measurements, num_buckets, min_value, max_value, precision, query, params ): multiplier = int(10 ** precision) # finding the bounds might result in None if there isn't sufficient data if min_value is None or max_value is None: return HistogramParams(1, 0, multiplier) # A single bucket in a histogram contains values from [start, end), # meaning that start is inclusive and end is exclusive scaled_min = int(floor(multiplier * min_value)) scaled_max = int(ceil(multiplier * max_value)) # align the first bin with the minimum value start_offset = scaled_min bucket_size = int(ceil((scaled_max - scaled_min) / float(num_buckets))) if bucket_size == 0: bucket_size = 1 # Sometimes the max value lies on the bucket boundary, and since the end # of the bucket is exclusive, it gets excluded. To account for that, we # increase the width of the buckets to cover the max value. if start_offset + num_buckets * bucket_size <= max_value: bucket_size += 1 return HistogramParams(bucket_size, start_offset, multiplier) " 37284,"def control(operation: Union[Gate, ControlledGate], num_ctrl_qubits: Optional[int] = 1, label: Optional[Union[None, str]] = None, ctrl_state: Optional[Union[None, int, str]] = None) -> ControlledGate: """"""Return controlled version of gate using controlled rotations. This function first checks the name of the operation to see if it knows of a method from which to generate a controlled version. Currently these are `x`, `rx`, `ry`, and `rz`. If a method is not directly known, it calls the unroller to convert to `x`, `y`, `z`, `h`, `rx`, `ry`, `swap`, `ccx`, `u1`, `u3` and `cx` gates. Args: operation: The gate used to create the ControlledGate. num_ctrl_qubits: The number of controls to add to gate (default=1). label: An optional gate label. ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If specified as a bitstring the length must equal num_ctrl_qubits, MSB on left. If None, use 2**num_ctrl_qubits-1. Returns: Controlled version of gate. 
Raises: CircuitError: gate contains non-gate in definition """""" from math import pi # pylint: disable=cyclic-import import qiskit.circuit.controlledgate as controlledgate # pylint: disable=unused-import import qiskit.circuit.library.standard_gates.multi_control_rotation_gates q_control = QuantumRegister(num_ctrl_qubits, name='control') q_target = QuantumRegister(operation.num_qubits, name='target') q_ancillae = None # TODO: add qc = QuantumCircuit(q_control, q_target) if operation.name == 'x' or ( isinstance(operation, controlledgate.ControlledGate) and operation.base_gate.name == 'x'): qc.mct(q_control[:] + q_target[:-1], q_target[-1], q_ancillae) elif operation.name == 'rx': qc.mcrx(operation.definition.data[0][0].params[0], q_control, q_target[0], use_basis_gates=True) elif operation.name == 'ry': qc.mcry(operation.definition.data[0][0].params[0], q_control, q_target[0], q_ancillae, mode='noancilla', use_basis_gates=True) elif operation.name == 'rz': qc.mcrz(operation.definition.data[0][0].params[0], q_control, q_target[0], use_basis_gates=True) else: basis_gates = ['x', 'y', 'z', 'h', 'rx', 'ry', 'swap', 'ccx', 'u1', 'u3', 'cx'] bgate = _unroll_gate(operation, basis_gates) # now we have a bunch of single qubit rotation gates and cx for rule in bgate.definition.data: if rule[0].name == 'u3': theta, phi, lamb = rule[0].params if phi == -pi / 2 and lamb == pi / 2: qc.mcrx(theta, q_control, q_target[rule[1][0].index], use_basis_gates=True) elif phi == 0 and lamb == 0: qc.mcry(theta, q_control, q_target[rule[1][0].index], q_ancillae, use_basis_gates=True) elif theta == 0 and phi == 0: qc.mcrz(lamb, q_control, q_target[rule[1][0].index], use_basis_gates=True) else: qc.mcrz(lamb, q_control, q_target[rule[1][0].index], use_basis_gates=True) qc.mcry(theta, q_control, q_target[rule[1][0].index], q_ancillae, use_basis_gates=True) qc.mcrz(phi, q_control, q_target[rule[1][0].index], use_basis_gates=True) elif rule[0].name == 'u1': qc.mcu1(rule[0].params[0], q_control, q_target[rule[1][0].index]) elif rule[0].name == 'cx' or rule[0].name == 'ccx': additional_control_bits = [bit.index for bit in rule[1][:-1]] qc.mct(q_control[:] + q_target[additional_control_bits], q_target[rule[1][-1].index], q_ancillae) elif rule[0].name == 'x': qc.mct(q_control[:], q_target[rule[1][0].index], q_ancillae) elif rule[0].name == 'z': from qiskit.circuit.library.standard_gates import ZGate mcz = ZGate().control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcz, qargs) elif rule[0].name == 'y': from qiskit.circuit.library.standard_gates import YGate mcy = YGate().control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcy, qargs) elif rule[0].name == 'h': from qiskit.circuit.library.standard_gates import HGate mch = HGate().control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mch, qargs) elif rule[0].name == 'rx': from qiskit.circuit.library.standard_gates import RXGate mcrx = RXGate(rule[0].params[0]).control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcrx, qargs) elif rule[0].name == 'ry': from qiskit.circuit.library.standard_gates import RYGate mcry = RYGate(rule[0].params[0]).control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcry, qargs) elif rule[0].name == 'swap': from qiskit.circuit.library.standard_gates import SwapGate mcswap = SwapGate().control(num_ctrl_qubits) qargs = 
q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcswap, qargs) else: raise CircuitError('gate contains non-controllable instructions') if isinstance(operation, controlledgate.ControlledGate): new_num_ctrl_qubits = num_ctrl_qubits + operation.num_ctrl_qubits new_ctrl_state = operation.ctrl_state << num_ctrl_qubits | ctrl_state base_name = operation.base_gate.name base_gate = operation.base_gate else: new_num_ctrl_qubits = num_ctrl_qubits new_ctrl_state = ctrl_state base_name = operation.name base_gate = operation # In order to maintain some backward compatibility with gate names this # uses a naming convention where if the number of controls is <=2 the gate # is named like ""cc"", else it is named like # ""c"". if new_num_ctrl_qubits > 2: ctrl_substr = 'c{0:d}'.format(new_num_ctrl_qubits) else: ctrl_substr = ('{0}' * new_num_ctrl_qubits).format('c') new_name = '{0}{1}'.format(ctrl_substr, base_name) cgate = controlledgate.ControlledGate(new_name, qc.num_qubits, operation.params, label=label, num_ctrl_qubits=new_num_ctrl_qubits, definition=qc, ctrl_state=new_ctrl_state) cgate.base_gate = base_gate return cgate ","def control(operation: Union[Gate, ControlledGate], num_ctrl_qubits: Optional[int] = 1, label: Optional[Union[None, str]] = None, ctrl_state: Optional[Union[None, int, str]] = None) -> ControlledGate: """"""Return controlled version of gate using controlled rotations. This function first checks the name of the operation to see if it knows of a method from which to generate a controlled version. Currently these are `x`, `rx`, `ry`, and `rz`. If a method is not directly known, it calls the unroller to convert to `x`, `y`, `z`, `h`, `rx`, `ry`, `swap`, `ccx`, `u1`, `u3` and `cx` gates. Args: operation: The gate used to create the ControlledGate. num_ctrl_qubits: The number of controls to add to gate (default=1). label: An optional gate label. ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If specified as a bitstring the length must equal num_ctrl_qubits, MSB on left. If None, use 2**num_ctrl_qubits-1. Returns: Controlled version of gate. 
Raises: CircuitError: gate contains non-gate in definition """""" from math import pi # pylint: disable=cyclic-import import qiskit.circuit.controlledgate as controlledgate # pylint: disable=unused-import import qiskit.circuit.library.standard_gates.multi_control_rotation_gates q_control = QuantumRegister(num_ctrl_qubits, name='control') q_target = QuantumRegister(operation.num_qubits, name='target') q_ancillae = None # TODO: add qc = QuantumCircuit(q_control, q_target) if operation.name == 'x' or ( isinstance(operation, controlledgate.ControlledGate) and operation.base_gate.name == 'x'): qc.mct(q_control[:] + q_target[:-1], q_target[-1], q_ancillae) elif operation.name == 'rx': qc.mcrx(operation.definition.data[0][0].params[0], q_control, q_target[0], use_basis_gates=True) elif operation.name == 'ry': qc.mcry(operation.definition.data[0][0].params[0], q_control, q_target[0], q_ancillae, mode='noancilla', use_basis_gates=True) elif operation.name == 'rz': qc.mcrz(operation.definition.data[0][0].params[0], q_control, q_target[0], use_basis_gates=True) else: basis_gates = ['x', 'y', 'z', 'h', 'rx', 'ry', 'swap', 'ccx', 'u1', 'u3', 'cx'] bgate = _unroll_gate(operation, basis_gates) # now we have a bunch of single qubit rotation gates and cx for rule in bgate.definition.data: if rule[0].name == 'u3': theta, phi, lamb = rule[0].params if phi == -pi / 2 and lamb == pi / 2: qc.mcrx(theta, q_control, q_target[rule[1][0].index], use_basis_gates=True) elif phi == 0 and lamb == 0: qc.mcry(theta, q_control, q_target[rule[1][0].index], q_ancillae, use_basis_gates=True) elif theta == 0 and phi == 0: qc.mcrz(lamb, q_control, q_target[rule[1][0].index], use_basis_gates=True) else: qc.mcrz(lamb, q_control, q_target[rule[1][0].index], use_basis_gates=True) qc.mcry(theta, q_control, q_target[rule[1][0].index], q_ancillae, use_basis_gates=True) qc.mcrz(phi, q_control, q_target[rule[1][0].index], use_basis_gates=True) elif rule[0].name == 'u1': qc.mcu1(rule[0].params[0], q_control, q_target[rule[1][0].index]) elif rule[0].name == 'cx' or rule[0].name == 'ccx': additional_control_bits = [bit.index for bit in rule[1][:-1]] qc.mct(q_control[:] + q_target[additional_control_bits], q_target[rule[1][-1].index], q_ancillae) elif rule[0].name == 'x': qc.mcx(q_control[:], q_target[rule[1][0].index], q_ancillae) elif rule[0].name == 'z': from qiskit.circuit.library.standard_gates import ZGate mcz = ZGate().control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcz, qargs) elif rule[0].name == 'y': from qiskit.circuit.library.standard_gates import YGate mcy = YGate().control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcy, qargs) elif rule[0].name == 'h': from qiskit.circuit.library.standard_gates import HGate mch = HGate().control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mch, qargs) elif rule[0].name == 'rx': from qiskit.circuit.library.standard_gates import RXGate mcrx = RXGate(rule[0].params[0]).control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcrx, qargs) elif rule[0].name == 'ry': from qiskit.circuit.library.standard_gates import RYGate mcry = RYGate(rule[0].params[0]).control(num_ctrl_qubits) qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcry, qargs) elif rule[0].name == 'swap': from qiskit.circuit.library.standard_gates import SwapGate mcswap = SwapGate().control(num_ctrl_qubits) qargs = 
q_control[:] + q_target[[bit.index for bit in rule[1]]] qc.append(mcswap, qargs) else: raise CircuitError('gate contains non-controllable instructions') if isinstance(operation, controlledgate.ControlledGate): new_num_ctrl_qubits = num_ctrl_qubits + operation.num_ctrl_qubits new_ctrl_state = operation.ctrl_state << num_ctrl_qubits | ctrl_state base_name = operation.base_gate.name base_gate = operation.base_gate else: new_num_ctrl_qubits = num_ctrl_qubits new_ctrl_state = ctrl_state base_name = operation.name base_gate = operation # In order to maintain some backward compatibility with gate names this # uses a naming convention where if the number of controls is <=2 the gate # is named like ""cc"", else it is named like # ""c"". if new_num_ctrl_qubits > 2: ctrl_substr = 'c{0:d}'.format(new_num_ctrl_qubits) else: ctrl_substr = ('{0}' * new_num_ctrl_qubits).format('c') new_name = '{0}{1}'.format(ctrl_substr, base_name) cgate = controlledgate.ControlledGate(new_name, qc.num_qubits, operation.params, label=label, num_ctrl_qubits=new_num_ctrl_qubits, definition=qc, ctrl_state=new_ctrl_state) cgate.base_gate = base_gate return cgate " 53738,"def test_raiden_read_config(tmp_path, cli_runner): config = """""" datadir = ""~/datadir_from_config_file"" network-id = 42 default-reveal-timeout = 21 [log-config] """" = ""DEBUG"" ""raiden.network"" = ""INFO"" ""raiden.transfer"" = ""WARNING"" """""" # Config file should exist at the default location (~/.raiden/config.toml) datadir = f""{tmp_path}/.raiden/"" filename = datadir + ""config.toml"" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, ""w"") as f: f.write(config) cli_command = ""raiden --log-config raiden.transfer:INFO"" expected_args = { # Config file set by default, but path was resolved ""config_file"": (ParameterSource.DEFAULT, filename), # Check mapping of custom internal_name (`network-id` -> `chain_id`) ""chain_id"": (ParameterSource.DEFAULT_MAP, 42), ""default_reveal_timeout"": (ParameterSource.DEFAULT_MAP, 21), # Check for merging of config, where CLI takes precedence for loggers ""log_config"": ( ParameterSource.DEFAULT_MAP, {"""": ""DEBUG"", ""raiden.network"": ""INFO"", ""raiden.transfer"": ""INFO""}, ), # Letting the config overwrite the datadir AFTER it was read in, # does only work when no CLI option for the datadir was given ""datadir"": (ParameterSource.DEFAULT_MAP, f""{tmp_path}/datadir_from_config_file""), } _, kwargs = get_invoked_kwargs(cli_command, cli_runner, ""raiden.ui.cli._run"") assert_invoked_kwargs(kwargs, expected_args) ","def test_raiden_read_config(tmp_path, cli_runner): config = """""" datadir = ""~/datadir_from_config_file"" network-id = 42 default-reveal-timeout = 21 [log-config] """" = ""DEBUG"" ""raiden.network"" = ""INFO"" ""raiden.transfer"" = ""WARNING"" """""" # Config file should exist at the default location (~/.raiden/config.toml) datadir = tmp_path / "".raiden"" datadir.mkdir(parents=True, exist_ok=True) filename = datadir / ""config.toml"" filename.write_text(config) cli_command = ""raiden --log-config raiden.transfer:INFO"" expected_args = { # Config file set by default, but path was resolved ""config_file"": (ParameterSource.DEFAULT, filename), # Check mapping of custom internal_name (`network-id` -> `chain_id`) ""chain_id"": (ParameterSource.DEFAULT_MAP, 42), ""default_reveal_timeout"": (ParameterSource.DEFAULT_MAP, 21), # Check for merging of config, where CLI takes precedence for loggers ""log_config"": ( ParameterSource.DEFAULT_MAP, {"""": ""DEBUG"", ""raiden.network"": 
""INFO"", ""raiden.transfer"": ""INFO""}, ), # Letting the config overwrite the datadir AFTER it was read in, # does only work when no CLI option for the datadir was given ""datadir"": (ParameterSource.DEFAULT_MAP, f""{tmp_path}/datadir_from_config_file""), } _, kwargs = get_invoked_kwargs(cli_command, cli_runner, ""raiden.ui.cli._run"") assert_invoked_kwargs(kwargs, expected_args) " 34231,"def get_component_class(component_name: Text) -> Type[""Component""]: """"""Resolve component name to a registered components class."""""" if component_name not in registered_components: if component_name not in old_style_names: try: return class_from_module_path(component_name) except ModuleNotFoundError as e: # when component_name is a path to a class but that path is invalid raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n{}"".format( component_name, e.msg ) ) except AttributeError: # when component_name is a path to a class but the path does not contain that class module_name, _, class_name = component_name.rpartition(""."") raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n"" ""Cannot find class '{}' in module {}."".format( component_name, class_name, module_name ) ) except ImportError: # when component_name is a class name and not part of old_style_names raise Exception( ""Failed to find component class for '{0}'.Unknown component name.\n"" ""Cannot import class '{0}' from global namespace."".format( component_name ) ) else: # DEPRECATED ensures compatibility, remove in future versions logger.warning( ""DEPRECATION warning: your nlu config file "" ""contains old style component name `{}`, "" ""you should change it to its class name: `{}`."" """".format(component_name, old_style_names[component_name]) ) component_name = old_style_names[component_name] return registered_components[component_name] ","def get_component_class(component_name: Text) -> Type[""Component""]: """"""Resolve component name to a registered components class."""""" if component_name not in registered_components: if component_name not in old_style_names: try: return class_from_module_path(component_name) except ModuleNotFoundError as e: # when component_name is a path to a class but that path is invalid raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n{}"".format( component_name, e.msg ) ) except AttributeError: # when component_name is a path to a class but the path does not contain that class module_name, _, class_name = component_name.rpartition(""."") raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n"" ""Cannot find class '{}' in module {}."".format( component_name, class_name, module_name ) ) except ImportError: # when component_name is a class name and not part of old_style_names raise Exception( ""Cannot import class '{0}' from global namespace.\n"" ""Cannot import class '{0}' from global namespace."".format( component_name ) ) else: # DEPRECATED ensures compatibility, remove in future versions logger.warning( ""DEPRECATION warning: your nlu config file "" ""contains old style component name `{}`, "" ""you should change it to its class name: `{}`."" """".format(component_name, old_style_names[component_name]) ) component_name = old_style_names[component_name] return registered_components[component_name] " 2294,"def test_sparse_input_for_fit_predict(): # Test to make sure sparse inputs are accepted for predict # (non-regression test for issue #20049) af = 
AffinityPropagation(affinity=""euclidean"", random_state=42) rng = np.random.RandomState(42) X = csr_matrix(rng.randint(0, 2, size=(5, 5))) labels = af.fit_predict(X) assert_array_equal(labels, (0, 1, 1, 2, 3)) ","def test_sparse_input_for_fit_predict(): # Test to make sure sparse inputs are accepted for fit_predict # (non-regression test for issue #20049) af = AffinityPropagation(affinity=""euclidean"", random_state=42) rng = np.random.RandomState(42) X = csr_matrix(rng.randint(0, 2, size=(5, 5))) labels = af.fit_predict(X) assert_array_equal(labels, (0, 1, 1, 2, 3)) " 37840,"def test_clib_name(): ""Make sure we get the correct library name for different OS names"" for linux in [""linux"", ""linux2"", ""linux3""]: assert clib_name(linux) == ""libgmt.so"" assert clib_name(""darwin"") == ""libgmt.dylib"" assert clib_name(""win32"", True) == ""gmt_w64.dll"" assert clib_name(""win32"", False) == ""gmt_w32.dll"" with pytest.raises(GMTOSError): clib_name(""meh"") ","def test_clib_name(): ""Make sure we get the correct library name for different OS names"" for linux in [""linux"", ""linux2"", ""linux3""]: assert clib_name(linux) == ""libgmt.so"" assert clib_name(""darwin"") == ""libgmt.dylib"" assert clib_name(""win32"", True) == ""gmt_w64.dll"" assert clib_name(""win32"", is_64bit=False) == ""gmt_w32.dll"" with pytest.raises(GMTOSError): clib_name(""meh"") " 47549,"def symbolic_trace( model: PreTrainedModel, input_names: Optional[List[str]] = None, disable_check: bool = False, ) -> GraphModule: """""" Performs symbolic tracing on the model. Args: model ([`PreTrainedModel`]): The model to trace. input_names (`List[str]`, *optional*): The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. disable_check (`bool`, *optional*, defaults to `False`): If True, no check is done before trying to trace the model, this is mostly useful for debugging purposes. Returns: `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=[""input_ids"", ""attention_mask"", ""token_type_ids""]) ``` """""" if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) concrete_args = get_concrete_args(model, input_names) if not disable_check: check_if_model_is_supported(model) # Tracing. tracer = HFTracer() traced_graph = tracer.trace(model, concrete_args=concrete_args) traced = torch.fx.GraphModule(model, traced_graph) traced.config = model.config # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus # _generate_dummy_input, where the model class is needed. traced.class_for_deserialization = model.__class__ traced.device = model.device return traced ","def symbolic_trace( model: PreTrainedModel, input_names: Optional[List[str]] = None, disable_check: bool = False, ) -> GraphModule: """""" Performs symbolic tracing on the model. Args: model ([`PreTrainedModel`]): The model to trace. input_names (`List[str]`, *optional*): The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. disable_check (`bool`, *optional*, defaults to `False`): If `True`, no check is done before trying to trace the model, this is mostly useful for debugging purposes. Returns: `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=[""input_ids"", ""attention_mask"", ""token_type_ids""]) ``` """""" if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) concrete_args = get_concrete_args(model, input_names) if not disable_check: check_if_model_is_supported(model) # Tracing. tracer = HFTracer() traced_graph = tracer.trace(model, concrete_args=concrete_args) traced = torch.fx.GraphModule(model, traced_graph) traced.config = model.config # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus # _generate_dummy_input, where the model class is needed. traced.class_for_deserialization = model.__class__ traced.device = model.device return traced " 45575,"def create_ssl_context(ssl_certificate, ssl_keyfile, ssl_password): """"""Create a SSL context if a proper certificate is passed."""""" if ssl_certificate: import ssl ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain( ssl_certificate, keyfile=ssl_keyfile, password=ssl_password ) return ssl_context else: return None ","def create_ssl_context(ssl_certificate, ssl_keyfile, ssl_password): """"""Create a SSL context if a certificate is passed."""""" if ssl_certificate: import ssl ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain( ssl_certificate, keyfile=ssl_keyfile, password=ssl_password ) return ssl_context else: return None " 39089,"def setup(**kwargs): import tomli import tomli_w from hatchling.__about__ import __version__ project_metadata = {} hatch_metadata = {} tool_metadata = {'hatch': hatch_metadata} project_data = { 'build-system': {'requires': [f'hatchling>={__version__}'], 'build-backend': 'hatchling.build'}, 'project': project_metadata, 'tool': tool_metadata, } _parse_setup_cfg(kwargs) _apply_env_vars(kwargs) name = kwargs['name'] project_name = name.replace('_', '-') package_name = package_path = package_source = sorted(kwargs.get('packages') or [name.replace('-', '_')])[0].split( '.' 
)[0] project_metadata['name'] = project_name if 'description' in kwargs: project_metadata['description'] = kwargs['description'] for readme_file in ('README.md', 'README.rst', 'README.txt'): if os.path.isfile(os.path.join(HERE, readme_file)): project_metadata['readme'] = readme_file break project_metadata['license'] = kwargs.get('license', '') if 'python_requires' in kwargs: project_metadata['requires-python'] = kwargs['python_requires'] author = {} if 'author' in kwargs: author['name'] = kwargs['author'] if 'author_email' in kwargs: author['email'] = kwargs['author_email'] if author: project_metadata['authors'] = [author] if 'keywords' in kwargs: keywords = kwargs['keywords'] if isinstance(keywords, str): keywords = keywords.replace(',', ' ').split() project_metadata['keywords'] = sorted(keywords) if 'classifiers' in kwargs: project_metadata['classifiers'] = sorted(kwargs['classifiers']) fixed_indices = [] final_index = 0 for i, classifier in enumerate(project_metadata['classifiers']): if classifier.startswith('Programming Language :: Python :: '): final_index = i for python_version in ('3.10', '3.11', '3.12'): if classifier.endswith(python_version): fixed_indices.append(i) break for i, index in enumerate(fixed_indices): project_metadata['classifiers'].insert(final_index, project_metadata['classifiers'].pop(index - i)) if 'install_requires' in kwargs: project_metadata['dependencies'] = sorted(kwargs['install_requires'], key=lambda d: d.lower()) project_metadata['dynamic'] = ['version'] if 'extras_require' in kwargs: project_metadata['optional-dependencies'] = { group: sorted(dependencies, key=lambda d: d.lower()) if isinstance(dependencies, list) else dependencies for group, dependencies in sorted(kwargs['extras_require'].items()) } if 'entry_points' in kwargs and isinstance(kwargs['entry_points'], dict): entry_points = {} for entry_point, definitions in kwargs['entry_points'].items(): if isinstance(definitions, str): definitions = [definitions] definitions = dict(sorted(d.replace(' ', '').split('=', 1) for d in definitions)) if entry_point == 'console_scripts': project_metadata['scripts'] = definitions elif entry_point == 'gui_scripts': project_metadata['gui-scripts'] = definitions else: entry_points[entry_point] = definitions if entry_points: project_metadata['entry-points'] = dict(sorted(entry_points.items())) urls = {} if 'url' in kwargs: urls['Homepage'] = kwargs['url'] if 'download_url' in kwargs: urls['Download'] = kwargs['download_url'] if 'project_urls' in kwargs: urls.update(kwargs['project_urls']) if urls: project_metadata['urls'] = dict(sorted(urls.items())) build_targets = {} build_data = {} if 'use_scm_version' in kwargs: project_data['build-system']['requires'].append('hatch-vcs') hatch_metadata['version'] = {'source': 'vcs'} build_data['hooks'] = {'vcs': {'version-file': f'{package_path}/_version.py'}} else: hatch_metadata['version'] = {'path': f'{package_path}/__init__.py'} build_data['targets'] = build_targets if '' in kwargs.get('package_dir', {}): package_source = kwargs['package_dir'][''] package = (kwargs.get('packages') or [package_name])[0] package_path = f'{package_source}/{package}' if package_path != f'src/{package_name}': build_targets.setdefault('wheel', {})['packages'] = [package_path] if kwargs.get('data_files', []): shared_data = {} for shared_directory, relative_paths in kwargs['data_files']: relative_files = {} for relative_path in relative_paths: relative_directory, filename = os.path.split(relative_path) relative_files.setdefault(relative_directory, 
[]).append(filename) for relative_directory, files in sorted(relative_files.items()): if not os.path.isdir(relative_directory) or set(os.listdir(relative_directory)) != set(files): for filename in sorted(files): local_path = os.path.join(relative_directory, filename) shared_data[local_path] = f'{shared_directory}/{filename}' else: shared_data[relative_directory] = shared_directory build_targets.setdefault('wheel', {})['shared-data'] = shared_data build_targets['sdist'] = { 'include': [ f'/{package_source}', ] } hatch_metadata['build'] = build_data project_file = os.path.join(HERE, 'pyproject.toml') if os.path.isfile(project_file): with open(project_file, 'r', encoding='utf-8') as f: old_project_data = tomli.loads(f.read()) tool_metadata.update(old_project_data.get('tool', {})) old_project_data.pop('build-system', None) old_project_data.pop('project', None) old_project_data.pop('tool', None) project_data.update(old_project_data) with open(project_file, 'w', encoding='utf-8') as f: f.write(tomli_w.dumps(project_data)) ","def setup(**kwargs): import tomli import tomli_w from hatchling.__about__ import __version__ project_metadata = {} hatch_metadata = {} tool_metadata = {'hatch': hatch_metadata} project_data = { 'build-system': {'requires': [f'hatchling>={__version__}'], 'build-backend': 'hatchling.build'}, 'project': project_metadata, 'tool': tool_metadata, } _parse_setup_cfg(kwargs) _apply_env_vars(kwargs) name = kwargs['name'] project_name = name.replace('_', '-') package_name = package_path = package_source = sorted(kwargs.get('packages') or [name.replace('-', '_')])[0].split( '.' )[0] project_metadata['name'] = project_name if 'description' in kwargs: project_metadata['description'] = kwargs['description'] for readme_file in ('README.md', 'README.rst', 'README.txt'): if os.path.isfile(os.path.join(HERE, readme_file)): project_metadata['readme'] = readme_file break project_metadata['license'] = kwargs.get('license', '') if 'python_requires' in kwargs: project_metadata['requires-python'] = kwargs['python_requires'] author = {} if 'author' in kwargs: author['name'] = kwargs['author'] if 'author_email' in kwargs: author['email'] = kwargs['author_email'] if author: project_metadata['authors'] = [author] if 'keywords' in kwargs: keywords = kwargs['keywords'] if isinstance(keywords, str): keywords = keywords.replace(',', ' ').split() project_metadata['keywords'] = sorted(keywords) if 'classifiers' in kwargs: project_metadata['classifiers'] = sorted(kwargs['classifiers']) fixed_indices = [] final_index = 0 for i, classifier in enumerate(project_metadata['classifiers']): if classifier.startswith('Programming Language :: Python :: '): final_index = i for python_version in ('3.10', '3.11', '3.12'): if classifier.endswith(python_version): fixed_indices.append(i) break for i, index in enumerate(fixed_indices): project_metadata['classifiers'].insert(final_index, project_metadata['classifiers'].pop(index - i)) if 'install_requires' in kwargs: project_metadata['dependencies'] = sorted(kwargs['install_requires'], key=lambda d: d.lower()) project_metadata['dynamic'] = ['version'] if 'extras_require' in kwargs: project_metadata['optional-dependencies'] = { group: sorted(dependencies, key=lambda d: d.lower()) if isinstance(dependencies, list) else dependencies for group, dependencies in sorted(kwargs['extras_require'].items()) } if 'entry_points' in kwargs and isinstance(kwargs['entry_points'], dict): entry_points = {} for entry_point, definitions in kwargs['entry_points'].items(): if 
isinstance(definitions, str): definitions = [definitions] definitions = dict(sorted(d.replace(' ', '').split('=', 1) for d in definitions)) if entry_point == 'console_scripts': project_metadata['scripts'] = definitions elif entry_point == 'gui_scripts': project_metadata['gui-scripts'] = definitions else: entry_points[entry_point] = definitions if entry_points: project_metadata['entry-points'] = dict(sorted(entry_points.items())) urls = {} if 'url' in kwargs: urls['Homepage'] = kwargs['url'] if 'download_url' in kwargs: urls['Download'] = kwargs['download_url'] if 'project_urls' in kwargs: urls.update(kwargs['project_urls']) if urls: project_metadata['urls'] = dict(sorted(urls.items())) build_targets = {} build_data = {} if 'use_scm_version' in kwargs: project_data['build-system']['requires'].append('hatch-vcs') hatch_metadata['version'] = {'source': 'vcs'} build_data['hooks'] = {'vcs': {'version-file': f'{package_path}/_version.py'}} else: hatch_metadata['version'] = {'path': f'{package_path}/__init__.py'} build_data['targets'] = build_targets if '' in kwargs.get('package_dir', {}): package_source = kwargs['package_dir'][''] package = (kwargs.get('packages') or [package_name])[0] package_path = f'{package_source}/{package}' if package_path != f'src/{package_name}': build_targets.setdefault('wheel', {})['packages'] = [package_path] if kwargs.get('data_files', []): shared_data = {} for shared_directory, relative_paths in kwargs['data_files']: relative_files = {} for relative_path in relative_paths: relative_directory, filename = os.path.split(relative_path) relative_files.setdefault(relative_directory, []).append(filename) for relative_directory, files in sorted(relative_files.items()): if not os.path.isdir(relative_directory) or set(os.listdir(relative_directory)) != set(files): for filename in sorted(files): local_path = os.path.join(relative_directory, filename).replace('\\', '/') shared_data[local_path] = f'{shared_directory}/{filename}' else: shared_data[relative_directory] = shared_directory build_targets.setdefault('wheel', {})['shared-data'] = shared_data build_targets['sdist'] = { 'include': [ f'/{package_source}', ] } hatch_metadata['build'] = build_data project_file = os.path.join(HERE, 'pyproject.toml') if os.path.isfile(project_file): with open(project_file, 'r', encoding='utf-8') as f: old_project_data = tomli.loads(f.read()) tool_metadata.update(old_project_data.get('tool', {})) old_project_data.pop('build-system', None) old_project_data.pop('project', None) old_project_data.pop('tool', None) project_data.update(old_project_data) with open(project_file, 'w', encoding='utf-8') as f: f.write(tomli_w.dumps(project_data)) " 25760,"def write_objective(n, terms): """""" Writer function for writing out one or mutliple objective terms. Parameters ---------- n : pypsa.Network terms : str/numpy.array/pandas.Series/pandas.DataFrame String or array of strings which represent new objective terms, built with :func:`linexpr` """""" n.objective_f.write(join_exprs(terms)) ","def write_objective(n, terms): """""" Writer function for writing out one or multiple objective terms. Parameters ---------- n : pypsa.Network terms : str/numpy.array/pandas.Series/pandas.DataFrame String or array of strings which represent new objective terms, built with :func:`linexpr` """""" n.objective_f.write(join_exprs(terms)) " 837,"def limit_seq(expr, n=None, trials=5): """"""Finds the limit of a sequence as index n tends to infinity. 
Parameters ========== expr : Expr SymPy expression for the n-th term of the sequence n : Symbol, optional The index of the sequence, an integer that tends to positive infinity. If None, inferred from the expression unless it has multiple symbols. trials: int, optional The algorithm is highly recursive. ``trials`` is a safeguard from infinite recursion in case the limit is not easily computed by the algorithm. Try increasing ``trials`` if the algorithm returns ``None``. Admissible Terms ================ The algorithm is designed for sequences built from rational functions, indefinite sums, and indefinite products over an indeterminate n. Terms of alternating sign are also allowed, but more complex oscillatory behavior is not supported. Examples ======== >>> from sympy import limit_seq, Sum, binomial >>> from sympy.abc import n, k, m >>> limit_seq((5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5), n) 5/3 >>> limit_seq(binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n)), n) 3/4 >>> limit_seq(Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n), n) 4 See Also ======== sympy.series.limitseq.dominant References ========== .. [1] Computing Limits of Sequences - Manuel Kauers """""" from sympy.concrete.summations import Sum from sympy.calculus.util import AccumulationBounds if n is None: free = expr.free_symbols if len(free) == 1: n = free.pop() elif not free: return expr else: raise ValueError(""Expression has more than one variable. "" ""Please specify a variable."") elif n not in expr.free_symbols: return expr expr = expr.rewrite(fibonacci, S.GoldenRatio) n_ = Dummy(""n"", integer=True, positive=True) n1 = Dummy(""n"", odd=True, positive=True) n2 = Dummy(""n"", even=True, positive=True) # If there is a negative term raised to a power involving n, or a # trigonometric function, then consider even and odd n separately. powers = (p.as_base_exp() for p in expr.atoms(Pow)) if any(b.is_negative and e.has(n) for b, e in powers) or expr.has(cos, sin): L1 = _limit_seq(expr.xreplace({n: n1}), n1, trials) L2 = _limit_seq(expr.xreplace({n: n2}), n2, trials) if L1 != L2: if (L1 == None or L2 == None) and expr.has(cos, sin): L3 = _limit_seq(expr.xreplace({n: n_}), n_, trials) return L3 elif ((L1 != None and L2 != None) and (L1.is_comparable and L2.is_comparable)): return AccumulationBounds(Min(L1, L2), Max(L1, L2)) else: return None else: L1 = _limit_seq(expr.xreplace({n: n_}), n_, trials) if L1 is not None: return L1 else: if expr.is_Add: limits = [limit_seq(term, n, trials) for term in expr.args] if any(result is None for result in limits): return None else: return Add(*limits) # Maybe the absolute value is easier to deal with (though not if # it has a Sum). If it tends to 0, the limit is 0. elif not expr.has(Sum): if _limit_seq(Abs(expr.xreplace({n: n_})), n_, trials).is_zero: return S.Zero ","def limit_seq(expr, n=None, trials=5): """"""Finds the limit of a sequence as index n tends to infinity. Parameters ========== expr : Expr SymPy expression for the n-th term of the sequence n : Symbol, optional The index of the sequence, an integer that tends to positive infinity. If None, inferred from the expression unless it has multiple symbols. trials: int, optional The algorithm is highly recursive. ``trials`` is a safeguard from infinite recursion in case the limit is not easily computed by the algorithm. Try increasing ``trials`` if the algorithm returns ``None``. 
Admissible Terms ================ The algorithm is designed for sequences built from rational functions, indefinite sums, and indefinite products over an indeterminate n. Terms of alternating sign are also allowed, but more complex oscillatory behavior is not supported. Examples ======== >>> from sympy import limit_seq, Sum, binomial >>> from sympy.abc import n, k, m >>> limit_seq((5*n**3 + 3*n**2 + 4) / (3*n**3 + 4*n - 5), n) 5/3 >>> limit_seq(binomial(2*n, n) / Sum(binomial(2*k, k), (k, 1, n)), n) 3/4 >>> limit_seq(Sum(k**2 * Sum(2**m/m, (m, 1, k)), (k, 1, n)) / (2**n*n), n) 4 See Also ======== sympy.series.limitseq.dominant References ========== .. [1] Computing Limits of Sequences - Manuel Kauers """""" from sympy.concrete.summations import Sum from sympy.calculus.util import AccumulationBounds if n is None: free = expr.free_symbols if len(free) == 1: n = free.pop() elif not free: return expr else: raise ValueError(""Expression has more than one variable. "" ""Please specify a variable."") elif n not in expr.free_symbols: return expr expr = expr.rewrite(fibonacci, S.GoldenRatio) n_ = Dummy(""n"", integer=True, positive=True) n1 = Dummy(""n"", odd=True, positive=True) n2 = Dummy(""n"", even=True, positive=True) # If there is a negative term raised to a power involving n, or a # trigonometric function, then consider even and odd n separately. powers = (p.as_base_exp() for p in expr.atoms(Pow)) if any(b.is_negative and e.has(n) for b, e in powers) or expr.has(cos, sin): L1 = _limit_seq(expr.xreplace({n: n1}), n1, trials) L2 = _limit_seq(expr.xreplace({n: n2}), n2, trials) if L1 != L2: if (L1 is None or L2 is None) and expr.has(cos, sin): L3 = _limit_seq(expr.xreplace({n: n_}), n_, trials) return L3 elif ((L1 != None and L2 != None) and (L1.is_comparable and L2.is_comparable)): return AccumulationBounds(Min(L1, L2), Max(L1, L2)) else: return None else: L1 = _limit_seq(expr.xreplace({n: n_}), n_, trials) if L1 is not None: return L1 else: if expr.is_Add: limits = [limit_seq(term, n, trials) for term in expr.args] if any(result is None for result in limits): return None else: return Add(*limits) # Maybe the absolute value is easier to deal with (though not if # it has a Sum). If it tends to 0, the limit is 0. elif not expr.has(Sum): if _limit_seq(Abs(expr.xreplace({n: n_})), n_, trials).is_zero: return S.Zero " 26236,"def generate_and_verify_balancing_traffic(duthost, ptfhost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pktlen, ttl): """""" Send TCP packets and verify balancing range between sub-ports. 
Args: duthost: DUT host object ptfhost: PTF host object ptfadapter: PTF adapter src_port: Port of PTF dst_port: Port of DUT ip_src: Source IP address of PTF ip_dst: Destination IP address of DUT pkt_action: Packet action (forwarded or drop) pktlen: packet length ttl: Time to live """""" router_mac = duthost.facts['router_mac'] src_port_number = int(get_port_number(src_port)) src_mac = ptfadapter.dataplane.get_mac(0, src_port_number) ip_src = '10.0.0.1' ip_dst = ip_dst.split('/')[0] vlan_vid = None dl_vlan_enable = False send_pkt_length = pktlen if constants.VLAN_SUB_INTERFACE_SEPARATOR in src_port: vlan_vid = int(src_port.split('.')[1]) dl_vlan_enable = True send_pkt_length += len(scapyall.Dot1Q()) pkt = create_packet(eth_src=src_mac, eth_dst=router_mac, ip_src=ip_src, ip_dst=ip_dst, vlan_vid=vlan_vid, dl_vlan_enable=dl_vlan_enable, tr_type='TCP', ttl=64, pktlen=send_pkt_length) ptfadapter.dataplane.flush() time.sleep(2) for _ in range(BALANCING_TEST_TIMES * len(dst_port)): testutils.send_packet(ptfadapter, src_port_number, pkt) pkt['IP'].src = str(ipaddress.ip_address(pkt['IP'].src.encode().decode()) + 1) exp_pkt = create_packet(eth_src=router_mac, eth_dst=src_mac, ip_src=ip_src, ip_dst=ip_dst, vlan_vid=None, dl_vlan_enable=False, tr_type='TCP', ttl=ttl, pktlen=pktlen) ifaces_map = ptfhost.host.options['variable_manager'].extra_vars['ifaces_map'] config_port_indices = {v: k for k, v in ifaces_map.items()} dst_port_numbers = [config_port_indices[k] for k in config_port_indices if k in dst_port] ignore_fields=[(""Ether"", ""dst""), (""IP"", ""src""), (""IP"", ""chksum""), (""TCP"", ""chksum"")] if dl_vlan_enable: ignore_fields.append((""Ether"", ""type"")) pkt_filter = FilterPktBuffer(ptfadapter=ptfadapter, exp_pkt=exp_pkt, dst_port_numbers=dst_port_numbers, match_fields=[(""Ethernet"", ""src""), (""IP"", ""dst""), ('TCP', ""dport"")], ignore_fields=ignore_fields) pkt_in_buffer = pkt_filter.filter_pkt_in_buffer() pytest_assert(pkt_in_buffer is True, ""Expected packet not available:\n{}"".format(pkt_in_buffer)) pytest_assert(check_balancing(pkt_filter.matched_index), ""Balancing error:\n{}"".format(pkt_filter.matched_index)) ","def generate_and_verify_balancing_traffic(duthost, ptfhost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pktlen, ttl): """""" Send TCP packets and verify balancing range between sub-ports. 
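For instance (illustrative port name, assuming '.' as the VLAN sub-interface separator), a source port named eth4.100 is treated as a sub-port and the generated test packets are tagged with VLAN ID 100.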
Args: duthost: DUT host object ptfhost: PTF host object ptfadapter: PTF adapter src_port: Port of PTF dst_port: Port of DUT ip_src: Source IP address of PTF ip_dst: Destination IP address of DUT pkt_action: Packet action (forwarded or drop) pktlen: packet length ttl: Time to live """""" router_mac = duthost.facts['router_mac'] src_port_number = int(get_port_number(src_port)) src_mac = ptfadapter.dataplane.get_mac(0, src_port_number) ip_src = '10.0.0.1' ip_dst = ip_dst.split('/')[0] vlan_vid = None dl_vlan_enable = False send_pkt_length = pktlen if constants.VLAN_SUB_INTERFACE_SEPARATOR in src_port: vlan_vid = int(src_port.split(VLAN_SUB_INTERFACE_SEPARATOR)[1]) dl_vlan_enable = True send_pkt_length += len(scapyall.Dot1Q()) pkt = create_packet(eth_src=src_mac, eth_dst=router_mac, ip_src=ip_src, ip_dst=ip_dst, vlan_vid=vlan_vid, dl_vlan_enable=dl_vlan_enable, tr_type='TCP', ttl=64, pktlen=send_pkt_length) ptfadapter.dataplane.flush() time.sleep(2) for _ in range(BALANCING_TEST_TIMES * len(dst_port)): testutils.send_packet(ptfadapter, src_port_number, pkt) pkt['IP'].src = str(ipaddress.ip_address(pkt['IP'].src.encode().decode()) + 1) exp_pkt = create_packet(eth_src=router_mac, eth_dst=src_mac, ip_src=ip_src, ip_dst=ip_dst, vlan_vid=None, dl_vlan_enable=False, tr_type='TCP', ttl=ttl, pktlen=pktlen) ifaces_map = ptfhost.host.options['variable_manager'].extra_vars['ifaces_map'] config_port_indices = {v: k for k, v in ifaces_map.items()} dst_port_numbers = [config_port_indices[k] for k in config_port_indices if k in dst_port] ignore_fields=[(""Ether"", ""dst""), (""IP"", ""src""), (""IP"", ""chksum""), (""TCP"", ""chksum"")] if dl_vlan_enable: ignore_fields.append((""Ether"", ""type"")) pkt_filter = FilterPktBuffer(ptfadapter=ptfadapter, exp_pkt=exp_pkt, dst_port_numbers=dst_port_numbers, match_fields=[(""Ethernet"", ""src""), (""IP"", ""dst""), ('TCP', ""dport"")], ignore_fields=ignore_fields) pkt_in_buffer = pkt_filter.filter_pkt_in_buffer() pytest_assert(pkt_in_buffer is True, ""Expected packet not available:\n{}"".format(pkt_in_buffer)) pytest_assert(check_balancing(pkt_filter.matched_index), ""Balancing error:\n{}"".format(pkt_filter.matched_index)) " 41902,"def test_wfg_2d() -> None: for n in range(2, 30): r = n * np.ones(2) s = np.asarray([[n - 1 - i, i] for i in range(n)]) for i in range(n + 1): s = np.vstack((s, np.asarray([i, n - i]))) np.random.shuffle(s) v = optuna.multi_objective._hypervolume._compute_2d(s, r) assert v == n * n - n * (n - 1) // 2 ","def test_compute_2d() -> None: for n in range(2, 30): r = n * np.ones(2) s = np.asarray([[n - 1 - i, i] for i in range(n)]) for i in range(n + 1): s = np.vstack((s, np.asarray([i, n - i]))) np.random.shuffle(s) v = optuna.multi_objective._hypervolume._compute_2d(s, r) assert v == n * n - n * (n - 1) // 2 " 58889,"def ksel(self, type_="""", item="""", comp="""", vmin="""", vmax="""", vinc="""", kabs="""", **kwargs): """"""Selects a subset of keypoints or hard points. APDL Command: KSEL Parameters ---------- type\_ Label identifying the type of select: S - Select a new set (default). R - Reselect a set from the current set. A - Additionally select a set and extend the current set. U - Unselect a set from the current set. ALL - Restore the full set. NONE - Unselect the full set. INVE - Invert the current set (selected becomes unselected and vice versa). STAT - Display the current select status. Notes ----- Selects a subset of keypoints or hard points. 
For example, to select a new set of keypoints based on keypoint numbers 1 through 7, use >>> mapdl.ksel('S', 'KP', '', 1, 7) The selected subset is used when the ALL label is entered (or implied) on other commands, such as KLIST,ALL. Only data identified by keypoint number are selected. Data are flagged as selected and unselected; no data are actually deleted from the database. This command is valid in any processor. For selections based on non-integer numbers (coordinates, results, etc.), items that are within the range VMIN -Toler and VMAX + Toler are selected. The default tolerance Toler is based on the relative values of VMIN and VMAX as follows: If VMIN = VMAX, Toler = 0.005 x VMIN. If VMIN = VMAX = 0.0, Toler = 1.0E-6. If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX - VMIN). Use the SELTOL command to override this default and specify Toler explicitly. Table: 203:: : KSEL - Valid Item and Component Labels Examples -------- To select a single keypoint (keypoint 1) >>> mapdl.ksel('S', 'KP', '', 1) """""" command = ""KSEL,%s,%s,%s,%s,%s,%s,%s"" % (str(type_), str( item), str(comp), str(vmin), str(vmax), str(vinc), str(kabs)) return self.run(command, **kwargs) ","def ksel(self, type_="""", item="""", comp="""", vmin="""", vmax="""", vinc="""", kabs="""", **kwargs): """"""Selects a subset of keypoints or hard points. APDL Command: KSEL Parameters ---------- type\_ Label identifying the type of select: S - Select a new set (default). R - Reselect a set from the current set. A - Additionally select a set and extend the current set. U - Unselect a set from the current set. ALL - Restore the full set. NONE - Unselect the full set. INVE - Invert the current set (selected becomes unselected and vice versa). STAT - Display the current select status. Notes ----- Selects a subset of keypoints or hard points. For example, to select a new set of keypoints based on keypoint numbers 1 through 7, use >>> mapdl.ksel('S', 'KP', '', 1, 7) The selected subset is used when the ALL label is entered (or implied) on other commands, such as KLIST,ALL. Only data identified by keypoint number are selected. Data are flagged as selected and unselected; no data are actually deleted from the database. This command is valid in any processor. For selections based on non-integer numbers (coordinates, results, etc.), items that are within the range VMIN -Toler and VMAX + Toler are selected. The default tolerance Toler is based on the relative values of VMIN and VMAX as follows: If VMIN = VMAX, Toler = 0.005 x VMIN. If VMIN = VMAX = 0.0, Toler = 1.0E-6. If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX - VMIN). Use the SELTOL command to override this default and specify Toler explicitly. Table: 203:: : KSEL - Valid Item and Component Labels Examples -------- To select a single keypoint (keypoint 1) >>> mapdl.ksel('S', 'KP', '', 1) """""" command = ""KSEL,%s,%s,%s,%s,%s,%s,%s"" % (str(type_), str( item), str(comp), str(vmin), str(vmax), str(vinc), str(kabs)) return self.run(command, **kwargs) " 383,"def _get_scaling(total_size: Optional[Union[int, Sequence[int]]], shape, ndim: int): """""" Gets scaling constant for logp. 
Parameters ---------- total_size: Optional[int|List[int]] size of a fully observed data without minibatching, `None` means data is fully observed shape: shape shape of an observed data ndim: int ndim hint Returns ------- scalar """""" if total_size is None: coef = 1.0 elif isinstance(total_size, int): if ndim >= 1: denom = shape[0] else: denom = 1 coef = floatX(total_size) / floatX(denom) elif isinstance(total_size, (list, tuple)): if not all(isinstance(i, int) for i in total_size if (i is not Ellipsis and i is not None)): raise TypeError( ""Unrecognized `total_size` type, expected "" ""int or list of ints, got %r"" % total_size ) if Ellipsis in total_size: sep = total_size.index(Ellipsis) begin = total_size[:sep] end = total_size[sep + 1 :] if Ellipsis in end: raise ValueError( ""Double Ellipsis in `total_size` is restricted, got %r"" % total_size ) else: begin = total_size end = [] if (len(begin) + len(end)) > ndim: raise ValueError( ""Length of `total_size` is too big, "" ""number of scalings is bigger that ndim, got %r"" % total_size ) elif (len(begin) + len(end)) == 0: coef = 1.0 if len(end) > 0: shp_end = shape[-len(end) :] else: shp_end = np.asarray([]) shp_begin = shape[: len(begin)] begin_coef = [ floatX(t) / floatX(shp_begin[i]) for i, t in enumerate(begin) if t is not None ] end_coef = [floatX(t) / floatX(shp_end[i]) for i, t in enumerate(end) if t is not None] coefs = begin_coef + end_coef coef = at.prod(coefs) elif isinstance(total_size, (np.ndarray, at.TensorVariable)): coef = total_size warnings.warn(""Using array as scaling that is experimental"", UserWarning) else: raise TypeError( ""Unrecognized `total_size` type, expected int or list of ints, got %r"" % total_size ) return at.as_tensor(coef, dtype=aesara.config.floatX) ","def _get_scaling(total_size: Optional[Union[int, Sequence[int]]], shape, ndim: int): """""" Gets scaling constant for logp. 
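Worked example for the integer case: the coefficient is total_size / shape[0], so total_size=100 with an observed variable of shape (10, 5) gives 100 / 10 = 10.0.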
Parameters ---------- total_size: Optional[int|List[int]] size of a fully observed data without minibatching, `None` means data is fully observed shape: shape shape of an observed data ndim: int ndim hint Returns ------- scalar """""" if total_size is None: coef = 1.0 elif isinstance(total_size, int): if ndim >= 1: denom = shape[0] else: denom = 1 coef = floatX(total_size) / floatX(denom) elif isinstance(total_size, (list, tuple)): if not all(isinstance(i, int) for i in total_size if (i is not Ellipsis and i is not None)): raise TypeError( ""Unrecognized `total_size` type, expected "" ""int or list of ints, got %r"" % total_size ) if Ellipsis in total_size: sep = total_size.index(Ellipsis) begin = total_size[:sep] end = total_size[sep + 1 :] if Ellipsis in end: raise ValueError( ""Double Ellipsis in `total_size` is restricted, got %r"" % total_size ) else: begin = total_size end = [] if (len(begin) + len(end)) > ndim: raise ValueError( ""Length of `total_size` is too big, "" ""number of scalings is bigger that ndim, got %r"" % total_size ) elif (len(begin) + len(end)) == 0: coef = 1.0 if len(end) > 0: shp_end = shape[-len(end) :] else: shp_end = np.asarray([]) shp_begin = shape[: len(begin)] begin_coef = [ floatX(t) / floatX(shp_begin[i]) for i, t in enumerate(begin) if t is not None ] end_coef = [floatX(t) / floatX(shp_end[i]) for i, t in enumerate(end) if t is not None] coefs = begin_coef + end_coef coef = at.prod(coefs) elif isinstance(total_size, (np.ndarray, at.TensorVariable)): coef = total_size warnings.warn(""Using an array for total_size is experimental."", UserWarning) else: raise TypeError( ""Unrecognized `total_size` type, expected int or list of ints, got %r"" % total_size ) return at.as_tensor(coef, dtype=aesara.config.floatX) " 58895,"def launch_mapdl( exec_file=None, run_location=None, jobname=""file"", nproc=2, ram=None, mode=None, override=False, loglevel=""ERROR"", additional_switches="""", start_timeout=120, port=None, cleanup_on_exit=True, start_instance=True, ip=LOCALHOST, clear_on_connect=True, log_apdl=False, verbose_mapdl=False, license_server_check=True, license_type=None, **kwargs, ): """"""Start MAPDL locally in gRPC mode. Parameters ---------- exec_file : str, optional The location of the MAPDL executable. Will use the cached location when left at the default ``None``. run_location : str, optional MAPDL working directory. Defaults to a temporary working directory. If directory doesn't exist, will create one. jobname : str, optional MAPDL jobname. Defaults to ``'file'``. nproc : int, optional Number of processors. Defaults to 2. ram : float, optional Fixed amount of memory to request for MAPDL. If ``None``, then MAPDL will use as much as available on the host machine. mode : str, optional Mode to launch MAPDL. Must be one of the following: - ``'grpc'`` - ``'corba'`` - ``'console'`` The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and provides the best performance and stability. The ``'corba'`` mode is available from v17.0 and newer and is given legacy support. This mode requires the additional ``ansys_corba`` module. Finally, the ``'console'`` mode is for legacy use only Linux only prior to v17.0. This console mode is pending depreciation. override : bool, optional Attempts to delete the lock file at the run_location. Useful when a prior MAPDL session has exited prematurely and the lock file has not been deleted. loglevel : str, optional Sets which messages are printed to the console. 
``'INFO'`` prints out all ANSYS messages, ``'WARNING``` prints only messages containing ANSYS warnings, and ``'ERROR'`` logs only error messages. additional_switches : str, optional Additional switches for MAPDL, for example ``'aa_r'``, the academic research license, would be added with: - ``additional_switches=""-aa_r""`` Avoid adding switches like -i -o or -b as these are already included to start up the MAPDL server. See the notes section for additional details. start_timeout : float, optional Maximum allowable time to connect to the MAPDL server. port : int Port to launch MAPDL gRPC on. Final port will be the first port available after (or including) this port. Defaults to 50052. You can also override the default behavior of this keyword argument with the environment variable ``PYMAPDL_PORT=`` custom_bin : str, optional Path to the MAPDL custom executable. On release 2020R2 on Linux, if ``None``, will check to see if you have ``ansys.mapdl_bin`` installed and use that executable. cleanup_on_exit : bool, optional Exit MAPDL when python exits or the mapdl Python instance is garbage collected. start_instance : bool, optional When False, connect to an existing MAPDL instance at ``ip`` and ``port``, which default to ``'127.0.0.1'`` at 50052. Otherwise, launch a local instance of MAPDL. You can also override the default behavior of this keyword argument with the environment variable ``PYMAPDL_START_INSTANCE=FALSE``. ip : bool, optional Used only when ``start_instance`` is ``False``. Defaults to ``'127.0.0.1'``. You can also override the default behavior of this keyword argument with the environment variable ""PYMAPDL_IP=FALSE"". clear_on_connect : bool, optional Used only when ``start_instance`` is ``False``. Defaults to ``True``, giving you a fresh environment when connecting to MAPDL. log_apdl : str, optional Enables logging every APDL command to the local disk. This can be used to ""record"" all the commands that are sent to MAPDL via PyMAPDL so a script can be run within MAPDL without PyMAPDL. remove_temp_files : bool, optional Removes temporary files on exit. Default ``False``. verbose_mapdl : bool, optional Enable printing of all output when launching and running MAPDL. This should be used for debugging only as output can be tracked within pymapdl. Default ``False``. license_server_check : bool, optional Check if the license server is available if MAPDL fails to start. Only available on ``mode='grpc'``. Defaults ``True``. license_type : str, optional Enable license type selection. You can input a string for its license name (for example 'meba' or 'ansys') or its description (""enterprise solver"" or ""enterprise"" respectively). You can also use legacy licenses (for example 'aa_t_a') but it will also raise a warning. If it is not used (None), no specific license will be requested, being up to the license server to provide a specific license typ. Default is ``None``. Notes ----- These are the MAPDL switch options as of 2020R2 applicable for running MAPDL as a service via gRPC. Excluded switches such as ``""-j""`` either not applicable or are set via keyword arguments. -acc : Enables the use of GPU hardware. See GPU Accelerator Capability in the Parallel Processing Guide for more information. -amfg : Enables the additive manufacturing capability. Requires an additive manufacturing license. For general information about this feature, see AM Process Simulation in ANSYS Workbench. -ansexe : Activates a custom mechanical APDL executable. 
In the ANSYS Workbench environment, activates a custom Mechanical APDL executable. -custom : Calls a custom Mechanical APDL executable See Running Your Custom Executable in the Programmer's Reference for more information. -db value : Initial memory allocation Defines the portion of workspace (memory) to be used as the initial allocation for the database. The default is 1024 MB. Specify a negative number to force a fixed size throughout the run; useful on small memory systems. -dis : Enables Distributed ANSYS See the Parallel Processing Guide for more information. -dvt : Enables ANSYS DesignXplorer advanced task (add-on). Requires DesignXplorer. -l : Specifies a language file to use other than English This option is valid only if you have a translated message file in an appropriately named subdirectory in ``/ansys_inc/v201/ansys/docu`` or ``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu`` -m : Specifies the total size of the workspace Workspace (memory) in megabytes used for the initial allocation. If you omit the ``-m`` option, the default is 2 GB (2048 MB). Specify a negative number to force a fixed size throughout the run. -machines : Specifies the distributed machines Machines on which to run a Distributed ANSYS analysis. See Starting Distributed ANSYS in the Parallel Processing Guide for more information. -mpi : Specifies the type of MPI to use. See the Parallel Processing Guide for more information. -mpifile : Specifies an existing MPI file Specifies an existing MPI file (appfile) to be used in a Distributed ANSYS run. See Using MPI Files in the Parallel Processing Guide for more information. -na : Specifies the number of GPU accelerator devices Number of GPU devices per machine or compute node when running with the GPU accelerator feature. See GPU Accelerator Capability in the Parallel Processing Guide for more information. -name : Defines Mechanical APDL parameters Set mechanical APDL parameters at program start-up. The parameter name must be at least two characters long. For details about parameters, see the ANSYS Parametric Design Language Guide. -p : ANSYS session product Defines the ANSYS session product that will run during the session. For more detailed information about the ``-p`` option, see Selecting an ANSYS Product via the Command Line. -ppf : HPC license Specifies which HPC license to use during a parallel processing run. See HPC Licensing in the Parallel Processing Guide for more information. -smp : Enables shared-memory parallelism. See the Parallel Processing Guide for more information. Examples -------- Launch MAPDL using the best protocol. >>> from ansys.mapdl.core import launch_mapdl >>> mapdl = launch_mapdl() Run MAPDL with shared memory parallel and specify the location of the ansys binary. >>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe' >>> mapdl = launch_mapdl(exec_file, additional_switches='-smp') Connect to an existing instance of MAPDL at IP 192.168.1.30 and port 50001. This is only available using the latest ``'grpc'`` mode. >>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30', port=50001) Force the usage of the CORBA protocol. >>> mapdl = launch_mapdl(mode='corba') Run MAPDL using the console mode (available only on Linux). 
>>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194', mode='console') """""" # These parameters are partially used for unit testing set_no_abort = kwargs.get(""set_no_abort"", True) ip = os.environ.get(""PYMAPDL_IP"", ip) if ""PYMAPDL_PORT"" in os.environ: port = int(os.environ.get(""PYMAPDL_PORT"")) if port is None: port = MAPDL_DEFAULT_PORT # connect to an existing instance if enabled if not get_start_instance(start_instance): mapdl = MapdlGrpc( ip=ip, port=port, cleanup_on_exit=False, loglevel=loglevel, set_no_abort=set_no_abort, ) if clear_on_connect: mapdl.clear() return mapdl # verify executable if exec_file is None: # Load cached path exec_file = get_ansys_path() if exec_file is None: raise FileNotFoundError( ""Invalid exec_file path or cannot load cached "" ""mapdl path. Enter one manually by specifying "" ""exec_file="" ) else: # verify ansys exists at this location if not os.path.isfile(exec_file): raise FileNotFoundError( f'Invalid MAPDL executable at ""{exec_file}""\n' ""Enter one manually using exec_file="" ) # verify run location if run_location is None: temp_dir = tempfile.gettempdir() run_location = os.path.join(temp_dir, ""ansys_%s"" % random_string(10)) if not os.path.isdir(run_location): try: os.mkdir(run_location) except: raise RuntimeError( ""Unable to create the temporary working "" f'directory ""{run_location}""\n' ""Please specify run_location="" ) else: if not os.path.isdir(run_location): raise FileNotFoundError(f'""{run_location}"" is not a valid directory') # verify no lock file and the mode is valid check_lock_file(run_location, jobname, override) mode = check_mode(mode, _version_from_path(exec_file)) # cache start parameters additional_switches = _validate_add_sw( additional_switches, exec_file, kwargs.pop(""force_intel"", False) ) if license_type is not None and isinstance(license_type, str): # In newer license server versions an invalid license name just get discarded and produces no effect or warning. # For example: # ```bash # mapdl.exe -p meba # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES. # mapdl.exe - p yoyoyo # The -p flag is ignored and it run the default license. # ``` # # In older versions probably it might raise an error. But not sure. license_type = license_type.lower().strip() if 'enterprise' in license_type and 'solver' not in license_type: license_type = 'ansys' elif 'enterprise' in license_type and 'solver' in license_type: license_type = 'meba' elif 'premium' in license_type: license_type = 'mech_2' elif 'pro' in license_type: license_type = 'mech_1' elif license_type not in ALLOWABLE_LICENSES: allow_lics = [f""'{each}'"" for each in ALLOWABLE_LICENSES] warn_text = \ f""The keyword argument 'license_type' value ('{license_type}') is not a recognized license name or has been deprecated.\n"" + \ ""Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"" + \ f""Recognized license names: {' '.join(allow_lics)}"" warnings.warn(warn_text, UserWarning) # LOG.warning(warn_text) additional_switches += ' -p ' + license_type # LOG.debug(f""Using specified license name '{license_type}' in the 'license_type' keyword argument."") elif '-p ' in additional_switches: # There is already a license request in additional switches. license_type = re.findall(r'-p \b(\w*)', additional_switches)[0] # getting only the first product license. 
if license_type not in ALLOWABLE_LICENSES: allow_lics = [f""'{each}'"" for each in ALLOWABLE_LICENSES] warn_text = \ f""The additional switch product value ('-p {license_type}') is not a recognized license name or has been deprecated.\n"" + \ ""Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"" + \ f""Recognized license names: {' '.join(allow_lics)}"" warnings.warn(warn_text, UserWarning) # LOG.warning(warn_text) # LOG.debug(f""Using specified license name '{license_type}' in the additional switches parameter."") elif license_type is not None: raise TypeError(""The argument 'license_type' does only accept str or None."") start_parm = { ""exec_file"": exec_file, ""run_location"": run_location, ""additional_switches"": additional_switches, ""jobname"": jobname, ""nproc"": nproc, } if mode in [""console"", ""corba""]: start_parm[""start_timeout""] = start_timeout else: start_parm[""ram""] = ram start_parm[""override""] = override start_parm[""timeout""] = start_timeout # Check the license server if license_server_check: # configure timeout to be 90% of the wait time of the startup # time for Ansys. lic_check = LicenseChecker( timeout=start_timeout*0.9, verbose=verbose_mapdl ) lic_check.start() try: if mode == ""console"": from ansys.mapdl.core.mapdl_console import MapdlConsole mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm) elif mode == ""corba"": try: # pending deprication to ansys-mapdl-corba from ansys.mapdl.core.mapdl_corba import MapdlCorba except ImportError: raise ImportError( ""To use this feature, install the MAPDL CORBA package"" "" with:\n\npip install ansys_corba"" ) from None broadcast = kwargs.get(""log_broadcast"", False) mapdl = MapdlCorba( loglevel=loglevel, log_apdl=log_apdl, log_broadcast=broadcast, verbose=verbose_mapdl, **start_parm, ) elif mode == ""grpc"": port, actual_run_location = launch_grpc( port=port, verbose=verbose_mapdl, ip=ip, **start_parm ) mapdl = MapdlGrpc( ip=ip, port=port, cleanup_on_exit=cleanup_on_exit, loglevel=loglevel, set_no_abort=set_no_abort, remove_temp_files=kwargs.pop(""remove_temp_files"", False), log_apdl=log_apdl, **start_parm, ) if run_location is None: mapdl._path = actual_run_location except Exception as exception: # Failed to launch for some reason. Check if failure was due # to the license check if license_server_check: lic_check.check() # pass raise exception # Setting license type. This is passed as an additional switch mapdl.license_type = license_type return mapdl ","def launch_mapdl( exec_file=None, run_location=None, jobname=""file"", nproc=2, ram=None, mode=None, override=False, loglevel=""ERROR"", additional_switches="""", start_timeout=120, port=None, cleanup_on_exit=True, start_instance=True, ip=LOCALHOST, clear_on_connect=True, log_apdl=False, verbose_mapdl=False, license_server_check=True, license_type=None, **kwargs, ): """"""Start MAPDL locally in gRPC mode. Parameters ---------- exec_file : str, optional The location of the MAPDL executable. Will use the cached location when left at the default ``None``. run_location : str, optional MAPDL working directory. Defaults to a temporary working directory. If directory doesn't exist, will create one. jobname : str, optional MAPDL jobname. Defaults to ``'file'``. nproc : int, optional Number of processors. Defaults to 2. ram : float, optional Fixed amount of memory to request for MAPDL. If ``None``, then MAPDL will use as much as available on the host machine. 
mode : str, optional Mode to launch MAPDL. Must be one of the following: - ``'grpc'`` - ``'corba'`` - ``'console'`` The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and provides the best performance and stability. The ``'corba'`` mode is available from v17.0 and newer and is given legacy support. This mode requires the additional ``ansys_corba`` module. Finally, the ``'console'`` mode is for legacy use only Linux only prior to v17.0. This console mode is pending depreciation. override : bool, optional Attempts to delete the lock file at the run_location. Useful when a prior MAPDL session has exited prematurely and the lock file has not been deleted. loglevel : str, optional Sets which messages are printed to the console. ``'INFO'`` prints out all ANSYS messages, ``'WARNING``` prints only messages containing ANSYS warnings, and ``'ERROR'`` logs only error messages. additional_switches : str, optional Additional switches for MAPDL, for example ``'aa_r'``, the academic research license, would be added with: - ``additional_switches=""-aa_r""`` Avoid adding switches like -i -o or -b as these are already included to start up the MAPDL server. See the notes section for additional details. start_timeout : float, optional Maximum allowable time to connect to the MAPDL server. port : int Port to launch MAPDL gRPC on. Final port will be the first port available after (or including) this port. Defaults to 50052. You can also override the default behavior of this keyword argument with the environment variable ``PYMAPDL_PORT=`` custom_bin : str, optional Path to the MAPDL custom executable. On release 2020R2 on Linux, if ``None``, will check to see if you have ``ansys.mapdl_bin`` installed and use that executable. cleanup_on_exit : bool, optional Exit MAPDL when python exits or the mapdl Python instance is garbage collected. start_instance : bool, optional When False, connect to an existing MAPDL instance at ``ip`` and ``port``, which default to ``'127.0.0.1'`` at 50052. Otherwise, launch a local instance of MAPDL. You can also override the default behavior of this keyword argument with the environment variable ``PYMAPDL_START_INSTANCE=FALSE``. ip : bool, optional Used only when ``start_instance`` is ``False``. Defaults to ``'127.0.0.1'``. You can also override the default behavior of this keyword argument with the environment variable ""PYMAPDL_IP=FALSE"". clear_on_connect : bool, optional Used only when ``start_instance`` is ``False``. Defaults to ``True``, giving you a fresh environment when connecting to MAPDL. log_apdl : str, optional Enables logging every APDL command to the local disk. This can be used to ""record"" all the commands that are sent to MAPDL via PyMAPDL so a script can be run within MAPDL without PyMAPDL. remove_temp_files : bool, optional Removes temporary files on exit. Default ``False``. verbose_mapdl : bool, optional Enable printing of all output when launching and running MAPDL. This should be used for debugging only as output can be tracked within pymapdl. Default ``False``. license_server_check : bool, optional Check if the license server is available if MAPDL fails to start. Only available on ``mode='grpc'``. Defaults ``True``. license_type : str, optional Enable license type selection. You can input a string for its license name (for example ``'meba'`` or ``'ansys'``) or its description (""enterprise solver"" or ""enterprise"" respectively). You can also use legacy licenses (for example ``'aa_t_a'``) but it will also raise a warning. 
If it is not used (``None``), no specific license will be requested, being up to the license server to provide a specific license type. Default is ``None``. Notes ----- These are the MAPDL switch options as of 2020R2 applicable for running MAPDL as a service via gRPC. Excluded switches such as ``""-j""`` either not applicable or are set via keyword arguments. -acc : Enables the use of GPU hardware. See GPU Accelerator Capability in the Parallel Processing Guide for more information. -amfg : Enables the additive manufacturing capability. Requires an additive manufacturing license. For general information about this feature, see AM Process Simulation in ANSYS Workbench. -ansexe : Activates a custom mechanical APDL executable. In the ANSYS Workbench environment, activates a custom Mechanical APDL executable. -custom : Calls a custom Mechanical APDL executable See Running Your Custom Executable in the Programmer's Reference for more information. -db value : Initial memory allocation Defines the portion of workspace (memory) to be used as the initial allocation for the database. The default is 1024 MB. Specify a negative number to force a fixed size throughout the run; useful on small memory systems. -dis : Enables Distributed ANSYS See the Parallel Processing Guide for more information. -dvt : Enables ANSYS DesignXplorer advanced task (add-on). Requires DesignXplorer. -l : Specifies a language file to use other than English This option is valid only if you have a translated message file in an appropriately named subdirectory in ``/ansys_inc/v201/ansys/docu`` or ``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu`` -m : Specifies the total size of the workspace Workspace (memory) in megabytes used for the initial allocation. If you omit the ``-m`` option, the default is 2 GB (2048 MB). Specify a negative number to force a fixed size throughout the run. -machines : Specifies the distributed machines Machines on which to run a Distributed ANSYS analysis. See Starting Distributed ANSYS in the Parallel Processing Guide for more information. -mpi : Specifies the type of MPI to use. See the Parallel Processing Guide for more information. -mpifile : Specifies an existing MPI file Specifies an existing MPI file (appfile) to be used in a Distributed ANSYS run. See Using MPI Files in the Parallel Processing Guide for more information. -na : Specifies the number of GPU accelerator devices Number of GPU devices per machine or compute node when running with the GPU accelerator feature. See GPU Accelerator Capability in the Parallel Processing Guide for more information. -name : Defines Mechanical APDL parameters Set mechanical APDL parameters at program start-up. The parameter name must be at least two characters long. For details about parameters, see the ANSYS Parametric Design Language Guide. -p : ANSYS session product Defines the ANSYS session product that will run during the session. For more detailed information about the ``-p`` option, see Selecting an ANSYS Product via the Command Line. -ppf : HPC license Specifies which HPC license to use during a parallel processing run. See HPC Licensing in the Parallel Processing Guide for more information. -smp : Enables shared-memory parallelism. See the Parallel Processing Guide for more information. Examples -------- Launch MAPDL using the best protocol. >>> from ansys.mapdl.core import launch_mapdl >>> mapdl = launch_mapdl() Run MAPDL with shared memory parallel and specify the location of the ansys binary. 
>>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe' >>> mapdl = launch_mapdl(exec_file, additional_switches='-smp') Connect to an existing instance of MAPDL at IP 192.168.1.30 and port 50001. This is only available using the latest ``'grpc'`` mode. >>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30', port=50001) Force the usage of the CORBA protocol. >>> mapdl = launch_mapdl(mode='corba') Run MAPDL using the console mode (available only on Linux). >>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194', mode='console') """""" # These parameters are partially used for unit testing set_no_abort = kwargs.get(""set_no_abort"", True) ip = os.environ.get(""PYMAPDL_IP"", ip) if ""PYMAPDL_PORT"" in os.environ: port = int(os.environ.get(""PYMAPDL_PORT"")) if port is None: port = MAPDL_DEFAULT_PORT # connect to an existing instance if enabled if not get_start_instance(start_instance): mapdl = MapdlGrpc( ip=ip, port=port, cleanup_on_exit=False, loglevel=loglevel, set_no_abort=set_no_abort, ) if clear_on_connect: mapdl.clear() return mapdl # verify executable if exec_file is None: # Load cached path exec_file = get_ansys_path() if exec_file is None: raise FileNotFoundError( ""Invalid exec_file path or cannot load cached "" ""mapdl path. Enter one manually by specifying "" ""exec_file="" ) else: # verify ansys exists at this location if not os.path.isfile(exec_file): raise FileNotFoundError( f'Invalid MAPDL executable at ""{exec_file}""\n' ""Enter one manually using exec_file="" ) # verify run location if run_location is None: temp_dir = tempfile.gettempdir() run_location = os.path.join(temp_dir, ""ansys_%s"" % random_string(10)) if not os.path.isdir(run_location): try: os.mkdir(run_location) except: raise RuntimeError( ""Unable to create the temporary working "" f'directory ""{run_location}""\n' ""Please specify run_location="" ) else: if not os.path.isdir(run_location): raise FileNotFoundError(f'""{run_location}"" is not a valid directory') # verify no lock file and the mode is valid check_lock_file(run_location, jobname, override) mode = check_mode(mode, _version_from_path(exec_file)) # cache start parameters additional_switches = _validate_add_sw( additional_switches, exec_file, kwargs.pop(""force_intel"", False) ) if license_type is not None and isinstance(license_type, str): # In newer license server versions an invalid license name just get discarded and produces no effect or warning. # For example: # ```bash # mapdl.exe -p meba # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES. # mapdl.exe - p yoyoyo # The -p flag is ignored and it run the default license. # ``` # # In older versions probably it might raise an error. But not sure. 
license_type = license_type.lower().strip() if 'enterprise' in license_type and 'solver' not in license_type: license_type = 'ansys' elif 'enterprise' in license_type and 'solver' in license_type: license_type = 'meba' elif 'premium' in license_type: license_type = 'mech_2' elif 'pro' in license_type: license_type = 'mech_1' elif license_type not in ALLOWABLE_LICENSES: allow_lics = [f""'{each}'"" for each in ALLOWABLE_LICENSES] warn_text = \ f""The keyword argument 'license_type' value ('{license_type}') is not a recognized license name or has been deprecated.\n"" + \ ""Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"" + \ f""Recognized license names: {' '.join(allow_lics)}"" warnings.warn(warn_text, UserWarning) # LOG.warning(warn_text) additional_switches += ' -p ' + license_type # LOG.debug(f""Using specified license name '{license_type}' in the 'license_type' keyword argument."") elif '-p ' in additional_switches: # There is already a license request in additional switches. license_type = re.findall(r'-p \b(\w*)', additional_switches)[0] # getting only the first product license. if license_type not in ALLOWABLE_LICENSES: allow_lics = [f""'{each}'"" for each in ALLOWABLE_LICENSES] warn_text = \ f""The additional switch product value ('-p {license_type}') is not a recognized license name or has been deprecated.\n"" + \ ""Still PyMAPDL will try to use it but in older versions you might experience problems connecting to the server.\n"" + \ f""Recognized license names: {' '.join(allow_lics)}"" warnings.warn(warn_text, UserWarning) # LOG.warning(warn_text) # LOG.debug(f""Using specified license name '{license_type}' in the additional switches parameter."") elif license_type is not None: raise TypeError(""The argument 'license_type' does only accept str or None."") start_parm = { ""exec_file"": exec_file, ""run_location"": run_location, ""additional_switches"": additional_switches, ""jobname"": jobname, ""nproc"": nproc, } if mode in [""console"", ""corba""]: start_parm[""start_timeout""] = start_timeout else: start_parm[""ram""] = ram start_parm[""override""] = override start_parm[""timeout""] = start_timeout # Check the license server if license_server_check: # configure timeout to be 90% of the wait time of the startup # time for Ansys. lic_check = LicenseChecker( timeout=start_timeout*0.9, verbose=verbose_mapdl ) lic_check.start() try: if mode == ""console"": from ansys.mapdl.core.mapdl_console import MapdlConsole mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm) elif mode == ""corba"": try: # pending deprication to ansys-mapdl-corba from ansys.mapdl.core.mapdl_corba import MapdlCorba except ImportError: raise ImportError( ""To use this feature, install the MAPDL CORBA package"" "" with:\n\npip install ansys_corba"" ) from None broadcast = kwargs.get(""log_broadcast"", False) mapdl = MapdlCorba( loglevel=loglevel, log_apdl=log_apdl, log_broadcast=broadcast, verbose=verbose_mapdl, **start_parm, ) elif mode == ""grpc"": port, actual_run_location = launch_grpc( port=port, verbose=verbose_mapdl, ip=ip, **start_parm ) mapdl = MapdlGrpc( ip=ip, port=port, cleanup_on_exit=cleanup_on_exit, loglevel=loglevel, set_no_abort=set_no_abort, remove_temp_files=kwargs.pop(""remove_temp_files"", False), log_apdl=log_apdl, **start_parm, ) if run_location is None: mapdl._path = actual_run_location except Exception as exception: # Failed to launch for some reason. 
Check if failure was due # to the license check if license_server_check: lic_check.check() # pass raise exception # Setting license type. This is passed as an additional switch mapdl.license_type = license_type return mapdl " 47015,"def handle_metrics(mode, metrics, output_dir): """""" Log and save metrics Args: - mode: one of train, eval, test - metrics: metrics dict - output_dir: where to save the metrics """""" logger.info(f""***** {mode} metrics *****"") for key, value in metrics.items(): logger.info("" %s = %s"", key, value) save_json(metrics, os.path.join(output_dir, f""{mode}_results.json"")) ","def handle_metrics(mode, metrics, output_dir): """""" Log and save metrics Args: - mode: one of train, eval, test - metrics: metrics dict - output_dir: where to save the metrics """""" logger.info(f""***** {mode} metrics *****"") for key, value in metrics.items(): logger.info(f"" {key} = {value}"") save_json(metrics, os.path.join(output_dir, f""{mode}_results.json"")) " 43228,"def test_pickle(): size = 100 vbuf = ReplayBuffer(size, stack_num=2) lbuf = ListReplayBuffer() pbuf = PrioritizedReplayBuffer(size, 0.6, 0.4) device = 'cuda' if torch.cuda.is_available() else 'cpu' rew = torch.tensor([1.]).to(device) print(rew) for i in range(4): vbuf.add(obs=Batch(index=np.array([i])), act=0, rew=rew, done=0) for i in range(3): lbuf.add(obs=Batch(index=np.array([i])), act=1, rew=rew, done=0) for i in range(5): pbuf.add(obs=Batch(index=np.array([i])), act=2, rew=rew, done=0, weight=np.random.rand()) # save pickle.dump(vbuf, open('/tmp/vbuf.pkl', 'wb')) pickle.dump(lbuf, open('/tmp/lbuf.pkl', 'wb')) pickle.dump(pbuf, open('/tmp/pbuf.pkl', 'wb')) # load _vbuf = pickle.load(open('/tmp/vbuf.pkl', 'rb')) _lbuf = pickle.load(open('/tmp/lbuf.pkl', 'rb')) _pbuf = pickle.load(open('/tmp/pbuf.pkl', 'rb')) assert len(_vbuf) == len(vbuf) and np.allclose(_vbuf.act, vbuf.act) assert len(_lbuf) == len(lbuf) and np.allclose(_lbuf.act, lbuf.act) assert len(_pbuf) == len(pbuf) and np.allclose(_pbuf.act, pbuf.act) # make sure the meta var is identical assert _vbuf.stack_num == vbuf.stack_num assert np.allclose(_pbuf.weight[np.arange(len(_pbuf))], pbuf.weight[np.arange(len(pbuf))]) ","def test_pickle(): size = 100 vbuf = ReplayBuffer(size, stack_num=2) lbuf = ListReplayBuffer() pbuf = PrioritizedReplayBuffer(size, 0.6, 0.4) device = 'cuda' if torch.cuda.is_available() else 'cpu' rew = torch.tensor([1.]).to(device) print(rew) for i in range(4): vbuf.add(obs=Batch(index=np.array([i])), act=0, rew=rew, done=0) for i in range(3): lbuf.add(obs=Batch(index=np.array([i])), act=1, rew=rew, done=0) for i in range(5): pbuf.add(obs=Batch(index=np.array([i])), act=2, rew=rew, done=0, weight=np.random.rand()) # save pickle.dump(vbuf, open('/tmp/vbuf.pkl', 'wb')) pickle.dump(lbuf, open('/tmp/lbuf.pkl', 'wb')) pickle.dump(pbuf, open('/tmp/pbuf.pkl', 'wb')) # load _vbuf = pickle.load(open('/tmp/vbuf.pkl', 'rb')) _lbuf = pickle.loads(ls) _pbuf = pickle.load(open('/tmp/pbuf.pkl', 'rb')) assert len(_vbuf) == len(vbuf) and np.allclose(_vbuf.act, vbuf.act) assert len(_lbuf) == len(lbuf) and np.allclose(_lbuf.act, lbuf.act) assert len(_pbuf) == len(pbuf) and np.allclose(_pbuf.act, pbuf.act) # make sure the meta var is identical assert _vbuf.stack_num == vbuf.stack_num assert np.allclose(_pbuf.weight[np.arange(len(_pbuf))], pbuf.weight[np.arange(len(pbuf))]) " 30179,"def execute_link(link_cmd_args, record_streams, quiet): """""" Executes the passed command plus arguments in a subprocess and returns the return value of the executed command. 
If the specified standard output and standard error of the command are recorded and also returned to the caller. link_cmd_args: A list where the first element is a command and the remaining elements are arguments passed to that command. record_streams: A bool that specifies whether to redirect standard output and and standard error to a temporary file which is returned to the caller (True) or not (False). TBA (see https://github.com/in-toto/in-toto/issues/6) Executes passed command in a subprocess and redirects stdout and stderr if specified. - A dictionary containing standard output and standard error of the executed command, called by-products. Note: If record_streams is False, the dict values are empty strings. - The return value of the executed command. """""" if record_streams: if (quiet == False): #record_streams true, quiet false return_code, stdout_str, stderr_str = \ securesystemslib.process.run_duplicate_streams(link_cmd_args) else: #record_streams true, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.PIPE, stderr=securesystemslib.process.PIPE) stdout_str = process.stdout stderr_str = process.stderr return_code = process.returncode else: if (quiet == False): #record_streams false, quiet false process = securesystemslib.process.run(link_cmd_args, check=False, stdout=None, stderr=None) stdout_str = stderr_str = """" return_code = process.returncode else: #record_streams false, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.DEVNULL, stderr=securesystemslib.process.DEVNULL) stdout_str = stderr_str = """" return_code = process.returncode return { ""stdout"": stdout_str, ""stderr"": stderr_str, ""return-value"": return_code } ","def execute_link(link_cmd_args, record_streams, quiet): """""" Executes the passed command plus arguments in a subprocess and returns the return value of the executed command. If the specified standard output and standard error of the command are recorded and also returned to the caller. link_cmd_args: A list where the first element is a command and the remaining elements are arguments passed to that command. record_streams: A bool that specifies whether to redirect standard output and and standard error to a temporary file which is returned to the caller (True) or not (False). TBA (see https://github.com/in-toto/in-toto/issues/6) Executes passed command in a subprocess and redirects stdout and stderr if specified. - A dictionary containing standard output and standard error of the executed command, called by-products. Note: If record_streams is False, the dict values are empty strings. - The return value of the executed command. 
"""""" if record_streams: if (quiet == False): #record_streams true, quiet false return_code, stdout_str, stderr_str = \ securesystemslib.process.run_duplicate_streams(link_cmd_args) else: #record_streams true, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.PIPE, stderr=securesystemslib.process.PIPE) stdout_str = process.stdout stderr_str = process.stderr return_code = process.returncode else: if (quiet == False): #record_streams false, quiet false process = securesystemslib.process.run(link_cmd_args, check=False, stdout=None, stderr=None) stdout_str = stderr_str = """" return_code = process.returncode else: #record_streams false, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.DEVNULL, stderr=securesystemslib.process.DEVNULL) stdout_str = stderr_str = """" return_code = process.returncode return { ""stdout"": stdout_str, ""stderr"": stderr_str, ""return-value"": return_code } " 50489,"def print_summary(covdata, log_summary): '''Print a small report to the standard output or to the logger in addition. Output the percentage, covered and total lines and branches. ''' (lines_total, lines_covered, percent, functions_total, functions_covered, percent_functions, branches_total, branches_covered, percent_branches) = get_global_stats(covdata) lines_out = ""lines: %0.1f%% (%s out of %s)"" % ( percent, lines_covered, lines_total ) functions_out = ""functions: %0.1f%% (%s out of %s)"" % ( percent_functions, functions_covered, functions_total ) branches_out = ""branches: %0.1f%% (%s out of %s)"" % ( percent_branches, branches_covered, branches_total ) if log_summary: logger.info(lines_out) logger.info(functions_out) logger.info(branches_out) else: sys.stdout.write(lines_out + '\n') sys.stdout.write(functions_out + '\n') sys.stdout.write(branches_out + '\n') ","def print_summary(covdata): '''Print a small report to the standard output. Output the percentage, covered and total lines and branches. 
''' (lines_total, lines_covered, percent, functions_total, functions_covered, percent_functions, branches_total, branches_covered, percent_branches) = get_global_stats(covdata) lines_out = ""lines: %0.1f%% (%s out of %s)"" % ( percent, lines_covered, lines_total ) functions_out = ""functions: %0.1f%% (%s out of %s)"" % ( percent_functions, functions_covered, functions_total ) branches_out = ""branches: %0.1f%% (%s out of %s)"" % ( percent_branches, branches_covered, branches_total ) if log_summary: logger.info(lines_out) logger.info(functions_out) logger.info(branches_out) else: sys.stdout.write(lines_out + '\n') sys.stdout.write(functions_out + '\n') sys.stdout.write(branches_out + '\n') " 39727,"def parseIBDatetime(s: str) -> datetime.datetime: """"""Parse string in IB date or datetime format to datetime."""""" if len(s) == 8: # YYYYmmdd y = int(s[0:4]) m = int(s[4:6]) d = int(s[6:8]) dt = date(y, m, d) elif s.isdigit(): dt = datetime.fromtimestamp(int(s), timezone.utc) else: dt = datetime.strptime(s, '%Y%m%d %H:%M:%S') return dt ","def parseIBDatetime(s: str) -> Union[datetime.date, datetime.datetime]: """"""Parse string in IB date or datetime format to datetime."""""" if len(s) == 8: # YYYYmmdd y = int(s[0:4]) m = int(s[4:6]) d = int(s[6:8]) dt = date(y, m, d) elif s.isdigit(): dt = datetime.fromtimestamp(int(s), timezone.utc) else: dt = datetime.strptime(s, '%Y%m%d %H:%M:%S') return dt " 8892,"def auth_after_register(bot): """"""Do NickServ/AuthServ auth"""""" if bot.config.core.auth_method: auth_method = bot.config.core.auth_method auth_username = bot.config.core.auth_username auth_password = bot.config.core.auth_password auth_target = bot.config.core.auth_target elif bot.config.core.nick_auth_method: auth_method = bot.config.core.nick_auth_method auth_username = (bot.config.core.nick_auth_username or bot.config.core.nick) auth_password = bot.config.core.nick_auth_password auth_target = bot.config.core.nick_auth_target else: return # nickserv-based auth method need to check for current nick if auth_method == 'nickserv': if bot.nick != bot.settings.core.nick: LOGGER.warning('Sending nickserv GHOST command.') bot.say( 'GHOST %s %s' % (bot.settings.core.nick, auth_password), auth_target or 'NickServ') else: bot.say('IDENTIFY %s' % auth_password, auth_target or 'NickServ') # other methods use account instead of nick elif auth_method == 'authserv': bot.write(('AUTHSERV auth', auth_username + ' ' + auth_password)) elif auth_method == 'Q': bot.write(('AUTH', auth_username + ' ' + auth_password)) elif auth_method == 'userserv': bot.say(""LOGIN %s %s"" % (auth_username, auth_password), auth_target or 'UserServ') ","def auth_after_register(bot): """"""Do NickServ/AuthServ auth"""""" if bot.config.core.auth_method: auth_method = bot.config.core.auth_method auth_username = bot.config.core.auth_username auth_password = bot.config.core.auth_password auth_target = bot.config.core.auth_target elif bot.config.core.nick_auth_method: auth_method = bot.config.core.nick_auth_method auth_username = (bot.config.core.nick_auth_username or bot.config.core.nick) auth_password = bot.config.core.nick_auth_password auth_target = bot.config.core.nick_auth_target else: return # nickserv-based auth method needs to check for current nick if auth_method == 'nickserv': if bot.nick != bot.settings.core.nick: LOGGER.warning('Sending nickserv GHOST command.') bot.say( 'GHOST %s %s' % (bot.settings.core.nick, auth_password), auth_target or 'NickServ') else: bot.say('IDENTIFY %s' % auth_password, auth_target or 'NickServ') 
# other methods use account instead of nick elif auth_method == 'authserv': bot.write(('AUTHSERV auth', auth_username + ' ' + auth_password)) elif auth_method == 'Q': bot.write(('AUTH', auth_username + ' ' + auth_password)) elif auth_method == 'userserv': bot.say(""LOGIN %s %s"" % (auth_username, auth_password), auth_target or 'UserServ') " 37572,"def _plot_histogram_data(data, labels, number_to_keep): """"""Generate the data needed for plotting counts. Parameters: data (list or dict): This is either a list of dictionaries or a single dict containing the values to represent (ex {'001': 130}) labels (list): The list of bitstring labels for the plot. number_to_keep (int): The number of terms to plot and rest is made into a single bar called 'rest'. Returns: tuple: tuple containing: (dict): The labels actually used in the plotting. (list): List of ndarrays for the bars in each experiment. (list): Indices for the locations of the bars for each experiment. """""" labels_dict = OrderedDict() all_pvalues = [] all_inds = [] # if multiple executions, we consider number_to_keep for each execution # and this may result in more than number_to_keep slots multimple_exec_keys_dict = OrderedDict() if len(data) > 1 and number_to_keep is not None: for execution in data: for common_key in dict(Counter(execution).most_common(number_to_keep)): multimple_exec_keys_dict[common_key] = 1 for execution in data: if number_to_keep is not None: data_temp = dict(Counter(execution).most_common(number_to_keep)) data_temp[""rest""] = sum(execution.values()) - sum(data_temp.values()) execution = data_temp values = [] for key in labels: if key not in execution: if number_to_keep is None: labels_dict[key] = 1 values.append(0) else: if key in multimple_exec_keys_dict: # save label only if the key is present in other execution labels_dict[key] = 1 values.append(0) else: labels_dict[key] = 1 values.append(execution[key]) values = np.array(values, dtype=float) pvalues = values / sum(values) all_pvalues.append(pvalues) numelem = len(values) ind = np.arange(numelem) # the x locations for the groups all_inds.append(ind) return labels_dict, all_pvalues, all_inds ","def _plot_histogram_data(data, labels, number_to_keep): """"""Generate the data needed for plotting counts. Parameters: data (list or dict): This is either a list of dictionaries or a single dict containing the values to represent (ex {'001': 130}) labels (list): The list of bitstring labels for the plot. number_to_keep (int): The number of terms to plot and rest is made into a single bar called 'rest'. Returns: tuple: tuple containing: (dict): The labels actually used in the plotting. (list): List of ndarrays for the bars in each experiment. (list): Indices for the locations of the bars for each experiment. 
"""""" labels_dict = OrderedDict() all_pvalues = [] all_inds = [] # if multiple executions, we consider number_to_keep for each execution # and this may result in more than number_to_keep slots multimple_exec_keys_dict = OrderedDict() if len(data) > 1 and number_to_keep is not None: for execution in data: for common_key in dict(Counter(execution).most_common(number_to_keep)): multimple_exec_keys_dict[common_key] = 1 for execution in data: if number_to_keep is not None: data_temp = dict(Counter(execution).most_common(number_to_keep)) data_temp[""rest""] = sum(execution.values()) - sum(data_temp.values()) execution = data_temp values = [] for key in labels: if key not in execution: if number_to_keep is None: labels_dict[key] = 1 values.append(0) else: if key in multiple_exec_keys_dict: # save label only if the key is present in other execution labels_dict[key] = 1 values.append(0) else: labels_dict[key] = 1 values.append(execution[key]) values = np.array(values, dtype=float) pvalues = values / sum(values) all_pvalues.append(pvalues) numelem = len(values) ind = np.arange(numelem) # the x locations for the groups all_inds.append(ind) return labels_dict, all_pvalues, all_inds " 50490,"def updateCopyrightString(filename, lines): newLines = list([]) iterLines = iter(lines) copyrightReached = False for line in iterLines: newLines.append(line) if line == ""COPYRIGHT = ("": copyrightReached = True break if not copyrightReached: raise RuntimeError(f""Start of copyright not found in {filename}."") for line in COPYRIGHT: newLines.append(f' ""{line}\\n""') copyrightEndReached = False for line in iterLines: if line == "")"": newLines.append(line) copyrightEndReached = True break if not copyrightEndReached: raise RuntimeError(f""End of copyright not found in {filename}."") for line in iterLines: newLines.append(line) return newLines ","def updateCopyrightString(filename, lines): newLines = list([]) iterLines = iter(lines) for line in iterLines: newLines.append(line) if line == ""COPYRIGHT = ("": break else: raise RuntimeError(f""Start of copyright not found in {filename}."") for line in COPYRIGHT: newLines.append(f' ""{line}\\n""') copyrightEndReached = False for line in iterLines: if line == "")"": newLines.append(line) copyrightEndReached = True break if not copyrightEndReached: raise RuntimeError(f""End of copyright not found in {filename}."") for line in iterLines: newLines.append(line) return newLines " 22367,"def brew_versions_info(package, tap_path): def versioned(recipe_path): if not os.path.isabs(recipe_path): recipe_path = os.path.join(os.getcwd(), recipe_path) # Dependencies in the same repository should be versioned, # core dependencies (presumably in base homebrew) are not # versioned. return tap_path in recipe_path # TODO: Also use tags. stdout = brew_execute([""versions"", package]) version_parts = [_ for _ in stdout.split(""\n"") if _ and ""git checkout"" in _] version_parts = map(lambda l: WHITESPACE_PATTERN.split(l), version_parts) info = [(p[0], p[3], versioned(p[4])) for p in version_parts] return info ","def brew_versions_info(package, tap_path): def versioned(recipe_path): if not os.path.isabs(recipe_path): recipe_path = os.path.join(os.getcwd(), recipe_path) # Dependencies in the same repository should be versioned, # core dependencies (presumably in base homebrew) are not # versioned. return tap_path in recipe_path # TODO: Also use tags. 
stdout = brew_execute([""versions"", package]) version_parts = [line for line in stdout.split(""\n"") if line and ""git checkout"" in line] version_parts = map(lambda l: WHITESPACE_PATTERN.split(l), version_parts) info = [(p[0], p[3], versioned(p[4])) for p in version_parts] return info " 33585,"def log_sync_template(): """"""Syncs the local_dir on driver to worker if possible. Requires ray cluster to be started with the autoscaler. Also requires rsync to be installed. """""" if not distutils.spawn.find_executable(""rsync""): logger.error(""Log sync requires rsync to be installed."") return global _log_sync_warned ssh_key = get_ssh_key() if ssh_key is None: if not _log_sync_warned: logger.error(""Log sync requires cluster to be setup with "" ""`ray up`."") _log_sync_warned = True return return (""""""rsync -savz -e ""ssh -i {ssh_key} -o ConnectTimeout=120s """""" """"""-o StrictHostKeyChecking=no"" {{source}} {{target}}"""""" ).format(ssh_key=quote(ssh_key)) ","def log_sync_template(): """"""Syncs the local_dir from driver to worker if possible. Requires ray cluster to be started with the autoscaler. Also requires rsync to be installed. """""" if not distutils.spawn.find_executable(""rsync""): logger.error(""Log sync requires rsync to be installed."") return global _log_sync_warned ssh_key = get_ssh_key() if ssh_key is None: if not _log_sync_warned: logger.error(""Log sync requires cluster to be setup with "" ""`ray up`."") _log_sync_warned = True return return (""""""rsync -savz -e ""ssh -i {ssh_key} -o ConnectTimeout=120s """""" """"""-o StrictHostKeyChecking=no"" {{source}} {{target}}"""""" ).format(ssh_key=quote(ssh_key)) " 11960,"def add_network_options(parser): parser.add_argument(""-1"", ""--oneserver"", action=""store_true"", dest=""oneserver"", default=None, help=""connect to one server only"") parser.add_argument(""-s"", ""--server"", dest=""server"", default=None, help=""set server host:port:protocol, where protocol is either t (tcp) or s (ssl)"") parser.add_argument(""-p"", ""--proxy"", dest=""proxy"", default=None, help=""set proxy [type:]host[:port] (or none to disable proxy), where type is socks4,socks5 or http"") parser.add_argument(""--noonion"", action=""store_true"", dest=""noonion"", default=None, help=""do not try to connect to onion servers"") parser.add_argument(""--skipmerklecheck"", action=""store_true"", dest=""skipmerklecheck"", default=False, help=""Tolerate invalid merkle proofs from server"") ","def add_network_options(parser): parser.add_argument(""-1"", ""--oneserver"", action=""store_true"", dest=""oneserver"", default=None, help=""connect to one server only"") parser.add_argument(""-s"", ""--server"", dest=""server"", default=None, help=""set server host:port:protocol, where protocol is either t (tcp) or s (ssl)"") parser.add_argument(""-p"", ""--proxy"", dest=""proxy"", default=None, help=""set proxy [type:]host[:port] (or 'none' to disable proxy), where type is socks4,socks5 or http"") parser.add_argument(""--noonion"", action=""store_true"", dest=""noonion"", default=None, help=""do not try to connect to onion servers"") parser.add_argument(""--skipmerklecheck"", action=""store_true"", dest=""skipmerklecheck"", default=False, help=""Tolerate invalid merkle proofs from server"") " 1092,"def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=None, input_number=None, ignore_inputs=None): """""" Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. Args: * inputs: inputs of the Nipype interface. 
* interface: Nipype interface. * input_name: name of the Nipype input. * spec: Nipype input spec. * verbose: print information messages. * handler: used when handling compound inputs, which don't have their own input spec * input_number: used when handling compound inputs to assign each a unique ID * ignore_inputs: list of interface inputs to not include in the descriptor Assumes that: * Input names are unique. """""" # If spec has a name source, means it's an output, so skip it here. # Also skip any ignored inputs if spec.name_source or ignore_inputs is not None and input_name in ignore_inputs: return None inp = {} if input_number is not None and input_number != 0: # No need to append a number to the first of a list of compound inputs inp['id'] = input_name + ""_"" + str(input_number + 1) else: inp['id'] = input_name inp['name'] = input_name.replace('_', ' ').capitalize() if handler is None: trait_handler = spec.handler else: trait_handler = handler # Figure out the input type from its handler type handler_type = type(trait_handler).__name__ # Deal with compound traits if handler_type == ""TraitCompound"": input_list = [] # Recursively create an input for each trait for i in range(0, len(trait_handler.handlers)): inp = get_boutiques_input(inputs, interface, input_name, spec, verbose, trait_handler.handlers[i], i) inp['optional'] = True input_list.append(inp) return input_list if handler_type == ""File"" or handler_type == ""Directory"": inp['type'] = ""File"" elif handler_type == ""Int"": inp['type'] = ""Number"" inp['integer'] = True elif handler_type == ""Float"": inp['type'] = ""Number"" elif handler_type == ""Bool"": if spec.argstr and len(spec.argstr.split(""="")) > 1 and (spec.argstr.split(""="")[1] == '0' or spec.argstr.split(""="")[1] == '1'): inp['type'] = ""Number"" inp['integer'] = True inp['minimum'] = 0 inp['maximum'] = 1 else: inp['type'] = ""Flag"" else: inp['type'] = ""String"" # Deal with range inputs if handler_type == ""Range"": inp['type'] = ""Number"" if trait_handler._low is not None: inp['minimum'] = trait_handler._low if trait_handler._high is not None: inp['maximum'] = trait_handler._high if trait_handler._exclude_low: inp['exclusive-minimum'] = True if trait_handler._exclude_high: inp['exclusive-maximum'] = True # Deal with list inputs # TODO handle lists of lists (e.g. 
FSL ProbTrackX seed input) if handler_type == ""List"": inp['list'] = True trait_type = type(trait_handler.item_trait.trait_type).__name__ if trait_type == ""Int"": inp['integer'] = True inp['type'] = ""Number"" elif trait_type == ""Float"": inp['type'] = ""Number"" elif trait_type == ""File"": inp['type'] = ""File"" else: inp['type'] = ""String"" if trait_handler.minlen != 0: inp['min-list-entries'] = trait_handler.minlen if trait_handler.maxlen != six.MAXSIZE: inp['max-list-entries'] = trait_handler.maxlen if spec.sep: inp['list-separator'] = spec.sep if handler_type == ""Tuple"": inp['list'] = True inp['min-list-entries'] = len(spec.default) inp['max-list-entries'] = len(spec.default) input_type = type(spec.default[0]).__name__ if input_type == 'int': inp['type'] = ""Number"" inp['integer'] = True elif input_type == 'float': inp['type'] = ""Number"" else: inp['type'] = ""String"" # Deal with multi-input if handler_type == ""InputMultiObject"": inp['type'] = ""File"" inp['list'] = True inp['value-key'] = ""["" + input_name.upper( ) + ""]"" # assumes that input names are unique # Add the command line flag specified by argstr # If no argstr is provided and input type is Flag, create a flag from the name if spec.argstr: if ""="" in spec.argstr: inp['command-line-flag'] = spec.argstr.split(""="")[0].strip() inp['command-line-flag-separator'] = ""="" elif spec.argstr.split(""%"")[0]: inp['command-line-flag'] = spec.argstr.split(""%"")[0].strip() elif inp['type'] == ""Flag"": inp['command-line-flag'] = (""--%s"" % input_name + "" "").strip() inp['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, ""mandatory"") and spec.mandatory): inp['optional'] = True else: inp['optional'] = False if spec.usedefault: inp['default-value'] = spec.default_value()[1] try: value_choices = trait_handler.values except AttributeError: pass else: if value_choices is not None: if all(isinstance(n, int) for n in value_choices): inp['type'] = ""Number"" inp['integer'] = True elif all(isinstance(n, float) for n in value_choices): inp['type'] = ""Number"" inp['value-choices'] = value_choices return inp ","def get_boutiques_input(inputs, interface, input_name, spec, verbose, handler=None, input_number=None, ignore_inputs=None): """""" Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. Args: * inputs: inputs of the Nipype interface. * interface: Nipype interface. * input_name: name of the Nipype input. * spec: Nipype input spec. * verbose: print information messages. * handler: used when handling compound inputs, which don't have their own input spec * input_number: used when handling compound inputs to assign each a unique ID * ignore_inputs: list of interface inputs to not include in the descriptor Assumes that: * Input names are unique. """""" # If spec has a name source, means it's an output, so skip it here. 
# Also skip any ignored inputs if spec.name_source or ignore_inputs is not None and input_name in ignore_inputs: return None inp = {} if input_number: # No need to append a number to the first of a list of compound inputs inp['id'] = input_name + ""_"" + str(input_number + 1) else: inp['id'] = input_name inp['name'] = input_name.replace('_', ' ').capitalize() if handler is None: trait_handler = spec.handler else: trait_handler = handler # Figure out the input type from its handler type handler_type = type(trait_handler).__name__ # Deal with compound traits if handler_type == ""TraitCompound"": input_list = [] # Recursively create an input for each trait for i in range(0, len(trait_handler.handlers)): inp = get_boutiques_input(inputs, interface, input_name, spec, verbose, trait_handler.handlers[i], i) inp['optional'] = True input_list.append(inp) return input_list if handler_type == ""File"" or handler_type == ""Directory"": inp['type'] = ""File"" elif handler_type == ""Int"": inp['type'] = ""Number"" inp['integer'] = True elif handler_type == ""Float"": inp['type'] = ""Number"" elif handler_type == ""Bool"": if spec.argstr and len(spec.argstr.split(""="")) > 1 and (spec.argstr.split(""="")[1] == '0' or spec.argstr.split(""="")[1] == '1'): inp['type'] = ""Number"" inp['integer'] = True inp['minimum'] = 0 inp['maximum'] = 1 else: inp['type'] = ""Flag"" else: inp['type'] = ""String"" # Deal with range inputs if handler_type == ""Range"": inp['type'] = ""Number"" if trait_handler._low is not None: inp['minimum'] = trait_handler._low if trait_handler._high is not None: inp['maximum'] = trait_handler._high if trait_handler._exclude_low: inp['exclusive-minimum'] = True if trait_handler._exclude_high: inp['exclusive-maximum'] = True # Deal with list inputs # TODO handle lists of lists (e.g. 
FSL ProbTrackX seed input) if handler_type == ""List"": inp['list'] = True trait_type = type(trait_handler.item_trait.trait_type).__name__ if trait_type == ""Int"": inp['integer'] = True inp['type'] = ""Number"" elif trait_type == ""Float"": inp['type'] = ""Number"" elif trait_type == ""File"": inp['type'] = ""File"" else: inp['type'] = ""String"" if trait_handler.minlen != 0: inp['min-list-entries'] = trait_handler.minlen if trait_handler.maxlen != six.MAXSIZE: inp['max-list-entries'] = trait_handler.maxlen if spec.sep: inp['list-separator'] = spec.sep if handler_type == ""Tuple"": inp['list'] = True inp['min-list-entries'] = len(spec.default) inp['max-list-entries'] = len(spec.default) input_type = type(spec.default[0]).__name__ if input_type == 'int': inp['type'] = ""Number"" inp['integer'] = True elif input_type == 'float': inp['type'] = ""Number"" else: inp['type'] = ""String"" # Deal with multi-input if handler_type == ""InputMultiObject"": inp['type'] = ""File"" inp['list'] = True inp['value-key'] = ""["" + input_name.upper( ) + ""]"" # assumes that input names are unique # Add the command line flag specified by argstr # If no argstr is provided and input type is Flag, create a flag from the name if spec.argstr: if ""="" in spec.argstr: inp['command-line-flag'] = spec.argstr.split(""="")[0].strip() inp['command-line-flag-separator'] = ""="" elif spec.argstr.split(""%"")[0]: inp['command-line-flag'] = spec.argstr.split(""%"")[0].strip() elif inp['type'] == ""Flag"": inp['command-line-flag'] = (""--%s"" % input_name + "" "").strip() inp['description'] = get_description_from_spec(inputs, input_name, spec) if not (hasattr(spec, ""mandatory"") and spec.mandatory): inp['optional'] = True else: inp['optional'] = False if spec.usedefault: inp['default-value'] = spec.default_value()[1] try: value_choices = trait_handler.values except AttributeError: pass else: if value_choices is not None: if all(isinstance(n, int) for n in value_choices): inp['type'] = ""Number"" inp['integer'] = True elif all(isinstance(n, float) for n in value_choices): inp['type'] = ""Number"" inp['value-choices'] = value_choices return inp " 31475,"def check_pack_and_request_review(pr_number, github_token=None, verify_ssl=True, email_refresh_token=''): modified_packs, modified_files = get_pr_modified_files_and_packs(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) pr_author = get_pr_author(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) for pack in modified_packs: tagged_packs_reviewers = get_pr_tagged_reviewers(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl, pack=pack) reviewers = set() pack_metadata_path = os.path.join(PACKS_FULL_PATH, pack, PACK_METADATA) if not os.path.exists(pack_metadata_path): print(f""Not found {pack} {PACK_METADATA} file."") continue with open(pack_metadata_path, 'r') as pack_metadata_file: pack_metadata = json.load(pack_metadata_file) if pack_metadata.get('support') != XSOAR_SUPPORT: # Notify contributors by tagging them on github: if pack_metadata.get(PACK_METADATA_GITHUB_USER_FIELD): pack_reviewers = pack_metadata[PACK_METADATA_GITHUB_USER_FIELD] pack_reviewers = pack_reviewers if isinstance(pack_reviewers, list) else pack_reviewers.split("","") github_users = [u.lower() for u in pack_reviewers] for github_user in github_users: user_exists = check_if_user_exists(github_user=github_user, github_token=github_token, verify_ssl=verify_ssl) if user_exists and github_user != pr_author and github_user not in tagged_packs_reviewers: 
reviewers.add(github_user) print(f""Found {github_user} default reviewer of pack {pack}"") check_reviewers(reviewers=reviewers, pr_author=pr_author, version=pack_metadata.get('currentVersion'), modified_files=modified_files, pack=pack, pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) # Notify contributors by emailing them if this is not new pack: if (pack_metadata.get(PACK_METADATA_DEV_EMAIL_FIELD) or pack_metadata.get( PACK_METADATA_SUPPORT_EMAIL_FIELD)) and pack_metadata.get('currentVersion') != '1.0.0': notify_contributors_by_email( dev_reviewers_emails=pack_metadata.get(PACK_METADATA_DEV_EMAIL_FIELD, ''), support_reviewers_emails=pack_metadata.get(PACK_METADATA_SUPPORT_EMAIL_FIELD, ''), email_refresh_token=email_refresh_token, pack=pack, pr_number=pr_number, ) elif pack_metadata.get('support') == XSOAR_SUPPORT: print(f""Skipping check of {pack} pack supported by {XSOAR_SUPPORT}"") else: print(f""{pack} pack has no default github reviewer"") ","def check_pack_and_request_review(pr_number, github_token=None, verify_ssl=True, email_refresh_token=None): modified_packs, modified_files = get_pr_modified_files_and_packs(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) pr_author = get_pr_author(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) for pack in modified_packs: tagged_packs_reviewers = get_pr_tagged_reviewers(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl, pack=pack) reviewers = set() pack_metadata_path = os.path.join(PACKS_FULL_PATH, pack, PACK_METADATA) if not os.path.exists(pack_metadata_path): print(f""Not found {pack} {PACK_METADATA} file."") continue with open(pack_metadata_path, 'r') as pack_metadata_file: pack_metadata = json.load(pack_metadata_file) if pack_metadata.get('support') != XSOAR_SUPPORT: # Notify contributors by tagging them on github: if pack_metadata.get(PACK_METADATA_GITHUB_USER_FIELD): pack_reviewers = pack_metadata[PACK_METADATA_GITHUB_USER_FIELD] pack_reviewers = pack_reviewers if isinstance(pack_reviewers, list) else pack_reviewers.split("","") github_users = [u.lower() for u in pack_reviewers] for github_user in github_users: user_exists = check_if_user_exists(github_user=github_user, github_token=github_token, verify_ssl=verify_ssl) if user_exists and github_user != pr_author and github_user not in tagged_packs_reviewers: reviewers.add(github_user) print(f""Found {github_user} default reviewer of pack {pack}"") check_reviewers(reviewers=reviewers, pr_author=pr_author, version=pack_metadata.get('currentVersion'), modified_files=modified_files, pack=pack, pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl) # Notify contributors by emailing them if this is not new pack: if (pack_metadata.get(PACK_METADATA_DEV_EMAIL_FIELD) or pack_metadata.get( PACK_METADATA_SUPPORT_EMAIL_FIELD)) and pack_metadata.get('currentVersion') != '1.0.0': notify_contributors_by_email( dev_reviewers_emails=pack_metadata.get(PACK_METADATA_DEV_EMAIL_FIELD, ''), support_reviewers_emails=pack_metadata.get(PACK_METADATA_SUPPORT_EMAIL_FIELD, ''), email_refresh_token=email_refresh_token, pack=pack, pr_number=pr_number, ) elif pack_metadata.get('support') == XSOAR_SUPPORT: print(f""Skipping check of {pack} pack supported by {XSOAR_SUPPORT}"") else: print(f""{pack} pack has no default github reviewer"") " 32819,"def set_http_meta(config, span, method=None, url=None, status_code=None): if method: span.set_tag(http.METHOD, method) if url: span.set_tag(http.URL, url) if status_code: 
span.set_tag(http.STATUS_CODE, status_code) if 500 <= int(status_code) < 600: span.error = 1 ","def set_http_meta(config, span, method=None, url=None, status_code=None): if method: span.set_tag(http.METHOD, method) if url: span.set_tag(http.URL, url) if status_code: span.meta[http.STATUS_CODE] = str(status_code) if 500 <= int(status_code) < 600: span.error = 1 " 295,"def sample( draws=500, step=None, init=""auto"", n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, callback=None, **kwargs ): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See ``discard_tuned_samples``. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is ``'jitter+adapt_diag'``, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as ``adapt_diag``\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. step : function or iterable of functions A step function or collection of functions. If there are variables without step methods, step methods for those variables will be assigned automatically. By default the NUTS step method will be used, if appropriate to the model; this is a good default for beginning users. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to ``trace.point(-1))`` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see ``init`` keyword) can overwrite the default. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. If a MultiTrace object is given, it must contain samples for the chain number ``chain``. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). chain_idx : int Chain number used to store sample in backend. If ``chains`` is greater than one, chain numbers will start here. chains : int The number of chains to sample. 
Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If ``None``, then set to either ``cores`` or 2, whichever is larger. cores : int The number of chains to run in parallel. If ``None``, set to the number of CPUs in the system, but at most 4. tune : int Number of iterations to tune, defaults to 500. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the ``draws`` argument, and will be discarded unless ``discard_tuned_samples`` is set to False. progressbar : bool, optional default=True Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in ``with`` context) random_seed : int or list of ints A list is accepted if ``cores`` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``. callback : function, default=None A function which gets called for every sample from the trace of a chain. The function is called with the trace and the current draw and will contain all samples for a single trace. the ``draw.chain`` argument can be used to determine which of the active chains the sample is drawn from. Sampling can be interruptec by throwing a ``KeyboardInterrupt`` in the callback. Returns ------- trace : pymc3.backends.base.MultiTrace A ``MultiTrace`` object that contains the samples. Notes ----- Optional keyword arguments can be passed to ``sample`` to be delivered to the ``step_method``s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop(""nuts_kwargs"", None) if nuts_kwargs is not None: warnings.warn( ""The nuts_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning, ) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop(""step_kwargs"", None) if step_kwargs is not None: warnings.warn( ""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning, ) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if ""njobs"" in kwargs: cores = kwargs[""njobs""] warnings.warn( ""The njobs argument has been deprecated. 
Use cores instead."", DeprecationWarning ) if ""nchains"" in kwargs: chains = kwargs[""nchains""] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning ) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError(""Invalid value for `random_seed`. Must be tuple, list or int"") if ""chain"" in kwargs: chain_idx = kwargs[""chain""] warnings.warn( ""The chain argument has been deprecated. Use chain_idx instead."", DeprecationWarning ) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError(""The model does not contain any free variables."") if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info(""Auto-assigning NUTS sampler..."") start_, step = init_nuts( init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs ) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. "" ""Falling back to elementwise auto-assignment."") _log.debug(""Exception in init nuts"", exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = { ""draws"": draws, ""step"": step, ""start"": start, ""trace"": trace, ""chain"": chain_idx, ""chains"": chains, ""tune"": tune, ""progressbar"": progressbar, ""model"": model, ""random_seed"": random_seed, ""cores"": cores, ""callback"": callback, } sample_args.update(kwargs) has_population_samplers = np.any( [ isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step]) ] ) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info(""Multiprocess sampling ({} chains in {} jobs)"".format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug(""Pickling error:"", exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug(""Pickling error:"", exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: has_demcmc = np.any([ isinstance(m, DEMetropolis) for m in (step.methods if isinstance(step, CompoundStep) else [step]) ]) _log.info('Population sampling ({} chains)'.format(chains)) if has_demcmc and chains < 3: raise ValueError( 'DEMetropolis requires at least 3 chains. 
' \ 'For this {}-dimensional model you should use ≥{} chains'.format(model.ndim, model.ndim + 1) ) if has_demcmc and chains <= model.ndim: warnings.warn( 'DEMetropolis should be used with more chains than dimensions! ' '(The model has {} dimensions.)'.format(model.ndim), UserWarning ) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info(""Sequential sampling ({} chains in 1 job)"".format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws - tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace ","def sample( draws=500, step=None, init=""auto"", n_init=200000, start=None, trace=None, chain_idx=0, chains=None, cores=None, tune=500, progressbar=True, model=None, random_seed=None, discard_tuned_samples=True, compute_convergence_checks=True, callback=None, **kwargs ): """"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. Parameters ---------- draws : int The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default. See ``discard_tuned_samples``. init : str Initialization method to use for auto-assigned NUTS samplers. * auto : Choose a default initialization method automatically. Currently, this is ``'jitter+adapt_diag'``, but this can change in the future. If you depend on the exact behaviour, choose an initialization method explicitly. * adapt_diag : Start with a identity mass matrix and then adapt a diagonal based on the variance of the tuning samples. All chains use the test value (usually the prior mean) as starting point. * jitter+adapt_diag : Same as ``adapt_diag``\, but add uniform jitter in [-1, 1] to the starting point in each chain. * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the sample variance of the tuning samples. * advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based on the variance of the gradients during tuning. This is **experimental** and might be removed in a future release. * advi : Run ADVI to estimate posterior mean and diagonal mass matrix. * advi_map: Initialize ADVI with MAP and use MAP as starting point. * map : Use the MAP as starting point. This is discouraged. * nuts : Run NUTS and estimate posterior mean and mass matrix from the trace. step : function or iterable of functions A step function or collection of functions. If there are variables without step methods, step methods for those variables will be assigned automatically. By default the NUTS step method will be used, if appropriate to the model; this is a good default for beginning users. n_init : int Number of iterations of initializer. Only works for 'nuts' and 'ADVI'. If 'ADVI', number of iterations, if 'nuts', number of draws. start : dict, or array of dict Starting point in parameter space (or partial point) Defaults to ``trace.point(-1))`` if there is a trace provided and model.test_point if not (defaults to empty dict). Initialization methods for NUTS (see ``init`` keyword) can overwrite the default. trace : backend, list, or MultiTrace This should be a backend instance, a list of variables to track, or a MultiTrace object with past values. 
If a MultiTrace object is given, it must contain samples for the chain number ``chain``. If None or a list of variables, the NDArray backend is used. Passing either ""text"" or ""sqlite"" is taken as a shortcut to set up the corresponding backend (with ""mcmc"" used as the base name). chain_idx : int Chain number used to store sample in backend. If ``chains`` is greater than one, chain numbers will start here. chains : int The number of chains to sample. Running independent chains is important for some convergence statistics and can also reveal multiple modes in the posterior. If ``None``, then set to either ``cores`` or 2, whichever is larger. cores : int The number of chains to run in parallel. If ``None``, set to the number of CPUs in the system, but at most 4. tune : int Number of iterations to tune, defaults to 500. Samplers adjust the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition to the number specified in the ``draws`` argument, and will be discarded unless ``discard_tuned_samples`` is set to False. progressbar : bool, optional default=True Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion (""expected time of arrival""; ETA). model : Model (optional if in ``with`` context) random_seed : int or list of ints A list is accepted if ``cores`` is greater than one. discard_tuned_samples : bool Whether to discard posterior samples of the tune interval. compute_convergence_checks : bool, default=True Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``. callback : function, default=None A function which gets called for every sample from the trace of a chain. The function is called with the trace and the current draw and will contain all samples for a single trace. the ``draw.chain`` argument can be used to determine which of the active chains the sample is drawn from. Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback. Returns ------- trace : pymc3.backends.base.MultiTrace A ``MultiTrace`` object that contains the samples. Notes ----- Optional keyword arguments can be passed to ``sample`` to be delivered to the ``step_method``s used during sampling. In particular, the NUTS step method accepts a number of arguments. Common options are: * target_accept: float in [0, 1]. The step size is tuned such that we approximate this acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic posteriors. * max_treedepth: The maximum depth of the trajectory tree. * step_scale: float, default 0.25 The initial guess for the step size scaled down by :math:`1/n**(1/4)` You can find a full list of arguments in the docstring of the step methods. Examples -------- .. code:: ipython >>> import pymc3 as pm ... n = 100 ... h = 61 ... alpha = 2 ... beta = 2 .. code:: ipython >>> with pm.Model() as model: # context management ... p = pm.Beta('p', alpha=alpha, beta=beta) ... y = pm.Binomial('y', n=n, p=p, observed=h) ... trace = pm.sample(2000, tune=1000, cores=4) >>> pm.summary(trace) mean sd mc_error hpd_2.5 hpd_97.5 p 0.604625 0.047086 0.00078 0.510498 0.694774 """""" model = modelcontext(model) nuts_kwargs = kwargs.pop(""nuts_kwargs"", None) if nuts_kwargs is not None: warnings.warn( ""The nuts_kwargs argument has been deprecated. 
Pass step "" ""method arguments directly to sample instead"", DeprecationWarning, ) kwargs.update(nuts_kwargs) step_kwargs = kwargs.pop(""step_kwargs"", None) if step_kwargs is not None: warnings.warn( ""The step_kwargs argument has been deprecated. Pass step "" ""method arguments directly to sample instead"", DeprecationWarning, ) kwargs.update(step_kwargs) if cores is None: cores = min(4, _cpu_count()) if ""njobs"" in kwargs: cores = kwargs[""njobs""] warnings.warn( ""The njobs argument has been deprecated. Use cores instead."", DeprecationWarning ) if ""nchains"" in kwargs: chains = kwargs[""nchains""] warnings.warn( ""The nchains argument has been deprecated. Use chains instead."", DeprecationWarning ) if chains is None: chains = max(2, cores) if isinstance(start, dict): start = [start] * chains if random_seed == -1: random_seed = None if chains == 1 and isinstance(random_seed, int): random_seed = [random_seed] if random_seed is None or isinstance(random_seed, int): if random_seed is not None: np.random.seed(random_seed) random_seed = [np.random.randint(2 ** 30) for _ in range(chains)] if not isinstance(random_seed, Iterable): raise TypeError(""Invalid value for `random_seed`. Must be tuple, list or int"") if ""chain"" in kwargs: chain_idx = kwargs[""chain""] warnings.warn( ""The chain argument has been deprecated. Use chain_idx instead."", DeprecationWarning ) if start is not None: for start_vals in start: _check_start_shape(model, start_vals) # small trace warning if draws == 0: msg = ""Tuning was enabled throughout the whole trace."" _log.warning(msg) elif draws < 500: msg = ""Only %s samples in chain."" % draws _log.warning(msg) draws += tune if model.ndim == 0: raise ValueError(""The model does not contain any free variables."") if step is None and init is not None and all_continuous(model.vars): try: # By default, try to use NUTS _log.info(""Auto-assigning NUTS sampler..."") start_, step = init_nuts( init=init, chains=chains, n_init=n_init, model=model, random_seed=random_seed, progressbar=progressbar, **kwargs ) if start is None: start = start_ except (AttributeError, NotImplementedError, tg.NullTypeGradError): # gradient computation failed _log.info(""Initializing NUTS failed. 
"" ""Falling back to elementwise auto-assignment."") _log.debug(""Exception in init nuts"", exec_info=True) step = assign_step_methods(model, step, step_kwargs=kwargs) else: step = assign_step_methods(model, step, step_kwargs=kwargs) if isinstance(step, list): step = CompoundStep(step) if start is None: start = {} if isinstance(start, dict): start = [start] * chains sample_args = { ""draws"": draws, ""step"": step, ""start"": start, ""trace"": trace, ""chain"": chain_idx, ""chains"": chains, ""tune"": tune, ""progressbar"": progressbar, ""model"": model, ""random_seed"": random_seed, ""cores"": cores, ""callback"": callback, } sample_args.update(kwargs) has_population_samplers = np.any( [ isinstance(m, arraystep.PopulationArrayStepShared) for m in (step.methods if isinstance(step, CompoundStep) else [step]) ] ) parallel = cores > 1 and chains > 1 and not has_population_samplers if parallel: _log.info(""Multiprocess sampling ({} chains in {} jobs)"".format(chains, cores)) _print_step_hierarchy(step) try: trace = _mp_sample(**sample_args) except pickle.PickleError: _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug(""Pickling error:"", exec_info=True) parallel = False except AttributeError as e: if str(e).startswith(""AttributeError: Can't pickle""): _log.warning(""Could not pickle model, sampling singlethreaded."") _log.debug(""Pickling error:"", exec_info=True) parallel = False else: raise if not parallel: if has_population_samplers: has_demcmc = np.any([ isinstance(m, DEMetropolis) for m in (step.methods if isinstance(step, CompoundStep) else [step]) ]) _log.info('Population sampling ({} chains)'.format(chains)) if has_demcmc and chains < 3: raise ValueError( 'DEMetropolis requires at least 3 chains. ' \ 'For this {}-dimensional model you should use ≥{} chains'.format(model.ndim, model.ndim + 1) ) if has_demcmc and chains <= model.ndim: warnings.warn( 'DEMetropolis should be used with more chains than dimensions! 
' '(The model has {} dimensions.)'.format(model.ndim), UserWarning ) _print_step_hierarchy(step) trace = _sample_population(**sample_args, parallelize=cores > 1) else: _log.info(""Sequential sampling ({} chains in 1 job)"".format(chains)) _print_step_hierarchy(step) trace = _sample_many(**sample_args) discard = tune if discard_tuned_samples else 0 trace = trace[discard:] if compute_convergence_checks: if draws - tune < 100: warnings.warn(""The number of samples is too small to check convergence reliably."") else: trace.report._run_convergence_checks(trace, model) trace.report._log_summary() return trace " 46217,"def color_dict_to_colormap(colors): """""" Generate a color map based on the given color dictionary Parameters ---------- colors : dict of int to array of float, shape (4) Mapping between labels and color Returns ------- colormap : Colormap Colormap constructed with provided control colors label_color_index : dict of int Mapping of Label to color control point within colormap """""" control_colors = np.unique( [color for label, color in colors.items()], axis=0 ) colormap = Colormap(control_colors) label_color_index = {} for i, (label, color) in enumerate(colors.items()): for j, control_color in enumerate(control_colors): if (control_color == color).all(): label_color_index[label] = j / (len(control_colors) - 1) break return colormap, label_color_index ","def color_dict_to_colormap(colors): """""" Generate a color map based on the given color dictionary Parameters ---------- colors : dict of int to array of float, shape (4) Mapping between labels and color Returns ------- colormap : Colormap Colormap constructed with provided control colors label_color_index : dict of int Mapping of Label to color control point within colormap """""" control_colors = np.unique(list(colors.values()), axis=0) colormap = Colormap(control_colors) label_color_index = {} for i, (label, color) in enumerate(colors.items()): for j, control_color in enumerate(control_colors): if (control_color == color).all(): label_color_index[label] = j / (len(control_colors) - 1) break return colormap, label_color_index " 49617,"def store( sources: Array | Collection[Array], targets: Array | Collection[Array], lock: bool | Lock = True, regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None, compute: bool = True, return_stored: bool = False, **kwargs, ): """"""Store dask arrays in array-like objects, overwrite data in target This stores dask arrays into object that supports numpy-style setitem indexing. It stores values chunk by chunk so that it does not have to fill up memory. For best performance you can align the block size of the storage target with the block size of your array. If your data fits in memory then you may prefer calling ``np.array(myarray)`` instead. Parameters ---------- sources: Array or collection of Arrays targets: array-like or Delayed or collection of array-likes and/or Delayeds These should support setitem syntax ``target[10:20] = ...`` lock: boolean or threading.Lock, optional Whether or not to lock the data stores while storing. Pass True (lock each file individually), False (don't lock) or a particular :class:`threading.Lock` object to be shared among all writes. regions: tuple of slices or collection of tuples of slices Each ``region`` tuple in ``regions`` should be such that ``target[region].shape = source.shape`` for the corresponding source and target in sources and targets, respectively. 
If this is a tuple, the contents will be assumed to be slices, so do not provide a tuple of tuples. compute: boolean, optional If true compute immediately; return :class:`dask.delayed.Delayed` otherwise. return_stored: boolean, optional Optionally return the stored result (default False). kwargs: Parameters passed to compute/persist (only used if compute=True) Returns ------- If return_stored=True tuple of Arrays If return_stored=False and compute=True None If return_stored=False and compute=False Delayed Examples -------- >>> import h5py # doctest: +SKIP >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP >>> dset = f.create_dataset('/data', shape=x.shape, ... chunks=x.chunks, ... dtype='f8') # doctest: +SKIP >>> store(x, dset) # doctest: +SKIP Alternatively store many arrays at the same time >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP """""" if isinstance(sources, Array): sources = [sources] targets = [targets] # type: ignore if any(not isinstance(s, Array) for s in sources): raise ValueError(""All sources must be dask array objects"") if len(sources) != len(targets): raise ValueError( ""Different number of sources [%d] and targets [%d]"" % (len(sources), len(targets)) ) if isinstance(regions, tuple) or regions is None: regions = [regions] # type: ignore if len(sources) > 1 and len(regions) == 1: regions *= len(sources) # type: ignore if len(sources) != len(regions): raise ValueError( ""Different number of sources [%d] and targets [%d] than regions [%d]"" % (len(sources), len(targets), len(regions)) ) # Optimize all sources together sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources]) sources_layer = Array.__dask_optimize__( sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources])) ) sources_name = ""store-sources-"" + tokenize(sources) layers = {sources_name: sources_layer} dependencies: dict[str, set] = {sources_name: set()} # Optimize all targets together targets_keys = [] targets_dsks = [] for t in targets: if isinstance(t, Delayed): targets_keys.append(t.key) targets_dsks.append(t.__dask_graph__()) elif is_dask_collection(t): raise TypeError(""Targets must be either Delayed objects or array-likes"") if targets_dsks: targets_hlg = HighLevelGraph.merge(*targets_dsks) targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys) targets_name = ""store-targets-"" + tokenize(targets_keys) layers[targets_name] = targets_layer dependencies[targets_name] = set() load_stored = return_stored and not compute map_names = [ ""store-map-"" + tokenize(s, t if isinstance(t, Delayed) else id(t), r) for s, t, r in zip(sources, targets, regions) ] map_keys: list = [] for s, t, n, r in zip(sources, targets, map_names, regions): map_layer = insert_to_ooc( keys=s.__dask_keys__(), chunks=s.chunks, out=t.key if isinstance(t, Delayed) else t, name=n, lock=lock, region=r, return_stored=return_stored, load_stored=load_stored, ) layers[n] = map_layer if isinstance(t, Delayed): dependencies[n] = {sources_name, targets_name} else: dependencies[n] = {sources_name} map_keys += map_layer.keys() if return_stored: store_dsk = HighLevelGraph(layers, dependencies) load_store_dsk: HighLevelGraph | Mapping = store_dsk if compute: store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys] store_dlyds = persist(*store_dlyds, **kwargs) store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds]) load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2) map_names = [""load-"" + n for n in map_names] return tuple( Array(load_store_dsk, 
n, s.chunks, meta=s) for s, n in zip(sources, map_names) ) elif compute: store_dsk = HighLevelGraph(layers, dependencies) compute_as_if_collection(Array, store_dsk, map_keys, **kwargs) return None else: key = ""store-"" + tokenize(map_names) layers[key] = {key: map_keys} dependencies[key] = set(map_names) store_dsk = HighLevelGraph(layers, dependencies) return Delayed(key, store_dsk) ","def store( sources: Array | Collection[Array], targets: Array | Collection[Array], lock: bool | Lock = True, regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None, compute: bool = True, return_stored: bool = False, **kwargs, ): """"""Store dask arrays in array-like objects, overwrite data in target This stores dask arrays into object that supports numpy-style setitem indexing. It stores values chunk by chunk so that it does not have to fill up memory. For best performance you can align the block size of the storage target with the block size of your array. If your data fits in memory then you may prefer calling ``np.array(myarray)`` instead. Parameters ---------- sources: Array or collection of Arrays targets: array-like or Delayed or collection of array-likes and/or Delayeds These should support setitem syntax ``target[10:20] = ...`` lock: boolean or threading.Lock, optional Whether or not to lock the data stores while storing. Pass True (lock each file individually), False (don't lock) or a particular :class:`threading.Lock` object to be shared among all writes. regions: tuple of slices or collection of tuples of slices Each ``region`` tuple in ``regions`` should be such that ``target[region].shape = source.shape`` for the corresponding source and target in sources and targets, respectively. If this is a tuple, the contents will be assumed to be slices, so do not provide a tuple of tuples. compute: boolean, optional If true compute immediately; return :class:`dask.delayed.Delayed` otherwise. return_stored: boolean, optional Optionally return the stored result (default False). kwargs: Parameters passed to compute/persist (only used if compute=True) Returns ------- If return_stored=True tuple of Arrays If return_stored=False and compute=True None If return_stored=False and compute=False Delayed Examples -------- >>> import h5py # doctest: +SKIP >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP >>> dset = f.create_dataset('/data', shape=x.shape, ... chunks=x.chunks, ... 
dtype='f8') # doctest: +SKIP >>> store(x, dset) # doctest: +SKIP Alternatively store many arrays at the same time >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP """""" if isinstance(sources, Array): sources = [sources] if isinstance(targets, Array): targets = [targets] if any(not isinstance(s, Array) for s in sources): raise ValueError(""All sources must be dask array objects"") if len(sources) != len(targets): raise ValueError( ""Different number of sources [%d] and targets [%d]"" % (len(sources), len(targets)) ) if isinstance(regions, tuple) or regions is None: regions = [regions] # type: ignore if len(sources) > 1 and len(regions) == 1: regions *= len(sources) # type: ignore if len(sources) != len(regions): raise ValueError( ""Different number of sources [%d] and targets [%d] than regions [%d]"" % (len(sources), len(targets), len(regions)) ) # Optimize all sources together sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources]) sources_layer = Array.__dask_optimize__( sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources])) ) sources_name = ""store-sources-"" + tokenize(sources) layers = {sources_name: sources_layer} dependencies: dict[str, set] = {sources_name: set()} # Optimize all targets together targets_keys = [] targets_dsks = [] for t in targets: if isinstance(t, Delayed): targets_keys.append(t.key) targets_dsks.append(t.__dask_graph__()) elif is_dask_collection(t): raise TypeError(""Targets must be either Delayed objects or array-likes"") if targets_dsks: targets_hlg = HighLevelGraph.merge(*targets_dsks) targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys) targets_name = ""store-targets-"" + tokenize(targets_keys) layers[targets_name] = targets_layer dependencies[targets_name] = set() load_stored = return_stored and not compute map_names = [ ""store-map-"" + tokenize(s, t if isinstance(t, Delayed) else id(t), r) for s, t, r in zip(sources, targets, regions) ] map_keys: list = [] for s, t, n, r in zip(sources, targets, map_names, regions): map_layer = insert_to_ooc( keys=s.__dask_keys__(), chunks=s.chunks, out=t.key if isinstance(t, Delayed) else t, name=n, lock=lock, region=r, return_stored=return_stored, load_stored=load_stored, ) layers[n] = map_layer if isinstance(t, Delayed): dependencies[n] = {sources_name, targets_name} else: dependencies[n] = {sources_name} map_keys += map_layer.keys() if return_stored: store_dsk = HighLevelGraph(layers, dependencies) load_store_dsk: HighLevelGraph | Mapping = store_dsk if compute: store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys] store_dlyds = persist(*store_dlyds, **kwargs) store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds]) load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2) map_names = [""load-"" + n for n in map_names] return tuple( Array(load_store_dsk, n, s.chunks, meta=s) for s, n in zip(sources, map_names) ) elif compute: store_dsk = HighLevelGraph(layers, dependencies) compute_as_if_collection(Array, store_dsk, map_keys, **kwargs) return None else: key = ""store-"" + tokenize(map_names) layers[key] = {key: map_keys} dependencies[key] = set(map_names) store_dsk = HighLevelGraph(layers, dependencies) return Delayed(key, store_dsk) " 41733,"def delete_study( study_name, # type: str storage, # type: Union[str, storages.BaseStorage] ): # type: (...) -> None """"""Delete a :class:`~optuna.study.Study` object. Args: study_name: Study's name. storage: Database URL such as ``sqlite:///example.db``. 
Please see also the documentation of :func:`~optuna.study.create_study` for futhre details. """""" storage = storages.get_storage(storage) study_id = storage.get_study_id_from_name(study_name) storage.delete_study(study_id) ","def delete_study( study_name, # type: str storage, # type: Union[str, storages.BaseStorage] ): # type: (...) -> None """"""Delete a :class:`~optuna.study.Study` object. Args: study_name: Study's name. storage: Database URL such as ``sqlite:///example.db``. Please see also the documentation of :func:`~optuna.study.create_study` for further details. """""" storage = storages.get_storage(storage) study_id = storage.get_study_id_from_name(study_name) storage.delete_study(study_id) " 19494,"def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False): """""" Check if concrete spec exists on mirrors and return a list indicating the mirrors on which it can be found Args: spec (spack.spec.Spec): The spec to look for in binary mirrors mirrors_to_check (dict): Optionally override the configured mirrors with the mirrors in this dictionary. index_only (bool): Do not attempt direct fetching of ``spec.json`` files from remote mirrors, only consider the indices. Return: A list of objects, each containing a ``mirror_url`` and ``spec`` key indicating all mirrors where the spec can be found. """""" if spec is None: return [] if not spack.mirror.MirrorCollection(mirrors=mirrors_to_check): tty.debug(""No Spack mirrors are currently configured"") return {} results = binary_index.find_built_spec(spec, mirrors_to_check=mirrors_to_check) # The index may be out-of-date. If we are't only considering indices, try # to fetch directly since we know where the file should be. if not results and not index_only: results = try_direct_fetch(spec, mirrors=mirrors_to_check) # We found a spec by the direct fetch approach, we might as well # add it to our mapping. if results: binary_index.update_spec(spec, results) return results ","def get_mirrors_for_spec(spec=None, mirrors_to_check=None, index_only=False): """""" Check if concrete spec exists on mirrors and return a list indicating the mirrors on which it can be found Args: spec (spack.spec.Spec): The spec to look for in binary mirrors mirrors_to_check (dict): Optionally override the configured mirrors with the mirrors in this dictionary. index_only (bool): Do not attempt direct fetching of ``spec.json`` files from remote mirrors, only consider the indices. Return: A list of objects, each containing a ``mirror_url`` and ``spec`` key indicating all mirrors where the spec can be found. """""" if spec is None: return [] if not spack.mirror.MirrorCollection(mirrors=mirrors_to_check): tty.debug(""No Spack mirrors are currently configured"") return {} results = binary_index.find_built_spec(spec, mirrors_to_check=mirrors_to_check) # The index may be out-of-date. If we aren't only considering indices, try # to fetch directly since we know where the file should be. if not results and not index_only: results = try_direct_fetch(spec, mirrors=mirrors_to_check) # We found a spec by the direct fetch approach, we might as well # add it to our mapping. 
if results: binary_index.update_spec(spec, results) return results " 16135,"def get_ip_prefix_from_adapters(local_ip: str, adapters) -> int | None: """"""Find the network prefix for an adapter."""""" for adapter in adapters: for ip_cfg in adapter.ips: if local_ip == ip_cfg.ip: return ip_cfg.network_prefix return None ","def get_ip_prefix_from_adapters(local_ip: str, adapters) -> int: """"""Find the network prefix for an adapter."""""" for adapter in adapters: for ip_cfg in adapter.ips: if local_ip == ip_cfg.ip: return ip_cfg.network_prefix return None " 38299,"def binary_ufunc_comparison(ufunc, a, b): if ufunc in [np.divmod]: out = (a.copy(), a.copy()) else: out = a.copy() if ufunc in yield_np_ufuncs([ 'add', 'subtract', 'remainder', 'fmod', 'mod', 'arctan2', 'hypot', 'greater', 'greater_equal', 'less', 'less_equal', 'equal', 'not_equal', 'logical_and', 'logical_or', 'logical_xor', 'maximum', 'minimum', 'fmax', 'fmin', 'nextafter', 'heaviside']): if a.units != b.units and a.units.dimensions == b.units.dimensions: if LooseVersion(np.__version__) < LooseVersion('1.13.0'): assert_raises(YTUfuncUnitError, ufunc, a, b) return elif a.units != b.units: assert_raises(YTUnitOperationError, ufunc, a, b) return if ufunc in yield_np_ufuncs( ['bitwise_and', 'bitwise_or', 'bitwise_xor', 'left_shift', 'right_shift', 'ldexp']): assert_raises(TypeError, ufunc, a, b) return ret = ufunc(a, b, out=out) ret = ufunc(a, b) if ufunc is np.multiply: assert_true(ret.units == a.units*b.units) elif ufunc in (np.divide, np.true_divide, np.arctan2): assert_true(ret.units.dimensions == (a.units/b.units).dimensions) elif ufunc in (np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal, np.logical_and, np.logical_or, np.logical_xor): assert_true(not isinstance(ret, YTArray) and isinstance(ret, np.ndarray)) if isinstance(ret, tuple): assert isinstance(out, tuple) assert len(out) == len(ret) for o, r in zip(out, ret): assert_array_equal(r, o) else: assert_array_equal(ret, out) if (ufunc in (np.divide, np.true_divide, np.arctan2) and (a.units.dimensions == b.units.dimensions)): assert_array_almost_equal( np.array(ret), ufunc(np.array(a.in_cgs()), np.array(b.in_cgs()))) elif LooseVersion(np.__version__) < LooseVersion('1.13.0'): assert_array_almost_equal(np.array(ret), ufunc(np.array(a), np.array(b))) ","def binary_ufunc_comparison(ufunc, a, b): if ufunc in [np.divmod]: out = (a.copy(), a.copy()) else: out = a.copy() if ufunc in yield_np_ufuncs([ 'add', 'subtract', 'remainder', 'fmod', 'mod', 'arctan2', 'hypot', 'greater', 'greater_equal', 'less', 'less_equal', 'equal', 'not_equal', 'logical_and', 'logical_or', 'logical_xor', 'maximum', 'minimum', 'fmax', 'fmin', 'nextafter', 'heaviside']): if a.units != b.units and a.units.dimensions == b.units.dimensions: if LooseVersion(np.__version__) < LooseVersion('1.13.0'): assert_raises(YTUfuncUnitError, ufunc, a, b) return elif a.units != b.units: assert_raises(YTUnitOperationError, ufunc, a, b) return if ufunc in yield_np_ufuncs( ['bitwise_and', 'bitwise_or', 'bitwise_xor', 'left_shift', 'right_shift', 'ldexp']): assert_raises(TypeError, ufunc, a, b) return ret = ufunc(a, b, out=out) ret = ufunc(a, b) if ufunc is np.multiply: assert_true(ret.units == a.units*b.units) elif ufunc in (np.divide, np.true_divide, np.arctan2): assert_true(ret.units.dimensions == (a.units/b.units).dimensions) elif ufunc in (np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal, np.logical_and, np.logical_or, np.logical_xor): assert_true(not isinstance(ret, 
YTArray) and isinstance(ret, np.ndarray)) if isinstance(ret, tuple): assert isinstance(out, tuple) assert len(out) == len(ret) for o, r in zip(out, ret): assert_array_equal(o, r) else: assert_array_equal(ret, out) if (ufunc in (np.divide, np.true_divide, np.arctan2) and (a.units.dimensions == b.units.dimensions)): assert_array_almost_equal( np.array(ret), ufunc(np.array(a.in_cgs()), np.array(b.in_cgs()))) elif LooseVersion(np.__version__) < LooseVersion('1.13.0'): assert_array_almost_equal(np.array(ret), ufunc(np.array(a), np.array(b))) " 57943,"def get_email_recipients(email_to, email_from, service_mail, mailbox): """"""Get the email recipient. Args: mailbox (str): The mailbox configured in the relevant integration email_to (str): The email receiver. email_from (str): The email's sender. service_mail (str): The mail listener. Returns: The email recipients. """""" email_to_set = {email_from} email_to = argToList(email_to) email_to_set = email_to_set.union(set(email_to)) recipients_to_remove = [] for recipient in email_to_set: if service_mail: recipients_to_remove.append(recipient) if service_mail in recipient else None if mailbox: recipients_to_remove.append(recipient) if mailbox in recipient else None for recipient_to_remove in recipients_to_remove: email_to_set.remove(recipient_to_remove) email_recipients = ','.join(email_to_set) return email_recipients ","def get_email_recipients(email_to, email_from, service_mail, mailbox): """"""Get the email recipient. Args: mailbox (str): The mailbox configured in the relevant integration email_to (str): The email receiver. email_from (str): The email's sender. service_mail (str): The mail listener. Returns: The email recipients. """""" email_to_set = {email_from} email_to = argToList(email_to) email_to_set = email_to_set.union(set(email_to)) if mailbox and mailbox in email_to_set: email_to_set.remove(mailbox) elif service_mail and service_mail in email_to_set: email_to_set.remove(service_mail) email_recipients = ','.join(email_to_set) return email_recipients " 49558,"def histogram(a, bins=None, range=None, normed=False, weights=None, density=None): """""" Blocked variant of :func:`numpy.histogram`. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional Either an iterable specifying the ``bins`` or the number of ``bins`` and a ``range`` argument is required as computing ``min`` and ``max`` over blocked arrays is an expensive operation that must be performed explicitly. If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional .. deprecated:: 1.6.0 This is equivalent to the `density` argument, but produces incorrect results for unequal bin widths. 
It should not be used. .. versionchanged:: 1.15.0 DeprecationWarnings are actually emitted. weights : array_like, optional A dask.array.Array of weights, of the same block structure as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. If ``density`` is True, ``bins`` cannot be a single-number delayed value. It must be a concrete number, or a (possibly-delayed) array/sequence of the bin edges. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Examples -------- Using number of bins and range: >>> import dask.array as da >>> import numpy as np >>> x = da.from_array(np.arange(10000), chunks=10) >>> h, bins = da.histogram(x, bins=10, range=[0, 10000]) >>> bins array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000., 8000., 9000., 10000.]) >>> h.compute() array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]) Explicitly specifying the bins: >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000])) >>> bins array([ 0, 5000, 10000]) >>> h.compute() array([5000, 5000]) """""" if isinstance(bins, Array): scalar_bins = bins.ndim == 0 # ^ `np.ndim` is not implemented by Dask array. elif isinstance(bins, Delayed): scalar_bins = bins._length is None or bins._length == 1 else: scalar_bins = np.ndim(bins) == 0 if bins is None or (scalar_bins and range is None): raise ValueError( ""dask.array.histogram requires either specifying "" ""bins as an iterable or specifying both a range and "" ""the number of bins"" ) if weights is not None and weights.chunks != a.chunks: raise ValueError(""Input array and weights must have the same chunked structure"") if normed is not False: raise ValueError( ""The normed= keyword argument has been deprecated. "" ""Please use density instead. "" ""See the numpy.histogram docstring for more information."" ) if density and scalar_bins and isinstance(bins, (Array, Delayed)): raise NotImplementedError( ""When `density` is True, `bins` cannot be a scalar Dask object. "" ""It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."" ) for argname, val in [(""bins"", bins), (""range"", range), (""weights"", weights)]: if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins): raise TypeError( ""Dask types besides Array and Delayed are not supported "" ""for `histogram`. 
For argument `{}`, got: {!r}"".format(argname, val) ) if range is not None: try: if len(range) != 2: raise ValueError( f""range must be a sequence or array of length 2, but got {len(range)} items"" ) if isinstance(range, (Array, np.ndarray)) and range.shape != (2,): raise ValueError( f""range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"" ) except TypeError: raise TypeError( f""Expected a sequence or array for range, not {range}"" ) from None token = tokenize(a, bins, range, weights, density) name = ""histogram-sum-"" + token if scalar_bins: bins = _linspace_from_delayed(range[0], range[1], bins + 1) # ^ NOTE `range[1]` is safe because of the above check, and the initial check # that range must not be None if `scalar_bins` else: if not isinstance(bins, (Array, np.ndarray)): bins = asarray(bins) if bins.ndim != 1: raise ValueError( f""bins must be a 1-dimensional array or sequence, got shape {bins.shape}"" ) (bins_ref, range_ref), deps = unpack_collections([bins, range]) # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk if weights is None: dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref) for i, k in enumerate(flatten(a.__dask_keys__())) } dtype = np.histogram([])[0].dtype else: a_keys = flatten(a.__dask_keys__()) w_keys = flatten(weights.__dask_keys__()) dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref, w) for i, (k, w) in enumerate(zip(a_keys, w_keys)) } dtype = weights.dtype deps = (a,) + deps if weights is not None: deps += (weights,) graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps) # Turn graph into a 2D Array of shape (nchunks, nbins) nchunks = len(list(flatten(a.__dask_keys__()))) nbins = bins.size - 1 # since `bins` is 1D chunks = ((1,) * nchunks, (nbins,)) mapped = Array(graph, name, chunks, dtype=dtype) # Sum over chunks to get the final histogram n = mapped.sum(axis=0) # We need to replicate normed and density options from numpy if density is not None: if density: db = asarray(np.diff(bins).astype(float), chunks=n.chunks) return n / db / n.sum(), bins else: return n, bins else: return n, bins ","def histogram(a, bins=None, range=None, normed=False, weights=None, density=None): """""" Blocked variant of :func:`numpy.histogram`. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional Either an iterable specifying the ``bins`` or the number of ``bins`` and a ``range`` argument is required as computing ``min`` and ``max`` over blocked arrays is an expensive operation that must be performed explicitly. If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional .. 
deprecated:: 1.6.0 This is equivalent to the `density` argument, but produces incorrect results for unequal bin widths. It should not be used. .. versionchanged:: 1.15.0 DeprecationWarnings are actually emitted. weights : array_like, optional A dask.array.Array of weights, of the same block structure as ``a``. Each value in ``a`` only contributes its associated weight towards the bin count (instead of 1). If ``density`` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. If ``density`` is True, ``bins`` cannot be a single-number delayed value. It must be a concrete number, or a (possibly-delayed) array/sequence of the bin edges. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Examples -------- Using number of bins and range: >>> import dask.array as da >>> import numpy as np >>> x = da.from_array(np.arange(10000), chunks=10) >>> h, bins = da.histogram(x, bins=10, range=[0, 10000]) >>> bins array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000., 8000., 9000., 10000.]) >>> h.compute() array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]) Explicitly specifying the bins: >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000])) >>> bins array([ 0, 5000, 10000]) >>> h.compute() array([5000, 5000]) """""" if isinstance(bins, Array): scalar_bins = bins.ndim == 0 # ^ `np.ndim` is not implemented by Dask array. elif isinstance(bins, Delayed): scalar_bins = bins._length is None or bins._length == 1 else: scalar_bins = np.ndim(bins) == 0 if bins is None or (scalar_bins and range is None): raise ValueError( ""dask.array.histogram requires either specifying "" ""bins as an iterable or specifying both a range and "" ""the number of bins"" ) if weights is not None and weights.chunks != a.chunks: raise ValueError(""Input array and weights must have the same chunked structure"") if normed is not False: raise ValueError( ""The normed= keyword argument has been deprecated. "" ""Please use density instead. "" ""See the numpy.histogram docstring for more information."" ) if density and scalar_bins and isinstance(bins, (Array, Delayed)): raise NotImplementedError( ""When `density` is True, `bins` cannot be a scalar Dask object. "" ""It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."" ) for argname, val in [(""bins"", bins), (""range"", range), (""weights"", weights)]: if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins): raise TypeError( ""Dask types besides Array and Delayed are not supported "" ""for `histogram`. 
For argument `{}`, got: {!r}"".format(argname, val) ) if range is not None: try: if len(range) != 2: raise ValueError( f""range must be a sequence or array of length 2, but got {len(range)} items"" ) if isinstance(range, (Array, np.ndarray)) and range.shape != (2,): raise ValueError( f""range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"" ) except TypeError: raise TypeError( f""Expected a sequence or array for range, not {range}"" ) from None token = tokenize(a, bins, range, weights, density) name = ""histogram-sum-"" + token if scalar_bins: bins = _linspace_from_delayed(range[0], range[1], bins + 1) # ^ NOTE `range[1]` is safe because of the above check, and the initial check # that range must not be None if `scalar_bins` else: if not isinstance(bins, (Array, np.ndarray)): bins = asarray(bins) if bins.ndim != 1: raise ValueError( f""bins must be a 1-dimensional array or sequence, got shape {bins.shape}"" ) (bins_ref, range_ref), deps = unpack_collections([bins, range]) # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk if weights is None: dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref) for i, k in enumerate(flatten(a.__dask_keys__())) } dtype = np.histogram([])[0].dtype else: a_keys = flatten(a.__dask_keys__()) w_keys = flatten(weights.__dask_keys__()) dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref, w) for i, (k, w) in enumerate(zip(a_keys, w_keys)) } dtype = weights.dtype deps = (a,) + deps if weights is not None: deps += (weights,) graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps) # Turn graph into a 2D Array of shape (nchunks, nbins) nchunks = len(list(flatten(a.__dask_keys__()))) nbins = bins.size - 1 # since `bins` is 1D chunks = ((1,) * nchunks, (nbins,)) mapped = Array(graph, name, chunks, dtype=dtype) # Sum over chunks to get the final histogram n = mapped.sum(axis=0) # We need to replicate normed and density options from numpy if density is not None: if density: db = asarray(np.diff(bins).astype(float), chunks=n.chunks) return n / db / n.sum(), bins else: return n, bins else: return n, bins " 39288,"def Box(bounds=(-1.,1.,-1.,1.,-1.,1.), level = 0, quads = True): """"""Create a box with solid faces for the given bounds. Parameters ---------- bounds : np.ndarray or list Specify the bounding box of the cube. ``(xMin,xMax, yMin,yMax, zMin,zMax)`` level : int Level of subdivision of the faces. quads : bool, optional Flag to tell the source to generate either a quad or two triangle for a set of four points. Default True """""" if np.array(bounds).size != 6: raise TypeError('Bounds must be given as length 6 tuple: (xMin,xMax, yMin,yMax, zMin,zMax)') src = vtk.vtkTessellatedBoxSource() src.SetLevel(level) if quads is True: src.QuadsOn() else: src.QuadsOff() src.SetBounds(bounds) src.Update() return pyvista.wrap(src.GetOutput()) ","def Box(bounds=(-1.,1.,-1.,1.,-1.,1.), level = 0, quads = True): """"""Create a box with solid faces for the given bounds. Parameters ---------- bounds : np.ndarray or list Specify the bounding box of the cube. ``(xMin, xMax, yMin, yMax, zMin, zMax)`` level : int Level of subdivision of the faces. quads : bool, optional Flag to tell the source to generate either a quad or two triangle for a set of four points. 
Default True """""" if np.array(bounds).size != 6: raise TypeError('Bounds must be given as length 6 tuple: (xMin,xMax, yMin,yMax, zMin,zMax)') src = vtk.vtkTessellatedBoxSource() src.SetLevel(level) if quads is True: src.QuadsOn() else: src.QuadsOff() src.SetBounds(bounds) src.Update() return pyvista.wrap(src.GetOutput()) " 21150,"def test_attr_pipeline_checks(en_vocab): doc1 = Doc(en_vocab, words=[""Test""]) doc1[0].dep_ = ""ROOT"" doc2 = Doc(en_vocab, words=[""Test""]) doc2[0].tag_ = ""TAG"" doc2[0].pos_ = ""X"" doc2[0].morph_ = ""Feat=Val"" doc2[0].lemma_ = ""LEMMA"" doc3 = Doc(en_vocab, words=[""Test""]) # DEP requires DEP matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""DEP"": ""a""}]]) matcher(doc1) with pytest.raises(ValueError): matcher(doc2) with pytest.raises(ValueError): matcher(doc3) # errors can be suppressed if desired matcher(doc2, suppress_errors=True) matcher(doc3, suppress_errors=True) # TAG, POS, LEMMA require those values for attr in (""TAG"", ""POS"", ""LEMMA""): matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{attr: ""a""}]]) matcher(doc2) with pytest.raises(ValueError): matcher(doc1) with pytest.raises(ValueError): matcher(doc3) # TEXT/ORTH only require tokens matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""ORTH"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""TEXT"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) ","def test_attr_pipeline_checks(en_vocab): doc1 = Doc(en_vocab, words=[""Test""]) doc1[0].dep_ = ""ROOT"" doc2 = Doc(en_vocab, words=[""Test""]) doc2[0].tag_ = ""TAG"" doc2[0].pos_ = ""X"" doc2[0].morph_ = ""Feat=Val"" doc2[0].lemma_ = ""LEMMA"" doc3 = Doc(en_vocab, words=[""Test""]) # DEP requires DEP matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""DEP"": ""a""}]]) matcher(doc1) with pytest.raises(ValueError): matcher(doc2) with pytest.raises(ValueError): matcher(doc3) # errors can be suppressed if desired matcher(doc2, suppress_errors=True) matcher(doc3, allow_missing=True) # TAG, POS, LEMMA require those values for attr in (""TAG"", ""POS"", ""LEMMA""): matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{attr: ""a""}]]) matcher(doc2) with pytest.raises(ValueError): matcher(doc1) with pytest.raises(ValueError): matcher(doc3) # TEXT/ORTH only require tokens matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""ORTH"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) matcher = Matcher(en_vocab) matcher.add(""TEST"", [[{""TEXT"": ""a""}]]) matcher(doc1) matcher(doc2) matcher(doc3) " 57102,"def delete_multi( namespace: str, sub_namespace: str | None, obj_ids: List[str] ) -> bool: """"""Deletes multiple ids in the cache. Args: namespace: str. The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT namespace for object ids that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is either None or the stringified version number of the objects. If the sub-namespace is not required, pass in None. obj_ids: list(str). A list of id strings to delete from the cache. Raises: ValueError. The namespace does not exist or is not recognized. Returns: bool. Whether all operations complete successfully. """""" if len(obj_ids) == 0: return True if namespace not in DESERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' 
% namespace) memcache_keys = [ _get_memcache_key(namespace, sub_namespace, obj_id) for obj_id in obj_ids] return bool(memory_cache_services.delete_multi(memcache_keys) == len(obj_ids)) # type: ignore[attr-defined] ","def delete_multi( namespace: str, sub_namespace: str | None, obj_ids: List[str] ) -> bool: """"""Deletes multiple ids in the cache. Args: namespace: str. The namespace under which the values associated with the id lie. Use CACHE_NAMESPACE_DEFAULT namespace for object ids that are not associated with a conceptual domain-layer entity and therefore don't require serialization. sub_namespace: str|None. The sub-namespace further differentiates the values. For Explorations, Skills, Stories, Topics, and Collections, the sub-namespace is either None or the stringified version number of the objects. If the sub-namespace is not required, pass in None. obj_ids: list(str). A list of id strings to delete from the cache. Raises: ValueError. The namespace does not exist or is not recognized. Returns: bool. Whether all operations complete successfully. """""" if len(obj_ids) == 0: return True if namespace not in DESERIALIZATION_FUNCTIONS: raise ValueError('Invalid namespace: %s.' % namespace) memcache_keys = [ _get_memcache_key(namespace, sub_namespace, obj_id) for obj_id in obj_ids] return bool(memory_cache_services.delete_multi(memcache_keys) == len(obj_ids)) # type: ignore[attr-defined] " 7739,"def deplete(chain, x, rates, dt, matrix_func=None): """"""Deplete materials using given reaction rates for a specified time Parameters ---------- chain : openmc.deplete.Chain Depletion chain x : list of numpy.ndarray Atom number vectors for each material rates : openmc.deplete.ReactionRates Reaction rates (from transport operator) dt : float Time in [s] to deplete for maxtrix_func : Callable, optional Function to form the depletion matrix after calling ``matrix_func(chain, rates, fission_yields)``, where ``fission_yields = {parent: {product: yield_frac}}`` Expected to return the depletion matrix required by :func:`CRAM48`. Returns ------- x_result : list of numpy.ndarray Updated atom number vectors for each material """""" if not hasattr(chain, ""fission_yields""): fission_yields = repeat(chain.get_thermal_fission_yields()) else: fission_yields = chain.fission_yields # Use multiprocessing pool to distribute work with Pool() as pool: iters = zip(repeat(chain), x, rates, repeat(dt), fission_yields, repeat(matrix_func)) x_result = list(pool.starmap(_cram_wrapper, iters)) return x_result ","def deplete(chain, x, rates, dt, matrix_func=None): """"""Deplete materials using given reaction rates for a specified time Parameters ---------- chain : openmc.deplete.Chain Depletion chain x : list of numpy.ndarray Atom number vectors for each material rates : openmc.deplete.ReactionRates Reaction rates (from transport operator) dt : float Time in [s] to deplete for maxtrix_func : Callable, optional Function to form the depletion matrix after calling ``matrix_func(chain, rates, fission_yields)``, where ``fission_yields = {parent: {product: yield_frac}}`` Expected to return the depletion matrix required by :func:`CRAM48`. 
Returns ------- x_result : list of numpy.ndarray Updated atom number vectors for each material """""" if not hasattr(chain, ""fission_yields""): fission_yields = repeat(chain.get_thermal_fission_yields()) else: fission_yields = repeat(chain.fission_yields) # Use multiprocessing pool to distribute work with Pool() as pool: iters = zip(repeat(chain), x, rates, repeat(dt), fission_yields, repeat(matrix_func)) x_result = list(pool.starmap(_cram_wrapper, iters)) return x_result " 9786,"def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'present']), force=dict(type='bool', default=False), type=dict(type='str', choices=['host', 'user']), signing_key=dict(type='path'), public_key=dict(type='path'), path=dict(type='path', required=True), identifier=dict(type='str'), serial_number=dict(type='int', default=0), valid_from=dict(type='str'), valid_to=dict(type='str'), valid_at=dict(type='str'), principals=dict(type='list'), options=dict(type='list'), ), supports_check_mode=True, add_file_common_args=True, required_if=[('state', 'present', ['type', 'signing_key', 'public_key', 'valid_from', 'valid_to'])], ) def isBaseDir(path): base_dir = os.path.dirname(path) or '.' if not os.path.isdir(base_dir): module.fail_json( name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir ) if module.params['state'] == ""present"": isBaseDir(module.params['signing_key']) isBaseDir(module.params['public_key']) isBaseDir(module.params['path']) certificate = Certificate(module) if certificate.state == 'present': if module.check_mode: certificate.changed = module.params['force'] or not certificate.is_valid(module) else: try: certificate.generate(module) except Exception as exc: module.fail_json(msg=to_native(exc)) else: if module.check_mode: certificate.changed = os.path.exists(module.params['path']) if certificate.changed: certificate.cert_info = {} else: try: certificate.remove() except Exception as exc: module.fail_json(msg=to_native(exc)) result = certificate.dump() module.exit_json(**result) ","def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'present']), force=dict(type='bool', default=False), type=dict(type='str', choices=['host', 'user']), signing_key=dict(type='path'), public_key=dict(type='path'), path=dict(type='path', required=True), identifier=dict(type='str'), serial_number=dict(type='int'), valid_from=dict(type='str'), valid_to=dict(type='str'), valid_at=dict(type='str'), principals=dict(type='list'), options=dict(type='list'), ), supports_check_mode=True, add_file_common_args=True, required_if=[('state', 'present', ['type', 'signing_key', 'public_key', 'valid_from', 'valid_to'])], ) def isBaseDir(path): base_dir = os.path.dirname(path) or '.' 
if not os.path.isdir(base_dir): module.fail_json( name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir ) if module.params['state'] == ""present"": isBaseDir(module.params['signing_key']) isBaseDir(module.params['public_key']) isBaseDir(module.params['path']) certificate = Certificate(module) if certificate.state == 'present': if module.check_mode: certificate.changed = module.params['force'] or not certificate.is_valid(module) else: try: certificate.generate(module) except Exception as exc: module.fail_json(msg=to_native(exc)) else: if module.check_mode: certificate.changed = os.path.exists(module.params['path']) if certificate.changed: certificate.cert_info = {} else: try: certificate.remove() except Exception as exc: module.fail_json(msg=to_native(exc)) result = certificate.dump() module.exit_json(**result) " 44214,"def gate_cost(n, norm, error, rank_r, rank_m, br=7, aleph=10, beth=20): r""""""Return the number of Toffoli gates needed to implement the double factorization method. Args: n (int): number of molecular orbitals norm (float): 1-norm of a second-quantized Hamiltonian error (float): target error in the algorithm rank_r (int): the rank of the first factorization step rank_m (int): the average rank of the second factorization step br (int): number of bits for ancilla qubit rotation aleph (int): number of bits for the keep register beth (int): number of bits for the rotation angles Returns: int: the number of Toffoli gates for the double factorization method **Example** >>> n = 14 >>> norm = 52.98761457453095 >>> error = 0.001 >>> rank_r = 26 >>> rank_m = 5.5 >>> br = 7 >>> aleph = 10 >>> beth = 20 >>> gate_cost(n, norm, error, rank_r, rank_m, br, aleph, beth) 167048631 """""" e_cost = estimation_cost(norm, error) u_cost = unitary_cost(n, rank_r, rank_m, br, aleph, beth) return int(e_cost * u_cost) ","def gate_cost(n, norm, error, rank_r, rank_m, br=7, aleph=10, beth=20): r""""""Return the total number of Toffoli gates needed to implement the double factorization algorithm. Args: n (int): number of molecular orbitals norm (float): 1-norm of a second-quantized Hamiltonian error (float): target error in the algorithm rank_r (int): the rank of the first factorization step rank_m (int): the average rank of the second factorization step br (int): number of bits for ancilla qubit rotation aleph (int): number of bits for the keep register beth (int): number of bits for the rotation angles Returns: int: the number of Toffoli gates for the double factorization method **Example** >>> n = 14 >>> norm = 52.98761457453095 >>> error = 0.001 >>> rank_r = 26 >>> rank_m = 5.5 >>> br = 7 >>> aleph = 10 >>> beth = 20 >>> gate_cost(n, norm, error, rank_r, rank_m, br, aleph, beth) 167048631 """""" e_cost = estimation_cost(norm, error) u_cost = unitary_cost(n, rank_r, rank_m, br, aleph, beth) return int(e_cost * u_cost) " 2947,"def eval( expr, parser=""pandas"", engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False, ): """""" Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. 
:class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements `__, only Python `expressions `__. parser : str, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance ` documentation for more details. engine : str or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance ` documentation for more details. 
"""""" from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, ""inplace"") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != """"] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError( ""multi-line expressions are only valid in the "" ""context of data, use DataFrame.eval"" ) ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope( level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target, ) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError( ""Multi-line expressions are only valid"" "" if all expressions contain an assignment"" ) elif inplace: raise ValueError(""Cannot operate inplace if there is no assignment"") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError(""Cannot return a copy of the target"") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError(""Cannot assign expression output to target"") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret ","def eval( expr, parser=""pandas"", engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False, ): """""" Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements `__, only Python `expressions `__. parser : str, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. 
Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance ` documentation for more details. engine : {'python', 'numexpr'}, optional, default 'numexpr' The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance ` documentation for more details. 
"""""" from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, ""inplace"") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != """"] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError( ""multi-line expressions are only valid in the "" ""context of data, use DataFrame.eval"" ) ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope( level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target, ) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError( ""Multi-line expressions are only valid"" "" if all expressions contain an assignment"" ) elif inplace: raise ValueError(""Cannot operate inplace if there is no assignment"") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError(""Cannot return a copy of the target"") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError(""Cannot assign expression output to target"") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. 
if inplace is False: return target if target_modified else ret " 39667,"def main(): module = ForemanEntityApypieAnsibleModule( argument_spec=dict( name=dict(type='str', required=True), updated_name=dict(type='str'), description=dict(type='str'), provider=dict(type='str', choices=['Vmware', 'libvirt', 'ovirt']), provider_params=dict(type='dict'), locations=dict(type='list'), organizations=dict(type='list'), provider_auth=dict(type='dict'), state=dict(default='present', choices=['absent', 'present_with_defaults', 'present']), ), required_if=( ['state', 'present', ['provider']], ), entity_spec=entity_spec, ) entity_dict = module.clean_params() if 'provider' in entity_dict: entity_dict['provider'] = entity_dict['provider'].title() provider_infos = get_provider_infos(provider=entity_dict.get('provider', '')) provider_params = entity_dict.pop('provider_params', dict()) provider_auth = entity_dict.pop('provider_auth', dict()) module.connect() entity = module.find_resource_by_name('compute_resources', name=entity_dict['name'], failsafe=True) if not module.desired_absent: if 'updated_name' in entity_dict: entity_dict['name'] = entity_dict['updated_name'] if 'organizations' in entity_dict: entity_dict['organizations'] = module.find_resources('organizations', entity_dict['organizations'], thin=True) if 'locations' in entity_dict: entity_dict['locations'] = module.find_resources('locations', entity_dict['locations'], thin=True) # Add provider specific params if not module.desired_absent: if not provider_infos and not entity: module.fail_json(msg='To create a compute resource a valid provider must be supplied') for key in provider_infos['params']: # Manage deprecated params if key in provider_auth: entity_dict[key] = provider_auth[key] if key in provider_params: entity_dict[key] = provider_params[key] changed = module.ensure_entity_state('compute_resources', entity_dict, entity, entity_spec=entity_spec) module.exit_json(changed=changed) ","def main(): module = ForemanEntityApypieAnsibleModule( argument_spec=dict( name=dict(type='str', required=True), updated_name=dict(type='str'), description=dict(type='str'), provider=dict(type='str', choices=['Vmware', 'libvirt', 'ovirt']), provider_params=dict(type='dict'), locations=dict(type='list'), organizations=dict(type='list'), provider_auth=dict(type='dict'), state=dict(default='present', choices=['absent', 'present_with_defaults', 'present']), ), required_if=( ['state', 'present', ['provider']], ), entity_spec=entity_spec, ) entity_dict = module.clean_params() if 'provider' in entity_dict: entity_dict['provider'] = entity_dict['provider'].title() provider_infos = get_provider_infos(provider=entity_dict.get('provider', '')) provider_params = entity_dict.pop('provider_params', dict()) provider_auth = entity_dict.pop('provider_auth', dict()) module.connect() entity = module.find_resource_by_name('compute_resources', name=entity_dict['name'], failsafe=True) if not module.desired_absent: if 'updated_name' in entity_dict: entity_dict['name'] = entity_dict['updated_name'] if 'organizations' in entity_dict: entity_dict['organizations'] = module.find_resources('organizations', entity_dict['organizations'], thin=True) if 'locations' in entity_dict: entity_dict['locations'] = module.find_resources('locations', entity_dict['locations'], thin=True) # Add provider specific params if not module.desired_absent: if not provider_infos and not entity: module.fail_json(msg='To create a compute resource a valid provider must be supplied') for key in provider_infos['params']: # 
Manage deprecated params if key in provider_auth: entity_dict[key] = provider_auth[key] if key in provider_params: entity_dict[key] = provider_params[key] changed = module.ensure_entity_state('compute_resources', entity_dict, entity) module.exit_json(changed=changed) " 24864,"def my_func(self): """"""This is a docstring. Returns ------- mymodule.Class An object """""" return mymodule.Class() ","def my_func(self): """"""finds_numpy_return_custom_class Returns ------- mymodule.Class An object """""" return mymodule.Class() " 32018,"def item_to_incident(client: Client, item: dict) -> list: files = [] if 'link' in item: link = item['link'] try: file_result = fileResult(path.basename(link), client._http_request('GET', full_url=link, resp_type='content')) files.append({ 'path': file_result['FileID'], 'name': file_result['File'] }) except Exception as e: demisto.debug(f""Failed fetching file from {link}. {str(e)}"") return [assign_params( name=item.get('title', ''), attachment=files, rawJSON=json.dumps(item) )] ","def item_to_incident(client: Client, item: dict) -> list: files = [] if link := item.get('link'): try: file_result = fileResult(path.basename(link), client._http_request('GET', full_url=link, resp_type='content')) files.append({ 'path': file_result['FileID'], 'name': file_result['File'] }) except Exception as e: demisto.debug(f""Failed fetching file from {link}. {str(e)}"") return [assign_params( name=item.get('title', ''), attachment=files, rawJSON=json.dumps(item) )] " 53441,"def divide(x, y): result = 0 try: result = x / y except ZeroDivisionError: raise Exception(""Can't divide by zero!"") from result # [bad-exception-context] return result ","def divide(x, y): result = 0 try: result = x / y except ZeroDivisionError: raise ValueError(f""Division by zero when dividing {x} by {y} !"") from result # [bad-exception-context] return result " 31385,"def get_whois(whois_string: str) -> defaultdict: """"""Gets a WHOIS string and returns a parsed dict of the WHOIS String. Args: whois_string: whois from domain api call Returns: A parsed whois Examples: >>> get_whois('key1:value\\nkey2:value2') defaultdict({'key1': 'value', 'key2': 'value2'}) """""" whois: defaultdict = defaultdict(lambda: None) for line in whois_string.splitlines(): key, value = line.split(sep=':', maxsplit=1) if key in whois: if not isinstance(whois[key], list): value = whois[key] whois[key] = list() whois[key].append(value) whois[key].append(value) else: whois[key] = value return whois ","def get_whois(whois_string: str) -> defaultdict: """"""Gets a WHOIS string and returns a parsed dict of the WHOIS String. Args: whois_string: whois from domain api call Returns: A parsed whois Examples: >>> get_whois('key1:value\\nkey2:value2') defaultdict({'key1': 'value', 'key2': 'value2'}) """""" whois: defaultdict = defaultdict(lambda: None) for line in whois_string.splitlines(): key, value = line.split(sep=':', maxsplit=1) if key in whois: if not isinstance(whois[key], list): whois[key] = [whois[key]] whois[key].append(value) else: whois[key] = value return whois " 22959,"def unpack(ext: str, source: Union[str, IO[bytes]], dest_path): """"""Unpack the archive |source| to |dest_path|. Args: ext (str): Extension of the archive. source (Union[str, IO[bytes]]): File handle or path to the source. dest_path ([type]): Destination path to unpack to. 
"""""" close_source = False try: if isinstance(source, str): source = open(source, 'rb') close_source = True if ext == '.tar.gz' or ext == '.tgz': un_tar_directory(source, dest_path, 'gz') elif ext == '.tar.bz2': un_tar_directory(source, dest_path, 'bz2') elif ext == '.bz2': un_bz2_file(source, dest_path) elif ext == '.gz': with open(dest_path, 'wb') as f: shutil.copyfileobj(un_gzip_stream(source), f) elif ext == '.zip': unzip_directory(source, dest_path) else: raise UsageError('Not an archive.') except (tarfile.TarError, IOError) as e: logging.error(""Invalid archive upload: %s"", e) raise UsageError('Invalid archive upload.') finally: if close_source: cast(IO[bytes], source).close() ","def unpack(ext: str, source: Union[str, IO[bytes]], dest_path: str): """"""Unpack the archive |source| to |dest_path|. Args: ext (str): Extension of the archive. source (Union[str, IO[bytes]]): File handle or path to the source. dest_path ([type]): Destination path to unpack to. """""" close_source = False try: if isinstance(source, str): source = open(source, 'rb') close_source = True if ext == '.tar.gz' or ext == '.tgz': un_tar_directory(source, dest_path, 'gz') elif ext == '.tar.bz2': un_tar_directory(source, dest_path, 'bz2') elif ext == '.bz2': un_bz2_file(source, dest_path) elif ext == '.gz': with open(dest_path, 'wb') as f: shutil.copyfileobj(un_gzip_stream(source), f) elif ext == '.zip': unzip_directory(source, dest_path) else: raise UsageError('Not an archive.') except (tarfile.TarError, IOError) as e: logging.error(""Invalid archive upload: %s"", e) raise UsageError('Invalid archive upload.') finally: if close_source: cast(IO[bytes], source).close() " 25811,"def headmsk_wf(name='HeadMaskWorkflow', use_bet=True): """""" Computes a head mask as in [Mortamet2009]_. .. 
workflow:: from mriqc.workflows.anatomical import headmsk_wf wf = headmsk_wf() """""" has_dipy = False try: from dipy.denoise import nlmeans # noqa has_dipy = True except ImportError: pass workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_segm']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode') if use_bet or not has_dipy: from nipype.interfaces.fsl import BET # Alternative for when dipy is not installed bet = pe.Node(BET(surfaces=True), name='fsl_bet') workflow.connect([ (inputnode, bet, [('in_file', 'in_file')]), (bet, outputnode, [('outskin_mask_file', 'out_file')]) ]) else: from nipype.interfaces.dipy import Denoise enhance = pe.Node(niu.Function( input_names=['in_file'], output_names=['out_file'], function=_enhance), name='Enhance') estsnr = pe.Node(niu.Function( input_names=['in_file', 'seg_file'], output_names=['out_snr'], function=_estimate_snr), name='EstimateSNR') denoise = pe.Node(Denoise(), name='Denoise') gradient = pe.Node(niu.Function( input_names=['in_file', 'snr'], output_names=['out_file'], function=image_gradient), name='Grad') thresh = pe.Node(niu.Function( input_names=['in_file', 'in_segm'], output_names=['out_file'], function=gradient_threshold), name='GradientThreshold') workflow.connect([ (inputnode, estsnr, [('in_file', 'in_file'), ('in_segm', 'seg_file')]), (estsnr, denoise, [('out_snr', 'snr')]), (inputnode, enhance, [('in_file', 'in_file')]), (enhance, denoise, [('out_file', 'in_file')]), (estsnr, gradient, [('out_snr', 'snr')]), (denoise, gradient, [('out_file', 'in_file')]), (inputnode, thresh, [('in_segm', 'in_segm')]), (gradient, thresh, [('out_file', 'in_file')]), (thresh, outputnode, [('out_file', 'out_file')]) ]) return workflow ","def headmsk_wf(name='HeadMaskWorkflow', use_bet=True): """""" Computes a head mask as in [Mortamet2009]_. .. 
workflow:: from mriqc.workflows.anatomical import headmsk_wf wf = headmsk_wf() """""" has_dipy = False try: from dipy.denoise import nlmeans # noqa has_dipy = True except ImportError: pass workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_segm']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode') if use_bet: from nipype.interfaces.fsl import BET # Alternative for when dipy is not installed bet = pe.Node(BET(surfaces=True), name='fsl_bet') workflow.connect([ (inputnode, bet, [('in_file', 'in_file')]), (bet, outputnode, [('outskin_mask_file', 'out_file')]) ]) else: from nipype.interfaces.dipy import Denoise enhance = pe.Node(niu.Function( input_names=['in_file'], output_names=['out_file'], function=_enhance), name='Enhance') estsnr = pe.Node(niu.Function( input_names=['in_file', 'seg_file'], output_names=['out_snr'], function=_estimate_snr), name='EstimateSNR') denoise = pe.Node(Denoise(), name='Denoise') gradient = pe.Node(niu.Function( input_names=['in_file', 'snr'], output_names=['out_file'], function=image_gradient), name='Grad') thresh = pe.Node(niu.Function( input_names=['in_file', 'in_segm'], output_names=['out_file'], function=gradient_threshold), name='GradientThreshold') workflow.connect([ (inputnode, estsnr, [('in_file', 'in_file'), ('in_segm', 'seg_file')]), (estsnr, denoise, [('out_snr', 'snr')]), (inputnode, enhance, [('in_file', 'in_file')]), (enhance, denoise, [('out_file', 'in_file')]), (estsnr, gradient, [('out_snr', 'snr')]), (denoise, gradient, [('out_file', 'in_file')]), (inputnode, thresh, [('in_segm', 'in_segm')]), (gradient, thresh, [('out_file', 'in_file')]), (thresh, outputnode, [('out_file', 'out_file')]) ]) return workflow " 3896,"def numeric_assortativity_coefficient(G, attribute, nodes=None): """"""Compute assortativity for numerical node attributes. Assortativity measures the similarity of connections in the graph with respect to the given numeric attribute. Parameters ---------- G : NetworkX graph attribute : string Node attribute key. nodes: list or iterable (optional) Compute numeric assortativity only for attributes of nodes in container. The default is all nodes. Returns ------- r: float Assortativity of graph for given attribute Examples -------- >>> G = nx.Graph() >>> G.add_nodes_from([0, 1], size=2) >>> G.add_nodes_from([2, 3], size=3) >>> G.add_edges_from([(0, 1), (2, 3)]) >>> print(nx.numeric_assortativity_coefficient(G, ""size"")) 1.0 Notes ----- This computes Eq. (21) in Ref. [1]_ , which is the pearson correlation coefficient of the specified (scalar valued) attribute across edges. References ---------- .. [1] M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003 """""" if nodes is None: nodes = G.nodes vals = set(G.nodes[n][attribute] for n in nodes) mapping = {d: i for i, d, in enumerate(vals)} M = attribute_mixing_matrix(G, attribute, nodes, mapping) return numeric_ac(M, mapping) ","def numeric_assortativity_coefficient(G, attribute, nodes=None): """"""Compute assortativity for numerical node attributes. Assortativity measures the similarity of connections in the graph with respect to the given numeric attribute. Parameters ---------- G : NetworkX graph attribute : string Node attribute key. nodes: list or iterable (optional) Compute numeric assortativity only for attributes of nodes in container. The default is all nodes. 
Returns ------- r: float Assortativity of graph for given attribute Examples -------- >>> G = nx.Graph() >>> G.add_nodes_from([0, 1], size=2) >>> G.add_nodes_from([2, 3], size=3) >>> G.add_edges_from([(0, 1), (2, 3)]) >>> print(nx.numeric_assortativity_coefficient(G, ""size"")) 1.0 Notes ----- This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation coefficient of the specified (scalar valued) attribute across edges. References ---------- .. [1] M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003 """""" if nodes is None: nodes = G.nodes vals = set(G.nodes[n][attribute] for n in nodes) mapping = {d: i for i, d, in enumerate(vals)} M = attribute_mixing_matrix(G, attribute, nodes, mapping) return numeric_ac(M, mapping) " 58282,"def get_artist_tracks(artistUrl: str) -> List[SongObj]: """""" `str` `albumUrl` : Spotify Url of the artist whose tracks are to be retrieved returns a `list` containing Url's of each track in the artist profile """""" spotifyClient = SpotifyClient() artistTracks = [] offset = 0 artistResponse = spotifyClient.artist_albums(artistUrl) # while loop acts like do-while while True: for album in artistResponse[""items""]: # get albums and singles if not ( (album[""album_group""] == ""appears_on"") and (album[""album_type""] in [""album"", ""compilation""]) ): artistTracks.extend(get_album_tracks(album[""id""])) # get features from other artists albums elif ( album[""album_group""] == ""appears_on"" and album[""album_type""] == ""album"" ): trackResponse = spotifyClient.album_tracks(album[""uri""]) albumTracks = [] # while loop acts like do-while while True: for track in trackResponse[""items""]: for artist in track[""artists""]: if artist[""id""] == artistResponse[""href""].split(""/"")[-2]: song = SongObj.from_url( ""https://open.spotify.com/track/"" + track[""id""] ) if song.get_youtube_link() is not None: albumTracks.append(song) # check if more tracks are to be passed if trackResponse[""next""]: trackResponse = spotifyClient.album_tracks( album[""uri""], offset=len(albumTracks) ) else: break artistTracks.extend(albumTracks) offset += len(artistResponse[""items""]) # check if more albums are to be passed if artistResponse[""next""]: artistResponse = spotifyClient.artist_albums(artistUrl, offset=offset) else: break return artistTracks ","def get_artist_tracks(artistUrl: str) -> List[SongObj]: """""" `str` `albumUrl` : Spotify Url of the artist whose tracks are to be retrieved returns a `list` containing Url's of each track in the artist profile """""" spotifyClient = SpotifyClient() artistTracks = [] offset = 0 artistResponse = spotifyClient.artist_albums(artistUrl) # while loop acts like do-while while True: for album in artistResponse[""items""]: # get albums and singles if not ( (album[""album_group""] == ""appears_on"") and (album[""album_type""] in [""album"", ""compilation""]) ): artistTracks.extend(get_album_tracks(album[""id""])) # get features from other artists albums elif ( album[""album_group""] == ""appears_on"" and album[""album_type""] == ""album"" ): trackResponse = spotifyClient.album_tracks(album[""uri""]) albumTracks = [] # while loop acts like do-while while True: for track in trackResponse[""items""]: for artist in track[""artists""]: if artist[""id""] == artistResponse[""href""].split(""/"")[-2]: song = SongObj.from_url( ""https://open.spotify.com/track/"" + track[""id""] ) if song.get_youtube_link() is not None: albumTracks.append(song) # check if more tracks are to be passed if trackResponse[""next""]: 
trackResponse = spotifyClient.album_tracks( album[""uri""], offset=len(albumTracks) ) else: break artistTracks.extend(albumTracks) offset += len(artistResponse[""items""]) # check if more albums are to be passed if artistResponse[""next""]: artistResponse = spotifyClient.artist_albums(artistUrl, offset=offset) else: break return artistTracks " 19776,"def get_github_user(): keychain_class = CliRuntime().get_keychain_class() keychain = keychain_class( CliRuntime().project_config, CliRuntime().get_keychain_key() ) github_config = keychain.get_service(""github"") return github_config.username, (github_config.password or github_config.token) ","def get_github_user(): keychain_class = CliRuntime().get_keychain_class() keychain = keychain_class( CliRuntime().project_config, CliRuntime().get_keychain_key() ) github_config = keychain.get_service(""github"") return github_config.username, github_config.token " 42983,"def apply_gate_einsum(mat, state, pure, modes, n, trunc): """""" Gate application based on einsum. Assumes the input matrix has shape (out1, in1, ...) """""" # pylint: disable=unused-argument size = len(modes) if pure: if n == 1: return np.dot(mat, state) left_str = [indices[:size*2]] j = genOfRange(size) right_str = [indices[2*next(j) + 1] if i in modes else indices[size*2 + i] \ for i in range(n)] j = genOfRange(size) out_str = [indices[2*next(j)] if i in modes else indices[size*2 + i] \ for i in range(n)] einstring = ''.join(left_str + [','] + right_str + ['->'] + out_str) return np.einsum(einstring, mat, state) # otherwise, if state is mixed if n == 1: return np.dot(mat, np.dot(state, dagger(mat))) in_str = indices[:n*2] j = genOfRange(n*2) out_str = ''.join([indices[n*2 + next(j)] if i//2 in modes else indices[i] for i in range(n*2)]) j = genOfRange(size*2) left_str = ''.join([out_str[modes[i//2]*2] if (i%2) == 0 else in_str[modes[i//2]*2] for i in range(size*2)]) right_str = ''.join([out_str[modes[i//2]*2 + 1] if (i%2) == 0 else in_str[modes[i//2]*2 + 1] for i in range(size*2)]) einstring = ''.join([left_str, ',', in_str, ',', right_str, '->', out_str]) return np.einsum(einstring, mat, state, mat.conj()) ","def apply_gate_einsum(mat, state, pure, modes, n, trunc): """""" Gate application based on einsum. Assumes the input matrix has shape (out1, in1, ...) 
"""""" # pylint: disable=unused-argument size = len(modes) if pure: if n == 1: return np.dot(mat, state) left_str = [indices[:size*2]] j = genOfRange(size) right_str = [indices[2*next(j) + 1] if i in modes else indices[size*2 + i] \ for i in range(n)] j = genOfRange(size) out_str = [indices[2*next(j)] if i in modes else indices[size*2 + i] \ for i in range(n)] einstring = ''.join(left_str + [','] + right_str + ['->'] + out_str) return np.einsum(einstring, mat, state) # otherwise, the state is mixed if n == 1: return np.dot(mat, np.dot(state, dagger(mat))) in_str = indices[:n*2] j = genOfRange(n*2) out_str = ''.join([indices[n*2 + next(j)] if i//2 in modes else indices[i] for i in range(n*2)]) j = genOfRange(size*2) left_str = ''.join([out_str[modes[i//2]*2] if (i%2) == 0 else in_str[modes[i//2]*2] for i in range(size*2)]) right_str = ''.join([out_str[modes[i//2]*2 + 1] if (i%2) == 0 else in_str[modes[i//2]*2 + 1] for i in range(size*2)]) einstring = ''.join([left_str, ',', in_str, ',', right_str, '->', out_str]) return np.einsum(einstring, mat, state, mat.conj()) " 19996,"def test_plantcv_transform_find_color_card(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG)) # Test cache directory cache_dir = os.path.join(TEST_TMPDIR, ""test_plantcv_transform_find_color_card"") os.mkdir(cache_dir) pcv.params.debug_outdir = cache_dir df, start, space = pcv.transform.find_color_card(img=rgb_img, threshold='adaptgauss', blurry=False) # Test with debug = ""print"" pcv.params.debug = ""print"" _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start, spacing=space, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = ""plot"" pcv.params.debug = ""plot"" _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start, spacing=space, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = None pcv.params.debug = None mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start, spacing=space, nrows=6, ncols=4, exclude=[20, 0]) assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220], dtype=np.uint8))) ","def test_plantcv_transform_find_color_card(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG)) # Test cache directory cache_dir = os.path.join(TEST_TMPDIR, ""test_plantcv_transform_find_color_card"") os.mkdir(cache_dir) pcv.params.debug_outdir = cache_dir df, start, space = pcv.transform.find_color_card(rgb_img=rgb_img, threshold='adaptgauss', blurry=False) # Test with debug = ""print"" pcv.params.debug = ""print"" _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start, spacing=space, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = ""plot"" pcv.params.debug = ""plot"" _ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start, spacing=space, nrows=6, ncols=4, exclude=[20, 0]) # Test with debug = None pcv.params.debug = None mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start, spacing=space, nrows=6, ncols=4, exclude=[20, 0]) assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220], dtype=np.uint8))) " 12378,"def remove_default_ca_certs(distro_name=None): """""" Removes all default trusted CA certificates from the system. 
To actually apply the change you must also call L{update_ca_certs}. """""" util.delete_dir_contents(CA_CERT_PATH) util.delete_dir_contents(CA_CERT_SYSTEM_PATH) util.write_file(CA_CERT_CONFIG, """", mode=0o644) if distro_name is None or distro_name != 'alpine': debconf_sel = (""ca-certificates ca-certificates/trust_new_crts "" + ""select no"") subp.subp(('debconf-set-selections', '-'), debconf_sel) ","def remove_default_ca_certs(distro_name=None): """""" Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. """""" util.delete_dir_contents(CA_CERT_PATH) util.delete_dir_contents(CA_CERT_SYSTEM_PATH) util.write_file(CA_CERT_CONFIG, """", mode=0o644) if distro_name != 'alpine': debconf_sel = (""ca-certificates ca-certificates/trust_new_crts "" + ""select no"") subp.subp(('debconf-set-selections', '-'), debconf_sel) " 30950,"def fetch_incidents(): params = demisto.params() user_key = params.get('queryUserKey') user_key = user_key if user_key else ADMIN_EMAIL query = '' if params['query'] is None else params['query'] last_run = demisto.getLastRun() last_fetch = last_run.get('gmt_time') # handle first time fetch - gets current GMT time -1 day if last_fetch is None: last_fetch, _ = parse_date_range(date_range=FETCH_TIME, utc=True, to_timestamp=False) last_fetch = str(last_fetch.isoformat()).split('.')[0] + 'Z' last_fetch = datetime.strptime(last_fetch, '%Y-%m-%dT%H:%M:%SZ') current_fetch = last_fetch service = get_service( 'gmail', 'v1', ['https://www.googleapis.com/auth/gmail.readonly'], user_key) query += last_fetch.strftime(' after:%Y/%m/%d') LOG('GMAIL: fetch parameters:\nuser: %s\nquery=%s\nfetch time: %s' % (user_key, query, last_fetch,)) result = service.users().messages().list( userId=user_key, maxResults=MAX_FETCH, q=query).execute() incidents = [] # so far, so good LOG('GMAIL: possible new incidents are %s' % (result,)) for msg in result.get('messages', []): msg_result = service.users().messages().get( id=msg['id'], userId=user_key).execute() incident = mail_to_incident(msg_result, service, user_key) temp_date = datetime.strptime( incident['occurred'], '%Y-%m-%dT%H:%M:%SZ') # update last run if temp_date > last_fetch: last_fetch = temp_date + timedelta(seconds=1) # avoid duplication due to weak time query if temp_date > current_fetch: incidents.append(incident) demisto.info('extract {} incidents'.format(len(incidents))) if params.get('isFetch'): return None demisto.setLastRun({'gmt_time': last_fetch.isoformat().split('.')[0] + 'Z'}) return incidents ","def fetch_incidents(): params = demisto.params() user_key = params.get('queryUserKey') user_key = user_key if user_key else ADMIN_EMAIL query = '' if params['query'] is None else params['query'] last_run = demisto.getLastRun() last_fetch = last_run.get('gmt_time') # handle first time fetch - gets current GMT time -1 day if last_fetch is None: last_fetch, _ = parse_date_range(date_range=FETCH_TIME, utc=True, to_timestamp=False) last_fetch = str(last_fetch.isoformat()).split('.')[0] + 'Z' last_fetch = datetime.strptime(last_fetch, '%Y-%m-%dT%H:%M:%SZ') current_fetch = last_fetch service = get_service( 'gmail', 'v1', ['https://www.googleapis.com/auth/gmail.readonly'], user_key) query += last_fetch.strftime(' after:%Y/%m/%d') LOG('GMAIL: fetch parameters:\nuser: %s\nquery=%s\nfetch time: %s' % (user_key, query, last_fetch,)) result = service.users().messages().list( userId=user_key, maxResults=MAX_FETCH, q=query).execute() incidents = [] # so far, so good LOG('GMAIL: 
possible new incidents are %s' % (result,)) for msg in result.get('messages', []): msg_result = service.users().messages().get( id=msg['id'], userId=user_key).execute() incident = mail_to_incident(msg_result, service, user_key) temp_date = datetime.strptime( incident['occurred'], '%Y-%m-%dT%H:%M:%SZ') # update last run if temp_date > last_fetch: last_fetch = temp_date + timedelta(seconds=1) # avoid duplication due to weak time query if temp_date > current_fetch: incidents.append(incident) demisto.info('extract {} incidents'.format(len(incidents))) if params.get('isFetch'): return [] demisto.setLastRun({'gmt_time': last_fetch.isoformat().split('.')[0] + 'Z'}) return incidents " 4730,"def main(): fig, ax = plt.subplots() years = np.arange(2004, 2009) heights = [7900, 8100, 7900, 6900, 9800] box_colors = [ (0.8, 0.2, 0.2), (0.2, 0.8, 0.2), (0.2, 0.2, 0.8), (0.7, 0.5, 0.8), (0.3, 0.8, 0.7), ] for year, h, bc in zip(years, heights, box_colors): bbox0 = Bbox.from_extents(year - 0.4, 0., year + 0.4, h) bbox = TransformedBbox(bbox0, ax.transData) ax.add_artist(RibbonBoxImage(ax, bbox, bc, interpolation=""bicubic"")) ax.annotate(str(h), (year, h), va=""bottom"", ha=""center"") ax.set_xlim(years[0] - 0.5, years[-1] + 0.5) ax.set_ylim(0, 10000) gradient = np.zeros((2, 2, 4)) gradient[:, :, :3] = [1, 1, 0] gradient[:, :, 3] = [[0.1, 0.3], [0.3, 0.5]] # alpha channel ax.imshow(gradient, interpolation=""bicubic"", zorder=0.1, extent=(0, 1, 0, 1), transform=ax.transAxes, aspect=""auto"") plt.show() ","def main(): fig, ax = plt.subplots() years = np.arange(2004, 2009) heights = [7900, 8100, 7900, 6900, 9200] box_colors = [ (0.8, 0.2, 0.2), (0.2, 0.8, 0.2), (0.2, 0.2, 0.8), (0.7, 0.5, 0.8), (0.3, 0.8, 0.7), ] for year, h, bc in zip(years, heights, box_colors): bbox0 = Bbox.from_extents(year - 0.4, 0., year + 0.4, h) bbox = TransformedBbox(bbox0, ax.transData) ax.add_artist(RibbonBoxImage(ax, bbox, bc, interpolation=""bicubic"")) ax.annotate(str(h), (year, h), va=""bottom"", ha=""center"") ax.set_xlim(years[0] - 0.5, years[-1] + 0.5) ax.set_ylim(0, 10000) gradient = np.zeros((2, 2, 4)) gradient[:, :, :3] = [1, 1, 0] gradient[:, :, 3] = [[0.1, 0.3], [0.3, 0.5]] # alpha channel ax.imshow(gradient, interpolation=""bicubic"", zorder=0.1, extent=(0, 1, 0, 1), transform=ax.transAxes, aspect=""auto"") plt.show() " 43381,"def CVNeuralNetLayer(theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires=None): """"""pennylane.template.CVNeuralNetLayer(theta_1, phi_1, s, theta_2, phi_2, r, k, wires) A single layer of a CV Quantum Neural Network Implements a single layer from the the CV Quantum Neural Network (CVQNN) architecture of :cite:`killoran2018continuous` over :math:`N` wires. .. note:: The CV neural network architecture includes :class:`~.Kerr` operations. Make sure to use a suitable device, such as the :code:`strawberryfields.fock` device of the `PennyLane-SF `_ plugin. 
Args: theta_1 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for first interferometer phi_1 (array[float]): length :math:`N(N-1)/2` array of phase angles for first interferometer varphi_1 (array[float]): length :math:`N` array of rotation angles for first interferometer r (array[float]): length :math:`N` arrays of squeezing amounts for :class:`~.Squeezing` operations phi_r (array[float]): length :math:`N` arrays of squeezing angles for :class:`~.Squeezing` operations theta_2 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for second interferometer phi_2 (array[float]): length :math:`N(N-1)/2` array of phase angles for second interferometer varphi_2 (array[float]): length :math:`N` array of rotation angles for second interferometer a (array[float]): length :math:`N` arrays of displacement magnitudes for :class:`~.Displacement` operations phi_a (array[float]): length :math:`N` arrays of displacement angles for :class:`~.Displacement` operations k (array[float]): length :math:`N` arrays of kerr parameters for :class:`~.Kerr` operations Keyword Args: wires (Sequence[int]): wires the layer should act on """""" Interferometer(theta=theta_1, phi=phi_1, varphi=varphi_1, wires=wires) for i, wire in enumerate(wires): Squeezing(r[i], phi_r[i], wires=wire) Interferometer(theta=theta_2, phi=phi_2, varphi=varphi_2, wires=wires) for i, wire in enumerate(wires): Displacement(a[i], phi_a[i], wires=wire) for i, wire in enumerate(wires): Kerr(k[i], wires=wire) ","def CVNeuralNetLayer(theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires=None): """"""pennylane.template.CVNeuralNetLayer(theta_1, phi_1, s, theta_2, phi_2, r, k, wires) A single layer of a CV Quantum Neural Network Implements a single layer from the the CV Quantum Neural Network (CVQNN) architecture of :cite:`killoran2018continuous` over :math:`N` wires. .. note:: The CV neural network architecture includes :class:`~.Kerr` operations. Make sure to use a suitable device, such as the :code:`strawberryfields.fock` device of the `PennyLane-SF `_ plugin. 
Args: theta_1 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for first interferometer phi_1 (array[float]): length :math:`N(N-1)/2` array of phase angles for first interferometer varphi_1 (array[float]): length :math:`N` array of final rotation angles for first interferometer r (array[float]): length :math:`N` arrays of squeezing amounts for :class:`~.Squeezing` operations phi_r (array[float]): length :math:`N` arrays of squeezing angles for :class:`~.Squeezing` operations theta_2 (array[float]): length :math:`N(N-1)/2` array of transmittivity angles for second interferometer phi_2 (array[float]): length :math:`N(N-1)/2` array of phase angles for second interferometer varphi_2 (array[float]): length :math:`N` array of rotation angles for second interferometer a (array[float]): length :math:`N` arrays of displacement magnitudes for :class:`~.Displacement` operations phi_a (array[float]): length :math:`N` arrays of displacement angles for :class:`~.Displacement` operations k (array[float]): length :math:`N` arrays of kerr parameters for :class:`~.Kerr` operations Keyword Args: wires (Sequence[int]): wires the layer should act on """""" Interferometer(theta=theta_1, phi=phi_1, varphi=varphi_1, wires=wires) for i, wire in enumerate(wires): Squeezing(r[i], phi_r[i], wires=wire) Interferometer(theta=theta_2, phi=phi_2, varphi=varphi_2, wires=wires) for i, wire in enumerate(wires): Displacement(a[i], phi_a[i], wires=wire) for i, wire in enumerate(wires): Kerr(k[i], wires=wire) " 3247,"def convert_search_boolean_to_snuba_query(terms, params=None): if len(terms) == 1: return convert_snuba_condition_to_function(terms[0], params) # Filter out any ANDs since we can assume anything without an OR is an AND. Also do some # basic sanitization of the query: can't have two operators next to each other, and can't # start or end a query with an operator. prev = None new_terms = [] for term in terms: if prev: if SearchBoolean.is_operator(prev) and SearchBoolean.is_operator(term): raise InvalidSearchQuery( u""cannot have two conditions next to each other: {} {}"".format(prev, term) ) else: if SearchBoolean.is_operator(term): raise InvalidSearchQuery(u""condition is missing on left side of {}"".format(term)) if term != SearchBoolean.BOOLEAN_AND: new_terms.append(term) prev = term if SearchBoolean.is_operator(term): raise InvalidSearchQuery(u""condition is missing on right side of {}"".format(term)) terms = new_terms # We put precedence on AND, which sort of counter-intuitevely means we have to split the query # on ORs first, so the ANDs are grouped together. Search through the query for ORs and split the # query on each OR. # We want to maintain a binary tree, so split the terms on the first OR we can find and recurse on # the two sides. 
If there is no OR, split the first element out to AND index = None lhs, rhs = None, None operator = None try: index = terms.index(SearchBoolean.BOOLEAN_OR) lhs, rhs = terms[:index], terms[index + 1 :] operator = SNUBA_OR except Exception: lhs, rhs = terms[:1], terms[1:] operator = SNUBA_AND ( lhs_condition, lhs_having, projects_to_filter, group_ids, ) = convert_search_boolean_to_snuba_query(lhs, params) ( rhs_condition, rhs_having, rhs_projects_to_filter, rhs_group_ids, ) = convert_search_boolean_to_snuba_query(rhs, params) projects_to_filter.extend(rhs_projects_to_filter) group_ids.extend(rhs_group_ids) if operator == SNUBA_OR and (lhs_condition or rhs_condition) and (lhs_having or rhs_having): raise InvalidSearchQuery( u""Having an OR between aggregate filters and normal filters is invalid."" ) condition, having = None, None if lhs_condition or rhs_condition: args = filter(None, [lhs_condition, rhs_condition]) condition = [operator, args] if args else None if lhs_having or rhs_having: args = filter(None, [lhs_having, rhs_having]) having = [operator, args] if args else None return condition, having, projects_to_filter, group_ids ","def convert_search_boolean_to_snuba_query(terms, params=None): if len(terms) == 1: return convert_snuba_condition_to_function(terms[0], params) # Filter out any ANDs since we can assume anything without an OR is an AND. Also do some # basic sanitization of the query: can't have two operators next to each other, and can't # start or end a query with an operator. prev = None new_terms = [] for term in terms: if prev: if SearchBoolean.is_operator(prev) and SearchBoolean.is_operator(term): raise InvalidSearchQuery( u""cannot have two operator conditions next to each other: {} {}"".format(prev, term) ) else: if SearchBoolean.is_operator(term): raise InvalidSearchQuery(u""condition is missing on left side of {}"".format(term)) if term != SearchBoolean.BOOLEAN_AND: new_terms.append(term) prev = term if SearchBoolean.is_operator(term): raise InvalidSearchQuery(u""condition is missing on right side of {}"".format(term)) terms = new_terms # We put precedence on AND, which sort of counter-intuitevely means we have to split the query # on ORs first, so the ANDs are grouped together. Search through the query for ORs and split the # query on each OR. # We want to maintain a binary tree, so split the terms on the first OR we can find and recurse on # the two sides. 
If there is no OR, split the first element out to AND index = None lhs, rhs = None, None operator = None try: index = terms.index(SearchBoolean.BOOLEAN_OR) lhs, rhs = terms[:index], terms[index + 1 :] operator = SNUBA_OR except Exception: lhs, rhs = terms[:1], terms[1:] operator = SNUBA_AND ( lhs_condition, lhs_having, projects_to_filter, group_ids, ) = convert_search_boolean_to_snuba_query(lhs, params) ( rhs_condition, rhs_having, rhs_projects_to_filter, rhs_group_ids, ) = convert_search_boolean_to_snuba_query(rhs, params) projects_to_filter.extend(rhs_projects_to_filter) group_ids.extend(rhs_group_ids) if operator == SNUBA_OR and (lhs_condition or rhs_condition) and (lhs_having or rhs_having): raise InvalidSearchQuery( u""Having an OR between aggregate filters and normal filters is invalid."" ) condition, having = None, None if lhs_condition or rhs_condition: args = filter(None, [lhs_condition, rhs_condition]) condition = [operator, args] if args else None if lhs_having or rhs_having: args = filter(None, [lhs_having, rhs_having]) having = [operator, args] if args else None return condition, having, projects_to_filter, group_ids " 36260,"def _regress_out_chunk(data): # data is a tuple containing the selected columns from adata.X # and the regressors dataFrame data_chunk = data[0] regressors = data[1] variable_is_categorical = data[2] responses_chunk_list = [] import statsmodels.api as sm from statsmodels.tools.sm_exceptions import PerfectSeparationError for col_index in range(data_chunk.shape[1]): # if all values are identical, the statsmodel.api.GLM throws an error; but then no regression is necessary anyways... if (data_chunk[:, col_index] != data_chunk[0, col_index]).sum() == 0: responses_chunk_list.append(data_chunk[:, col_index]) continue if variable_is_categorical: regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]] else: regres = regressors try: result = sm.GLM(data_chunk[:, col_index], regres, family=sm.families.Gaussian()).fit() new_column = result.resid_response except PerfectSeparationError: # this emulates R's behavior logg.warning('Encountered PerfectSeparationError, setting to 0 as in R.') new_column = np.zeros(data_chunk.shape[0]) responses_chunk_list.append(new_column) return np.vstack(responses_chunk_list) ","def _regress_out_chunk(data): # data is a tuple containing the selected columns from adata.X # and the regressors dataFrame data_chunk = data[0] regressors = data[1] variable_is_categorical = data[2] responses_chunk_list = [] import statsmodels.api as sm from statsmodels.tools.sm_exceptions import PerfectSeparationError for col_index in range(data_chunk.shape[1]): # if all values are identical, the statsmodel.api.GLM throws an error; but then no regression is necessary anyways... 
if not (data_chunk[:, col_index] != data_chunk[0, col_index]).any(): responses_chunk_list.append(data_chunk[:, col_index]) continue if variable_is_categorical: regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]] else: regres = regressors try: result = sm.GLM(data_chunk[:, col_index], regres, family=sm.families.Gaussian()).fit() new_column = result.resid_response except PerfectSeparationError: # this emulates R's behavior logg.warning('Encountered PerfectSeparationError, setting to 0 as in R.') new_column = np.zeros(data_chunk.shape[0]) responses_chunk_list.append(new_column) return np.vstack(responses_chunk_list) " 31621,"def main(): params = demisto.params() command = demisto.command() demisto.info(f'Command being called is {command}') try: client = Client(server_url=params.get('server_url'), use_ssl=not params.get('insecure', False), proxy=params.get('proxy'), feed_tags=argToList(params.get('feedTags')), tlp_color=params.get('tlp_color'), content_max_size=int(params.get('max_size', '45'))) client.create_indicators_from_response() if demisto.command() == 'test-module': # if the client was created successfully and there is data in feed the test is successful. return_results(""ok"") elif demisto.command() == 'rss-get-indicators': return_results(get_indicators(client, demisto.args())) elif demisto.command() == 'fetch-indicators': for iter_ in batch(client.parsed_indicators, batch_size=2000): demisto.createIndicators(iter_) else: raise NotImplementedError(f'Command {command} is not implemented.') # Log exceptions and return errors except ValueError: raise DemistoException(""Article content max size must be a number, for example 50."") except Exception as err: demisto.error(traceback.format_exc()) # print the traceback return_error(f""Failed to execute {command} command.\nError:\n{str(err)}"") ","def main(): params = demisto.params() command = demisto.command() demisto.info(f'Command being called is {command}') try: client = Client(server_url=params.get('server_url'), use_ssl=not params.get('insecure', False), proxy=params.get('proxy'), feed_tags=argToList(params.get('feedTags')), tlp_color=params.get('tlp_color'), content_max_size=int(params.get('max_size', '45'))) client.create_indicators_from_response() if demisto.command() == 'test-module': # if the client was created successfully and there is data in feed the test is successful. return_results(""ok"") elif demisto.command() == 'rss-get-indicators': return_results(get_indicators(client, demisto.args())) elif command == 'fetch-indicators': for iter_ in batch(client.parsed_indicators, batch_size=2000): demisto.createIndicators(iter_) else: raise NotImplementedError(f'Command {command} is not implemented.') # Log exceptions and return errors except ValueError: raise DemistoException(""Article content max size must be a number, for example 50."") except Exception as err: demisto.error(traceback.format_exc()) # print the traceback return_error(f""Failed to execute {command} command.\nError:\n{str(err)}"") " 52708,"def update_global_associations( sydent, db: sqlite3.Connection, send_email: bool, dry_run: bool, test=False, ): """"""Update the DB table global_threepid_associations so that all stored emails are casefolded, the signed association is re-signed and any duplicate mxid's associated with the given email are deleted. 
:return: None """""" # get every row where the local server is origin server and medium is email origin_server = sydent.server_name medium = ""email"" cur = db.cursor() res = cur.execute( ""SELECT address, mxid, sgAssoc FROM global_threepid_associations WHERE medium = ?"" ""AND originServer = ? ORDER BY ts DESC"", (medium, origin_server), ) # dict that stores email address with mxid, email address, lookup hash, and # signed association associations: Dict[str, List[Tuple[str, str, str, str]]] = {} # iterate through selected associations, casefold email, rehash it, re-sign the # associations and add to associations dict for address, mxid, sg_assoc in res.fetchall(): casefold_address = address.casefold() # rehash the email since hash functions are case-sensitive lookup_hash = calculate_lookup_hash(sydent, casefold_address) # update signed associations with new casefolded address and re-sign sg_assoc = json_decoder.decode(sg_assoc) sg_assoc[""address""] = address.casefold() sg_assoc = json.dumps( signedjson.sign.sign_json( sg_assoc, sydent.server_name, sydent.keyring.ed25519 ) ) if casefold_address in associations: associations[casefold_address].append( (address, mxid, lookup_hash, sg_assoc) ) else: associations[casefold_address] = [(address, mxid, lookup_hash, sg_assoc)] # list of arguments to update db with db_update_args: List[Tuple[Any, str, str, str, str]] = [] # list of mxids to delete to_delete: List[Tuple[str]] = [] for casefold_address, assoc_tuples in associations.items(): db_update_args.append( ( casefold_address, assoc_tuples[0][2], assoc_tuples[0][3], assoc_tuples[0][0], assoc_tuples[0][1], ) ) if len(assoc_tuples) > 1: # Iterate over all associations except for the first one, since we've already # processed it. for address, mxid, _, _ in assoc_tuples[1:]: to_delete.append((address,)) if not test: print( f""{len(to_delete)} rows to delete, {len(db_update_args)} rows to update in global_threepid_associations"" ) if not dry_run: if len(to_delete) > 0: cur.executemany( ""DELETE FROM global_threepid_associations WHERE address = ?"", to_delete ) if len(db_update_args) > 0: cur.executemany( ""UPDATE global_threepid_associations SET address = ?, lookup_hash = ?, sgAssoc = ? WHERE address = ? AND mxid = ?"", db_update_args, ) db.commit() ","def update_global_associations( sydent, db: sqlite3.Connection, send_email: bool, dry_run: bool, test: bool = False, ) -> None: """"""Update the DB table global_threepid_associations so that all stored emails are casefolded, the signed association is re-signed and any duplicate mxid's associated with the given email are deleted. :return: None """""" # get every row where the local server is origin server and medium is email origin_server = sydent.server_name medium = ""email"" cur = db.cursor() res = cur.execute( ""SELECT address, mxid, sgAssoc FROM global_threepid_associations WHERE medium = ?"" ""AND originServer = ? 
ORDER BY ts DESC"", (medium, origin_server), ) # dict that stores email address with mxid, email address, lookup hash, and # signed association associations: Dict[str, List[Tuple[str, str, str, str]]] = {} # iterate through selected associations, casefold email, rehash it, re-sign the # associations and add to associations dict for address, mxid, sg_assoc in res.fetchall(): casefold_address = address.casefold() # rehash the email since hash functions are case-sensitive lookup_hash = calculate_lookup_hash(sydent, casefold_address) # update signed associations with new casefolded address and re-sign sg_assoc = json_decoder.decode(sg_assoc) sg_assoc[""address""] = address.casefold() sg_assoc = json.dumps( signedjson.sign.sign_json( sg_assoc, sydent.server_name, sydent.keyring.ed25519 ) ) if casefold_address in associations: associations[casefold_address].append( (address, mxid, lookup_hash, sg_assoc) ) else: associations[casefold_address] = [(address, mxid, lookup_hash, sg_assoc)] # list of arguments to update db with db_update_args: List[Tuple[Any, str, str, str, str]] = [] # list of mxids to delete to_delete: List[Tuple[str]] = [] for casefold_address, assoc_tuples in associations.items(): db_update_args.append( ( casefold_address, assoc_tuples[0][2], assoc_tuples[0][3], assoc_tuples[0][0], assoc_tuples[0][1], ) ) if len(assoc_tuples) > 1: # Iterate over all associations except for the first one, since we've already # processed it. for address, mxid, _, _ in assoc_tuples[1:]: to_delete.append((address,)) if not test: print( f""{len(to_delete)} rows to delete, {len(db_update_args)} rows to update in global_threepid_associations"" ) if not dry_run: if len(to_delete) > 0: cur.executemany( ""DELETE FROM global_threepid_associations WHERE address = ?"", to_delete ) if len(db_update_args) > 0: cur.executemany( ""UPDATE global_threepid_associations SET address = ?, lookup_hash = ?, sgAssoc = ? WHERE address = ? AND mxid = ?"", db_update_args, ) db.commit() " 21814,"def stop_cancellation(deferred: ""defer.Deferred[T]"") -> ""defer.Deferred[T]"": """"""Prevent a `Deferred` from being cancelled by wrapping it in another `Deferred`. Args: deferred: The `Deferred` to protect against cancellation. Must not follow the Synapse logcontext rules. Returns: A new `Deferred`, which will contain the result of the original `Deferred`, but will not propagate cancellation through to the original. When cancelled, the new `Deferred` will fail with a `CancelledError` and will not follow the Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap the new `Deferred`. """""" new_deferred: defer.Deferred[T] = defer.Deferred() deferred.addBoth(new_deferred.callback) return new_deferred ","def stop_cancellation(deferred: ""defer.Deferred[T]"") -> ""defer.Deferred[T]"": """"""Prevent a `Deferred` from being cancelled by wrapping it in another `Deferred`. Args: deferred: The `Deferred` to protect against cancellation. Must not follow the Synapse logcontext rules. Returns: A new `Deferred`, which will contain the result of the original `Deferred`, but will not propagate cancellation through to the original. When cancelled, the new `Deferred` will fail with a `CancelledError` and will not follow the Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap the new `Deferred`. 
"""""" new_deferred: defer.Deferred[T] = defer.Deferred() deferred.chainDeferred(new_deferred) return new_deferred " 35284,"def symmetric_power_iteration(tensor, n_repeat=10, n_iteration=10, verbose=False): """"""A single Robust Symmetric Tensor Power Iteration Parameters ---------- tensor : tl.tensor input tensor to decompose, must be symmetric of shape (size, )*order n_repeat : int, default is 10 number of initializations to be tried n_iterations : int, default is 10 number of power iterations verbose : bool level of verbosity Returns ------- (eigenval, best_factor, deflated) eigenval : float the obtained eigenvalue best_factor : tl.tensor the best estimated eigenvector deflated : tl.tensor of same shape as `tensor` the deflated tensor (i.e. without the estimated component) """""" order = tl.ndim(tensor) size = tl.shape(tensor)[0] if not tl.shape(tensor) == (size, )*order: raise ValueError('The input tensor does not have the same size along each mode.') # A list of candidates for each mode best_score = -np.inf scores = [] modes = list(range(1, order)) for _ in range(n_repeat): factor = tl.tensor(np.random.random_sample(size), **tl.context(tensor)) for _ in range(n_iteration): for _ in range(order): factor = tl.tenalg.multi_mode_dot(tensor, [factor]*(order-1), modes=modes) factor = factor / tl.norm(factor, 2) score = tl.tenalg.multi_mode_dot(tensor, [factor]*order) scores.append(score) #round(score, 2)) if score > best_score: best_score = score best_factor = factor if verbose: print(f'Best score of {n_repeat}: {best_score}') # Refine the init for _ in range(n_iteration): for _ in range(order): best_factor = tl.tenalg.multi_mode_dot(tensor, [best_factor]*(order-1), modes=modes) best_factor = best_factor / tl.norm(best_factor, 2) eigenval = tl.tenalg.multi_mode_dot(tensor, [best_factor]*order) deflated = tensor - outer([best_factor]*order)*eigenval if verbose: explained = tl.norm(deflated)/tl.norm(tensor) print(f'Eigenvalue: {eigenval}, explained: {explained}') return eigenval, best_factor, deflated ","def symmetric_power_iteration(tensor, n_repeat=10, n_iteration=10, verbose=False): """"""A single Robust Symmetric Tensor Power Iteration Parameters ---------- tensor : tl.tensor input tensor to decompose, must be symmetric of shape (size, )*order n_repeat : int, default is 10 number of initializations to be tried n_iterations : int, default is 10 number of power iterations verbose : bool level of verbosity Returns ------- (eigenval, best_factor, deflated) eigenval : float the obtained eigenvalue best_factor : tl.tensor the best estimated eigenvector deflated : tl.tensor of same shape as `tensor` the deflated tensor (i.e. 
without the estimated component) """""" order = tl.ndim(tensor) size = tl.shape(tensor)[0] if not tl.shape(tensor) == (size, )*order: raise ValueError('The input tensor does not have the same size along each mode.') # A list of candidates for each mode best_score = -np.inf scores = [] modes = list(range(1, order)) for i in range(n_repeat): factor = tl.tensor(np.random.random_sample(size), **tl.context(tensor)) for _ in range(n_iteration): for _ in range(order): factor = tl.tenalg.multi_mode_dot(tensor, [factor]*(order-1), modes=modes) factor = factor / tl.norm(factor, 2) score = tl.tenalg.multi_mode_dot(tensor, [factor]*order) scores.append(score) #round(score, 2)) if score > best_score: best_score = score best_factor = factor if verbose: print(f'Best score of {n_repeat}: {best_score}') # Refine the init for _ in range(n_iteration): for _ in range(order): best_factor = tl.tenalg.multi_mode_dot(tensor, [best_factor]*(order-1), modes=modes) best_factor = best_factor / tl.norm(best_factor, 2) eigenval = tl.tenalg.multi_mode_dot(tensor, [best_factor]*order) deflated = tensor - outer([best_factor]*order)*eigenval if verbose: explained = tl.norm(deflated)/tl.norm(tensor) print(f'Eigenvalue: {eigenval}, explained: {explained}') return eigenval, best_factor, deflated " 20033,"def sizes(img, mask, num_objects=100): """""" Visualize an RGB image in all potential colorspaces Inputs: img = RGB or grayscale image data mask = Binary mask made from selected contours num_objects = Optional parameter to limit the number of objects that will get annotated. Returns: plotting_img = Plotting image containing the original image and L,A,B,H,S, and V colorspaces :param img: numpy.ndarray :param mask: numpy.ndarray :param num_objects: int :return plotting_img: numpy.ndarray """""" plotting_img = np.copy(img) # Convert grayscale images to color if len(np.shape(plotting_img)) == 2: plotting_img = cv2.cvtColor(plotting_img, cv2.COLOR_GRAY2BGR) # Store debug debug = params.debug params.debug = None # ID contours and sort them from largest to smallest id_objects, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] sorted_objects = sorted(id_objects, key=lambda x: cv2.contourArea(x)) # Function sorts smallest to largest so keep the last X objects listed sorted_objects = sorted_objects[len(sorted_objects) - num_objects : len(sorted_objects)] rand_color = color_palette(num=num_objects, saved=False) random.shuffle(rand_color) label_coord_x = [] label_coord_y = [] area_vals = [] for i, contour in enumerate(sorted_objects): # ID and store area values and centers of mass for labeling them m = cv2.moments(contour) area_vals.append(m['m00']) label_coord_x.append(int(m[""m10""] / m[""m00""])) label_coord_y.append(int(m[""m01""] / m[""m00""])) # Fill in objects with color cv2.drawContours(plotting_img, sorted_objects, i, rand_color[i], thickness=-1) # Label with area values for c, value in enumerate(area_vals): text = ""{:.0f}"".format(value) w = label_coord_x[c] h = label_coord_y[c] cv2.putText(img=plotting_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness) print(""There were "" + str(len(id_objects) - num_objects) + "" objects not annotated."") # Auto-increment device params.device += 1 # Reset debug mode params.debug = debug _debug(visual=plotting_img, filename=os.path.join(params.debug_outdir, str(params.device) +'_object_sizes.png')) return plotting_img ","def sizes(img, mask, num_objects=100): """""" Visualize 
an RGB image in all potential colorspaces Inputs: img = RGB or grayscale image data mask = Binary mask made from selected contours num_objects = Optional parameter to limit the number of objects that will get annotated. Returns: plotting_img = Plotting image containing the original image and L,A,B,H,S, and V colorspaces :param img: numpy.ndarray :param mask: numpy.ndarray :param num_objects: int :return plotting_img: numpy.ndarray """""" plotting_img = np.copy(img) # Convert grayscale images to color if len(np.shape(plotting_img)) == 2: plotting_img = cv2.cvtColor(plotting_img, cv2.COLOR_GRAY2BGR) # Store debug debug = params.debug params.debug = None # ID contours and sort them from largest to smallest id_objects, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] sorted_objects = sorted(id_objects, key=lambda x: cv2.contourArea(x)) # Function sorts smallest to largest so keep the last X objects listed sorted_objects = sorted_objects[len(sorted_objects) - num_objects : len(sorted_objects)] rand_color = color_palette(num=num_objects, saved=False) random.shuffle(rand_color) label_coord_x = [] label_coord_y = [] area_vals = [] for i, contour in enumerate(sorted_objects): # ID and store area values and centers of mass for labeling them m = cv2.moments(contour) area_vals.append(m['m00']) label_coord_x.append(int(m[""m10""] / m[""m00""])) label_coord_y.append(int(m[""m01""] / m[""m00""])) # Fill in objects with color cv2.drawContours(plotting_img, sorted_objects, i, rand_color[i], thickness=-1) # Label with area values for c, value in enumerate(area_vals): text = ""{:.0f}"".format(value) w = label_coord_x[c] h = label_coord_y[c] cv2.putText(img=plotting_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness) print(f""There were {len(id_objects) - num_objects} objects not annotated."") # Auto-increment device params.device += 1 # Reset debug mode params.debug = debug _debug(visual=plotting_img, filename=os.path.join(params.debug_outdir, str(params.device) +'_object_sizes.png')) return plotting_img " 22930,"def need_image_for_service(args, image): """"""Does `image` support a service we want to run."""""" for service, service_image in SERVICE_TO_IMAGE.items(): if should_run_service(args, service) and image == service_image: return True return False ","def need_image_for_service(args, image): """"""Does `image` support a service we want to run?"""""" for service, service_image in SERVICE_TO_IMAGE.items(): if should_run_service(args, service) and image == service_image: return True return False " 37089,"def plot_state_city(rho, title="""", figsize=None, color=None, alpha=1): """"""Plot the cityscape of quantum state. Plot two 3d bar graphs (two dimensional) of the real and imaginary part of the density matrix rho. Args: rho (ndarray): Numpy array for state vector or density matrix. title (str): a string that represents the plot title figsize (tuple): Figure size in inches. color (list): A list of len=2 giving colors for real and imaginary components of matrix elements. alpha (float): Transparency value for bars Returns: matplotlib.Figure: The matplotlib.Figure of the visualization Raises: ImportError: Requires matplotlib. ValueError: When 'color' is not a list of len=2. 
"""""" if not HAS_MATPLOTLIB: raise ImportError('Must have Matplotlib installed.') rho = _validate_input_state(rho) num = int(np.log2(len(rho))) # get the real and imag parts of rho datareal = np.real(rho) dataimag = np.imag(rho) # get the labels column_names = [bin(i)[2:].zfill(num) for i in range(2**num)] row_names = [bin(i)[2:].zfill(num) for i in range(2**num)] lx = len(datareal[0]) # Work out matrix dimensions ly = len(datareal[:, 0]) xpos = np.arange(0, lx, 1) # Set up a mesh of positions ypos = np.arange(0, ly, 1) xpos, ypos = np.meshgrid(xpos+0.25, ypos+0.25) xpos = xpos.flatten() ypos = ypos.flatten() zpos = np.zeros(lx*ly) dx = 0.5 * np.ones_like(zpos) # width of bars dy = dx.copy() dzr = datareal.flatten() dzi = dataimag.flatten() if color is None: color = [""#648fff"", ""#648fff""] else: if len(color) != 2: raise ValueError(""'color' must be a list of len=2."") if color[0] is None: color[0] = ""#648fff"" if color[1] is None: color[1] = ""#648fff"" # set default figure size if figsize is None: figsize = (15, 5) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(1, 2, 1, projection='3d') x = [0, max(xpos)+0.5, max(xpos)+0.5, 0] y = [0, 0, max(ypos)+0.5, max(ypos)+0.5] z = [0, 0, 0, 0] verts = [list(zip(x, y, z))] fc1 = generate_facecolors(xpos, ypos, zpos, dx, dy, dzr, color[0]) for idx, cur_zpos in enumerate(zpos): if dzr[idx] > 0: zorder = 2 else: zorder = 0 b1 = ax1.bar3d(xpos[idx], ypos[idx], cur_zpos, dx[idx], dy[idx], dzr[idx], alpha=alpha, zorder=zorder) b1.set_facecolors(fc1[6*idx:6*idx+6]) pc1 = Poly3DCollection(verts, alpha=0.15, facecolor='k', linewidths=1, zorder=1) if min(dzr) < 0 < max(dzr): ax1.add_collection3d(pc1) ax2 = fig.add_subplot(1, 2, 2, projection='3d') fc2 = generate_facecolors(xpos, ypos, zpos, dx, dy, dzi, color[1]) for idx, cur_zpos in enumerate(zpos): if dzi[idx] > 0: zorder = 2 else: zorder = 0 b2 = ax2.bar3d(xpos[idx], ypos[idx], cur_zpos, dx[idx], dy[idx], dzi[idx], alpha=alpha, zorder=zorder) b2.set_facecolors(fc2[6*idx:6*idx+6]) pc2 = Poly3DCollection(verts, alpha=0.2, facecolor='k', linewidths=1, zorder=1) if min(dzi) < 0 < max(dzi): ax2.add_collection3d(pc2) ax1.set_xticks(np.arange(0.5, lx+0.5, 1)) ax1.set_yticks(np.arange(0.5, ly+0.5, 1)) max_dzr = max(dzr) min_dzr = min(dzr) min_dzi = np.min(dzi) max_dzi = np.max(dzi) if max_dzr != min_dzr: ax1.axes.set_zlim3d(np.min(dzr), max(np.max(dzr)+1e-9, np.max(dzi))) else: if min_dzr == 0: ax1.axes.set_zlim3d(np.min(dzr), max(np.max(dzr)+1e-9, np.max(dzi))) else: ax1.axes.set_zlim3d(auto=True) ax1.get_autoscalez_on() ax1.w_xaxis.set_ticklabels(row_names, fontsize=14, rotation=45) ax1.w_yaxis.set_ticklabels(column_names, fontsize=14, rotation=-22.5) ax1.set_zlabel(r'Re[$\rho$]', fontsize=14) for tick in ax1.zaxis.get_major_ticks(): tick.label.set_fontsize(14) ax2.set_xticks(np.arange(0.5, lx+0.5, 1)) ax2.set_yticks(np.arange(0.5, ly+0.5, 1)) if min_dzi != max_dzi: eps = 0 ax2.axes.set_zlim3d(np.min(dzi), max(np.max(dzr)+1e-9, np.max(dzi)+eps)) else: if min_dzi == 0: ax2.set_zticks([0]) eps = 1e-9 ax2.axes.set_zlim3d(np.min(dzi), max(np.max(dzr)+1e-9, np.max(dzi)+eps)) else: ax2.axes.set_zlim3d(auto=True) ax2.w_xaxis.set_ticklabels(row_names, fontsize=14, rotation=45) ax2.w_yaxis.set_ticklabels(column_names, fontsize=14, rotation=-22.5) ax2.set_zlabel(r'Im[$\rho$]', fontsize=14) for tick in ax2.zaxis.get_major_ticks(): tick.label.set_fontsize(14) ax2.get_autoscalez_on() plt.suptitle(title, fontsize=16) plt.tight_layout() if get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']: 
plt.close(fig) return fig ","def plot_state_city(rho, title="""", figsize=None, color=None, alpha=1): """"""Plot the cityscape of quantum state. Plot two 3d bar graphs (two dimensional) of the real and imaginary part of the density matrix rho. Args: rho (ndarray): Numpy array for state vector or density matrix. title (str): a string that represents the plot title figsize (tuple): Figure size in inches. color (list): A list of len=2 giving colors for real and imaginary components of matrix elements. alpha (float): Transparency value for bars Returns: matplotlib.Figure: The matplotlib.Figure of the visualization Raises: ImportError: Requires matplotlib. ValueError: When 'color' is not a list of len=2. """""" if not HAS_MATPLOTLIB: raise ImportError('Must have Matplotlib installed.') rho = _validate_input_state(rho) num = int(np.log2(len(rho))) # get the real and imag parts of rho datareal = np.real(rho) dataimag = np.imag(rho) # get the labels column_names = [bin(i)[2:].zfill(num) for i in range(2**num)] row_names = [bin(i)[2:].zfill(num) for i in range(2**num)] lx = len(datareal[0]) # Work out matrix dimensions ly = len(datareal[:, 0]) xpos = np.arange(0, lx, 1) # Set up a mesh of positions ypos = np.arange(0, ly, 1) xpos, ypos = np.meshgrid(xpos+0.25, ypos+0.25) xpos = xpos.flatten() ypos = ypos.flatten() zpos = np.zeros(lx*ly) dx = 0.5 * np.ones_like(zpos) # width of bars dy = dx.copy() dzr = datareal.flatten() dzi = dataimag.flatten() if color is None: color = [""#648fff"", ""#648fff""] else: if len(color) != 2: raise ValueError(""'color' must be a list of len=2."") if color[0] is None: color[0] = ""#648fff"" if color[1] is None: color[1] = ""#648fff"" # set default figure size if figsize is None: figsize = (15, 5) fig = plt.figure(figsize=figsize) ax1 = fig.add_subplot(1, 2, 1, projection='3d') x = [0, max(xpos)+0.5, max(xpos)+0.5, 0] y = [0, 0, max(ypos)+0.5, max(ypos)+0.5] z = [0, 0, 0, 0] verts = [list(zip(x, y, z))] fc1 = generate_facecolors(xpos, ypos, zpos, dx, dy, dzr, color[0]) for idx, cur_zpos in enumerate(zpos): if dzr[idx] > 0: zorder = 2 else: zorder = 0 b1 = ax1.bar3d(xpos[idx], ypos[idx], cur_zpos, dx[idx], dy[idx], dzr[idx], alpha=alpha, zorder=zorder) b1.set_facecolors(fc1[6*idx:6*idx+6]) pc1 = Poly3DCollection(verts, alpha=0.15, facecolor='k', linewidths=1, zorder=1) if min(dzr) < 0 < max(dzr): ax1.add_collection3d(pc1) ax2 = fig.add_subplot(1, 2, 2, projection='3d') fc2 = generate_facecolors(xpos, ypos, zpos, dx, dy, dzi, color[1]) for idx, cur_zpos in enumerate(zpos): if dzi[idx] > 0: zorder = 2 else: zorder = 0 b2 = ax2.bar3d(xpos[idx], ypos[idx], cur_zpos, dx[idx], dy[idx], dzi[idx], alpha=alpha, zorder=zorder) b2.set_facecolors(fc2[6*idx:6*idx+6]) pc2 = Poly3DCollection(verts, alpha=0.2, facecolor='k', linewidths=1, zorder=1) if min(dzi) < 0 < max(dzi): ax2.add_collection3d(pc2) ax1.set_xticks(np.arange(0.5, lx+0.5, 1)) ax1.set_yticks(np.arange(0.5, ly+0.5, 1)) max_dzr = max(dzr) min_dzr = min(dzr) min_dzi = np.min(dzi) max_dzi = np.max(dzi) if max_dzr != min_dzr: ax1.axes.set_zlim3d(np.min(dzr), max(np.max(dzr)+1e-9, np.max(dzi))) else: if min_dzr == 0: ax1.axes.set_zlim3d(np.min(dzr), max(np.max(dzr)+1e-9, np.max(dzi))) else: ax1.axes.set_zlim3d(auto=True) ax1.get_autoscalez_on() ax1.w_xaxis.set_ticklabels(row_names, fontsize=14, rotation=45) ax1.w_yaxis.set_ticklabels(column_names, fontsize=14, rotation=-22.5) ax1.set_zlabel(r'Re[$\rho$]', fontsize=14) for tick in ax1.zaxis.get_major_ticks(): tick.label.set_fontsize(14) ax2.set_xticks(np.arange(0.5, lx+0.5, 1)) 
ax2.set_yticks(np.arange(0.5, ly+0.5, 1)) if min_dzi != max_dzi: eps = 0 ax2.axes.set_zlim3d(np.min(dzi), max(np.max(dzr)+1e-9, np.max(dzi)+eps)) else: if min_dzi == 0: ax2.set_zticks([0]) eps = 1e-9 ax2.axes.set_zlim3d(np.min(dzi), max(np.max(dzr)+1e-9, np.max(dzi)+eps)) else: ax2.axes.set_zlim3d(auto=True) ax2.w_xaxis.set_ticklabels(row_names, fontsize=14, rotation=45) ax2.w_yaxis.set_ticklabels(column_names, fontsize=14, rotation=-22.5) ax2.set_zlabel('Im[$\\rho$]', fontsize=14) for tick in ax2.zaxis.get_major_ticks(): tick.label.set_fontsize(14) ax2.get_autoscalez_on() plt.suptitle(title, fontsize=16) plt.tight_layout() if get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']: plt.close(fig) return fig " 5317,"def _has_name_tag(tags, name): return any(item.get('Value', None) == name for item in tags if item.get('Key', None) == 'Name') ","def _has_name_tag(tags, name): return any(item.get('Value') == name for item in tags if item.get('Key') == 'Name') " 4260,"def get_point_spread(resmat, src, idx, mode=None, n_comp=1, norm=False, return_svd_vars=False): """"""Get point-spread (PSFs) functions for vertices. Parameters ---------- resmat : array, shape (n_dipoles, n_dipoles) Forward Operator. src : instance of SourceSpaces Source space used to compute resolution matrix. idx : list of int | list of Label Source for indices for which to compute PSFs or CTFs. If mode is None, PSFs/CTFs will be returned for all indices. If mode is not None, the corresponding summary measure will be computed across all PSFs/CTFs available from idx. Can be: * list of integers : Compute PSFs/CTFs for all indices to source space vertices specified in idx. * list of Label : Compute PSFs/CTFs for source space vertices in specified labels. mode : None | 'mean' | 'max' | 'svd' Compute summary of PSFs/CTFs across all indices specified in 'idx'. Can be: * None : Output individual PSFs/CTFs for each specific vertex (Default). * 'mean' : Mean of PSFs/CTFs across vertices. * 'max' : PSFs/CTFs with maximum norm across vertices. Returns the n_comp largest PSFs/CTFs. * 'svd' : SVD components across PSFs/CTFs across vertices. Returns the n_comp first SVD components. n_comp : int Number of PSF/CTF components to return for mode='max' or mode='svd'. Default n_comp=1. norm : None | 'max' | 'norm' Whether and how to normalise the PSFs and CTFs. This will be applied before computing summaries as specified in 'mode'. Can be: * None : Use un-normalized PSFs/CTFs (Default). * 'max' : Normalize to maximum absolute value across all PSFs/CTFs. * 'norm' : Normalize to maximum norm across all PSFs/CTFs. return_svd_vars : bool Whether or not to return the explained variances across the specified vertices for individual SVD components. This is only valid if mode='svd'. Default return_svd_vars=False. Returns ------- stc : instance of SourceEstimate PSFs or CTFs as an STC object. All PSFs/CTFs will be returned as successive samples in one STC object, in the order they are specified in idx. PSFs/CTFs for labels are grouped together. return_svd_vars : 1D array The explained variances of SVD components across the PSFs/CTFs for the specified vertices. Only returned if mode='svd' and return_svd_vars=True. """""" return _get_psf_ctf(resmat, src, idx, func='psf', mode=mode, n_comp=n_comp, norm=norm, return_svd_vars=return_svd_vars) ","def get_point_spread(resmat, src, idx, mode=None, n_comp=1, norm=False, return_svd_vars=False): """"""Get point-spread (PSFs) functions for vertices. 
Parameters ---------- resmat : array, shape (n_dipoles, n_dipoles) Forward Operator. src : instance of SourceSpaces Source space used to compute resolution matrix. idx : list of int | list of Label Source for indices for which to compute PSFs or CTFs. If mode is None, PSFs/CTFs will be returned for all indices. If mode is not None, the corresponding summary measure will be computed across all PSFs/CTFs available from idx. Can be: * list of integers : Compute PSFs/CTFs for all indices to source space vertices specified in idx. * list of Label : Compute PSFs/CTFs for source space vertices in specified labels. mode : None | 'mean' | 'max' | 'svd' Compute summary of PSFs/CTFs across all indices specified in 'idx'. Can be: * None : Output individual PSFs/CTFs for each specific vertex (Default). * 'mean' : Mean of PSFs/CTFs across vertices. * 'max' : PSFs/CTFs with maximum norm across vertices. Returns the n_comp largest PSFs/CTFs. * 'svd' : SVD components across PSFs/CTFs across vertices. Returns the n_comp first SVD components. n_comp : int Number of PSF/CTF components to return for mode='max' or mode='svd'. Default n_comp=1. norm : None | 'max' | 'norm' Whether and how to normalise the PSFs and CTFs. This will be applied before computing summaries as specified in 'mode'. Can be: * None : Use un-normalized PSFs/CTFs (Default). * 'max' : Normalize to maximum absolute value across all PSFs/CTFs. * 'norm' : Normalize to maximum norm across all PSFs/CTFs. return_svd_vars : bool Whether or not to return the explained variances across the specified vertices for individual SVD components. This is only valid if mode='svd'. Default return_svd_vars=False. Returns ------- stc : instance of SourceEstimate PSFs or CTFs as an STC object. All PSFs/CTFs will be returned as successive samples in one STC object, in the order they are specified in idx. PSFs/CTFs for labels are grouped together. return_svd_vars : ndarray, shape (...) The explained variances of SVD components across the PSFs/CTFs for the specified vertices. Only returned if mode='svd' and return_svd_vars=True. """""" return _get_psf_ctf(resmat, src, idx, func='psf', mode=mode, n_comp=n_comp, norm=norm, return_svd_vars=return_svd_vars) " 7505,"def _remove_download_cache(pkgname='astropy'): zapped_cache = False try: with contextlib.ExitStack() as stack: try: dldir, url2hash = stack.enter_context( _cache(pkgname, write=True)) except (RuntimeError, WrongDBMModule): # Couldn't get lock # Release lock by blowing away cache # Need to get locations dldir, _ = _get_download_cache_locs(pkgname) url2hash = None except OSError as e: # Problem arose when trying to open the cache msg = 'Not clearing data cache - cache inaccessible due to ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return if os.path.exists(dldir): # This can be awkward if the shelve is still open # NFS can't delete an open file if url2hash is not None: url2hash.close() shutil.rmtree(dldir) zapped_cache = True except OSError as e: if zapped_cache and e.errno == errno.ENOENT: # We just deleted the directory and, on Windows (?) the ""dumb"" # backend tried to write itself out to a nonexistent directory. # It's fine for this to fail. 
return else: msg = 'Not clearing data from cache - problem arose ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return ","def _remove_download_cache(pkgname='astropy'): zapped_cache = False try: with contextlib.ExitStack() as stack: try: dldir, url2hash = stack.enter_context( _cache(pkgname, write=True)) except (RuntimeError, WrongDBMModule): # Couldn't get lock # Release lock by blowing away cache # Need to get locations dldir, _ = _get_download_cache_locs(pkgname) url2hash = None except OSError as e: # Problem arose when trying to open the cache msg = 'Not clearing data cache - cache inaccessible due to ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return if os.path.exists(dldir): # This can be awkward if the shelve is still open # NFS can't delete an open file if url2hash is not None: url2hash.close() shutil.rmtree(dldir) zapped_cache = True except OSError as e: if zapped_cache and e.errno == errno.ENOENT: # We just deleted the directory and, on Windows (?) the ""dumb"" # backend tried to write itself out to a nonexistent directory. # It's fine for this to fail. pass else: msg = 'Not clearing data from cache - problem arose ' estr = '' if len(e.args) < 1 else (': ' + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return " 49872,"def campbellnorman(zenith, transmittance, pressure=101325.0, dni_extra=1367.0): ''' Determine DNI, DHI, GHI from extraterrestrial flux, transmittance, and atmospheric pressure. Parameters ---------- zenith: pd.Series True (not refraction-corrected) zenith angles in decimal degrees. If Z is a vector it must be of the same size as all other vector inputs. Z must be >=0 and <=180. transmittance: float Atmospheric transmittance between 0 and 1. pressure: float, default 101325.0 Air pressure dni_extra: float, default 1367.0 Direct irradiance incident at the top of the atmosphere. Returns ------- irradiance: DataFrame Modeled direct normal irradiance, direct horizontal irradiance, and global horizontal irradiance in W/m^2 References ---------- .. [1] Campbell, G. S., J. M. Norman (1998) An Introduction to Environmental Biophysics. 2nd Ed. New York: Springer. ''' tau = transmittance airmass = atmosphere.get_relative_airmass(zenith, model='simple') airmass = atmosphere.get_absolute_airmass(airmass, pressure=pressure) dni = dni_extra*tau**airmass dhi = 0.3 * (1.0 - tau**airmass) * dni_extra * np.cos(np.radians(zenith)) ghi = dhi + dni * np.cos(np.radians(zenith)) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(ghi, pd.Series): irrads = pd.DataFrame(irrads) return irrads ","def campbellnorman(zenith, transmittance, pressure=101325.0, dni_extra=1367.0): ''' Determine DNI, DHI, GHI from extraterrestrial flux, transmittance, and atmospheric pressure. Parameters ---------- zenith: pd.Series True (not refraction-corrected) zenith angles in decimal degrees. If Z is a vector it must be of the same size as all other vector inputs. Z must be >=0 and <=180. transmittance: float Atmospheric transmittance between 0 and 1. pressure: float, default 101325.0 Air pressure dni_extra: float, default 1367.0 Direct irradiance incident at the top of the atmosphere. Returns ------- irradiance: DataFrame Modeled direct normal irradiance, direct horizontal irradiance, and global horizontal irradiance in W/m^2 References ---------- .. [1] Campbell, G. S., J. M. 
Norman (1998) An Introduction to Environmental Biophysics. 2nd Ed. New York: Springer. ''' tau = transmittance airmass = atmosphere.get_relative_airmass(zenith, model='simple') airmass = atmosphere.get_absolute_airmass(airmass, pressure=pressure) dni = dni_extra*tau**airmass cos_zen = np.cos(np.radians(zenith)) dhi = 0.3 * (1.0 - tau**airmass) * dni_extra * cos_zen ghi = dhi + dni * cos_zen irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(ghi, pd.Series): irrads = pd.DataFrame(irrads) return irrads " 23682,"def calc_spectral_mismatch(sr, e_sun, e_ref=None): """""" Calculate the spectral mismatch under a given measured spectrum with respect to a reference spectrum. Parameters ---------- sr: pandas.Series The spectral response of one (photovoltaic) device. The index of the Series must contain wavelength values in nm. e_sun: pandas.DataFrame or pandase.Series One or more measured irradiance spectra in a pandas.DataFrame having wavelength in nm as column index. A single spectrum may be be given as a pandas.Series having wavelength in nm as index. e_ref: pandas.Series, optional The reference spectrum to use for the mismatch calculation. The index of the Series must contain wavelength values in nm. The default is the ASTM G173-03 global tilted spectrum. Returns ------- smm: pandas.Series or float if a single measured spectrum is provided. Notes ----- If the default reference spectrum is used it is linearly interpolated to the wavelengths of the measured spectrum. To achieve alternate behavior e_ref can be transformed before calling this function and provided as an argument. The spectral response is linearly interpolated to the wavelengths of the spectrum with which is it multiplied internally (e_sun and e_ref). To achieve alternate behavior the sr can be transformed before calling this function. """""" # get the reference spectrum at wavelengths matching the measured spectra if e_ref is None: e_ref = get_am15g(wavelength=e_sun.T.index) # interpolate the sr at the wavelengths of the spectra # reference spectrum wavelengths may differ if e_ref is from caller sr_sun = np.interp(e_sun.T.index, sr.index, sr, left=0.0, right=0.0) sr_ref = np.interp(e_ref.T.index, sr.index, sr, left=0.0, right=0.0) # a helper function to make usable fraction calculations more readable def integrate(e): return np.trapz(e, x=e.T.index, axis=-1) # calculate usable fractions uf_sun = integrate(e_sun * sr_sun) / integrate(e_sun) uf_ref = integrate(e_ref * sr_ref) / integrate(e_ref) # mismatch is the ratio or quotient of the usable fractions smm = uf_sun / uf_ref if isinstance(e_sun, pd.DataFrame): smm = pd.Series(smm, index=e_sun.index) return smm ","def calc_spectral_mismatch(sr, e_sun, e_ref=None): """""" Calculate the spectral mismatch under a given measured spectrum with respect to a reference spectrum. Parameters ---------- sr: pandas.Series The spectral response of one (photovoltaic) device. The index of the Series must contain wavelength values in nm. e_sun: pandas.DataFrame or pandas.Series One or more measured irradiance spectra in a pandas.DataFrame having wavelength in nm as column index. A single spectrum may be be given as a pandas.Series having wavelength in nm as index. e_ref: pandas.Series, optional The reference spectrum to use for the mismatch calculation. The index of the Series must contain wavelength values in nm. The default is the ASTM G173-03 global tilted spectrum. Returns ------- smm: pandas.Series or float if a single measured spectrum is provided. 
Notes ----- If the default reference spectrum is used it is linearly interpolated to the wavelengths of the measured spectrum. To achieve alternate behavior e_ref can be transformed before calling this function and provided as an argument. The spectral response is linearly interpolated to the wavelengths of the spectrum with which is it multiplied internally (e_sun and e_ref). To achieve alternate behavior the sr can be transformed before calling this function. """""" # get the reference spectrum at wavelengths matching the measured spectra if e_ref is None: e_ref = get_am15g(wavelength=e_sun.T.index) # interpolate the sr at the wavelengths of the spectra # reference spectrum wavelengths may differ if e_ref is from caller sr_sun = np.interp(e_sun.T.index, sr.index, sr, left=0.0, right=0.0) sr_ref = np.interp(e_ref.T.index, sr.index, sr, left=0.0, right=0.0) # a helper function to make usable fraction calculations more readable def integrate(e): return np.trapz(e, x=e.T.index, axis=-1) # calculate usable fractions uf_sun = integrate(e_sun * sr_sun) / integrate(e_sun) uf_ref = integrate(e_ref * sr_ref) / integrate(e_ref) # mismatch is the ratio or quotient of the usable fractions smm = uf_sun / uf_ref if isinstance(e_sun, pd.DataFrame): smm = pd.Series(smm, index=e_sun.index) return smm " 4256,"def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, picks=None): """"""RAP-MUSIC for evoked data. Parameters ---------- data : array, shape (n_channels, n_times) Evoked data. info : dict Measurement info. times : array Times. forward : instance of Forward Forward operator. noise_cov : instance of Covariance The noise covariance. n_dipoles : int The number of dipoles to estimate. The default value is 2. picks : list of int Caller ensures this is a list of int. Returns ------- dipoles : list of instances of Dipole The dipole fits. explained_data : array | None Data explained by the dipoles using a least square fitting with the selected active dipoles and their estimated orientation. Computed only if return_explained_data is True. """""" info = pick_info(info, picks) del picks # things are much simpler if we avoid surface orientation align = forward['source_nn'].copy() if forward['surf_ori'] and not is_fixed_orient(forward): forward = convert_forward_solution(forward, surf_ori=False) is_free_ori, info, _, _, G, whitener, _, _ = _prepare_beamformer_input( info, forward, noise_cov=noise_cov, rank=None) forward = pick_channels_forward(forward, info['ch_names'], ordered=True) del info # whiten the data (leadfield already whitened) M = np.dot(whitener, data) del data _, eig_vectors = linalg.eigh(np.dot(M, M.T)) phi_sig = eig_vectors[:, -n_dipoles:] n_orient = 3 if is_free_ori else 1 G.shape = (G.shape[0], -1, n_orient) gain = forward['sol']['data'].copy() gain.shape = G.shape n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) gain_dip = np.empty((n_channels, n_dipoles)) oris = np.empty((n_dipoles, 3)) poss = np.empty((n_dipoles, 3)) G_proj = G.copy() phi_sig_proj = phi_sig.copy() idxs = list() for k in range(n_dipoles): subcorr_max = -1. 
source_idx, source_ori, source_pos = 0, [0, 0, 0], [0, 0, 0] for i_source in range(G.shape[1]): Gk = G_proj[:, i_source] subcorr, ori = _compute_subcorr(Gk, phi_sig_proj) if subcorr > subcorr_max: subcorr_max = subcorr source_idx = i_source source_ori = ori source_pos = forward['source_rr'][i_source] if n_orient == 3 and align is not None: surf_normal = forward['source_nn'][3 * i_source + 2] # make sure ori is aligned to the surface orientation source_ori *= np.sign(source_ori @ surf_normal) or 1. if n_orient == 1: source_ori = forward['source_nn'][i_source] idxs.append(source_idx) if n_orient == 3: Ak = np.dot(G[:, source_idx], source_ori) else: Ak = G[:, source_idx, 0] A[:, k] = Ak oris[k] = source_ori poss[k] = source_pos logger.info(""source %s found: p = %s"" % (k + 1, source_idx)) if n_orient == 3: logger.info(""ori = %s %s %s"" % tuple(oris[k])) projection = _compute_proj(A[:, :k + 1]) G_proj = np.einsum('ab,bso->aso', projection, G) phi_sig_proj = np.dot(projection, phi_sig) del G, G_proj sol = linalg.lstsq(A, M)[0] if n_orient == 3: X = sol[:, np.newaxis] * oris[:, :, np.newaxis] X.shape = (-1, len(times)) else: X = sol gain_active = gain[:, idxs] if n_orient == 3: gain_dip = (oris * gain_active).sum(-1) idxs = np.array(idxs) active_set = np.array( [[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel() else: gain_dip = gain_active[:, :, 0] active_set = idxs gain_active = whitener @ gain_active.reshape(gain.shape[0], -1) assert gain_active.shape == (n_channels, X.shape[0]) explained_data = gain_dip @ sol M_estimate = whitener @ explained_data _log_exp_var(M, M_estimate) tstep = np.median(np.diff(times)) if len(times) > 1 else 1. dipoles = _make_dipoles_sparse( X, active_set, forward, times[0], tstep, M, gain_active, active_is_idx=True) for dipole, ori in zip(dipoles, oris): signs = np.sign((dipole.ori * ori).sum(-1, keepdims=True)) dipole.ori *= signs dipole.amplitude *= signs[:, 0] logger.info('[done]') return dipoles, explained_data ","def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, picks=None): """"""RAP-MUSIC for evoked data. Parameters ---------- data : array, shape (n_channels, n_times) Evoked data. info : dict Measurement info. times : array Times. forward : instance of Forward Forward operator. noise_cov : instance of Covariance The noise covariance. n_dipoles : int The number of dipoles to estimate. The default value is 2. picks : list of int Caller ensures this is a list of int. Returns ------- dipoles : list of instances of Dipole The dipole fits. explained_data : array | None Data explained by the dipoles using a least square fitting with the selected active dipoles and their estimated orientation. Computed only if return_explained_data is True. 
"""""" info = pick_info(info, picks) del picks # things are much simpler if we avoid surface orientation align = forward['source_nn'].copy() if forward['surf_ori'] and not is_fixed_orient(forward): forward = convert_forward_solution(forward, surf_ori=False) is_free_ori, info, _, _, G, whitener, _, _ = _prepare_beamformer_input( info, forward, noise_cov=noise_cov, rank=None) forward = pick_channels_forward(forward, info['ch_names'], ordered=True) del info # whiten the data (leadfield already whitened) M = np.dot(whitener, data) del data _, eig_vectors = linalg.eigh(np.dot(M, M.T)) phi_sig = eig_vectors[:, -n_dipoles:] n_orient = 3 if is_free_ori else 1 G.shape = (G.shape[0], -1, n_orient) gain = forward['sol']['data'].copy() gain.shape = G.shape n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) gain_dip = np.empty((n_channels, n_dipoles)) oris = np.empty((n_dipoles, 3)) poss = np.empty((n_dipoles, 3)) G_proj = G.copy() phi_sig_proj = phi_sig.copy() idxs = list() for k in range(n_dipoles): subcorr_max = -1. source_idx, source_ori, source_pos = 0, [0, 0, 0], [0, 0, 0] for i_source in range(G_proj.shape[1]): Gk = G_proj[:, i_source] subcorr, ori = _compute_subcorr(Gk, phi_sig_proj) if subcorr > subcorr_max: subcorr_max = subcorr source_idx = i_source source_ori = ori source_pos = forward['source_rr'][i_source] if n_orient == 3 and align is not None: surf_normal = forward['source_nn'][3 * i_source + 2] # make sure ori is aligned to the surface orientation source_ori *= np.sign(source_ori @ surf_normal) or 1. if n_orient == 1: source_ori = forward['source_nn'][i_source] idxs.append(source_idx) if n_orient == 3: Ak = np.dot(G[:, source_idx], source_ori) else: Ak = G[:, source_idx, 0] A[:, k] = Ak oris[k] = source_ori poss[k] = source_pos logger.info(""source %s found: p = %s"" % (k + 1, source_idx)) if n_orient == 3: logger.info(""ori = %s %s %s"" % tuple(oris[k])) projection = _compute_proj(A[:, :k + 1]) G_proj = np.einsum('ab,bso->aso', projection, G) phi_sig_proj = np.dot(projection, phi_sig) del G, G_proj sol = linalg.lstsq(A, M)[0] if n_orient == 3: X = sol[:, np.newaxis] * oris[:, :, np.newaxis] X.shape = (-1, len(times)) else: X = sol gain_active = gain[:, idxs] if n_orient == 3: gain_dip = (oris * gain_active).sum(-1) idxs = np.array(idxs) active_set = np.array( [[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel() else: gain_dip = gain_active[:, :, 0] active_set = idxs gain_active = whitener @ gain_active.reshape(gain.shape[0], -1) assert gain_active.shape == (n_channels, X.shape[0]) explained_data = gain_dip @ sol M_estimate = whitener @ explained_data _log_exp_var(M, M_estimate) tstep = np.median(np.diff(times)) if len(times) > 1 else 1. dipoles = _make_dipoles_sparse( X, active_set, forward, times[0], tstep, M, gain_active, active_is_idx=True) for dipole, ori in zip(dipoles, oris): signs = np.sign((dipole.ori * ori).sum(-1, keepdims=True)) dipole.ori *= signs dipole.amplitude *= signs[:, 0] logger.info('[done]') return dipoles, explained_data " 14110,"def _get_srid_and_geom_from_postgis(name, con, schema=None, geom_name=None): """""" Get Geometry/Geography column name, SRID, and SQLalchemy type. Parameters ---------- name : str Name of the target table. con : sqlalchemy.engine.Connection or sqlalchemy.engine.Engine Active connection to the PostGIS database. schema : str, optional Specify the schema. If None, use default schema: 'public'. geom_name : str, optional Geometry/Geography column name. If None (default) tries to infer it from database. 
Returns ------- str Geometry/Geography column name. int Geometry/Geography column SRID. Geometry or Geography geoalchemy2 class used to query the Geometry/Geography column. """""" try: from geoalchemy2 import Geography, Geometry from sqlalchemy.exc import NoSuchTableError except ImportError: raise ImportError( ""geopandas requires the geoalchemy2 package to interface with PostGIS."" ) if schema is None: schema = ""public"" with _get_conn(con) as connection: if not connection.dialect.has_table(connection, name, schema): raise NoSuchTableError(f""table {name} doesn't exist in {schema} schema"") for gtype, gclass in [(""geometry"", Geometry), (""geography"", Geography)]: query = ( f""SELECT F_{gtype.upper()}_COLUMN, SRID, TYPE "" f""FROM {gtype.upper()}_COLUMNS "" f""WHERE "" f""F_TABLE_SCHEMA = '{schema}' AND "" f""F_TABLE_NAME = '{name}'"" ) if geom_name is None: result = connection.execute(query).all() if result is not None and len(result) > 0: if len(result) > 1: raise ValueError( f""found multiple {gtype} columns in {name} table, "" f""please provide geom_name"" ) geom_name, srid, geom_type = result[0] return ( geom_name, int(srid), gclass(geometry_type=geom_type, srid=int(srid)), ) else: query += f"" AND F_{gtype.upper()}_COLUMN = '{geom_name}'"" result = connection.execute(query).first() if result is not None: geom_name, srid, geom_type = result return ( geom_name, int(srid), gclass(geometry_type=geom_type, srid=int(srid)), ) msg = [ ""Cannot find Geometry or Geography column "", f""in table {name} in schema {schema}"", ] if geom_name is not None: msg[0] += f""{geom_name} "" raise ValueError("""".join(msg)) ","def _get_srid_and_geom_from_postgis(name, con, schema=None, geom_name=None): """""" Get Geometry/Geography column name, SRID, and SQLalchemy type. Parameters ---------- name : str Name of the target table. con : sqlalchemy.engine.Connection or sqlalchemy.engine.Engine Active connection to the PostGIS database. schema : str, optional Specify the schema. If None, use default schema: 'public'. geom_name : str, optional Geometry/Geography column name. If None (default) tries to infer it from database. Returns ------- str Geometry/Geography column name. int Geometry/Geography column SRID. Geometry or Geography geoalchemy2 class used to query the Geometry/Geography column. 
"""""" try: from geoalchemy2 import Geography, Geometry from sqlalchemy.exc import NoSuchTableError except ImportError: raise ImportError( ""geopandas requires the geoalchemy2/sqlalchemy packages to interface with PostGIS."" ) if schema is None: schema = ""public"" with _get_conn(con) as connection: if not connection.dialect.has_table(connection, name, schema): raise NoSuchTableError(f""table {name} doesn't exist in {schema} schema"") for gtype, gclass in [(""geometry"", Geometry), (""geography"", Geography)]: query = ( f""SELECT F_{gtype.upper()}_COLUMN, SRID, TYPE "" f""FROM {gtype.upper()}_COLUMNS "" f""WHERE "" f""F_TABLE_SCHEMA = '{schema}' AND "" f""F_TABLE_NAME = '{name}'"" ) if geom_name is None: result = connection.execute(query).all() if result is not None and len(result) > 0: if len(result) > 1: raise ValueError( f""found multiple {gtype} columns in {name} table, "" f""please provide geom_name"" ) geom_name, srid, geom_type = result[0] return ( geom_name, int(srid), gclass(geometry_type=geom_type, srid=int(srid)), ) else: query += f"" AND F_{gtype.upper()}_COLUMN = '{geom_name}'"" result = connection.execute(query).first() if result is not None: geom_name, srid, geom_type = result return ( geom_name, int(srid), gclass(geometry_type=geom_type, srid=int(srid)), ) msg = [ ""Cannot find Geometry or Geography column "", f""in table {name} in schema {schema}"", ] if geom_name is not None: msg[0] += f""{geom_name} "" raise ValueError("""".join(msg)) " 45678,"def description(): return 'Two-dimensional visualizations of molecular structure.' ","def description(): return 'Two-dimensional visualization of molecular structures.' " 8655,"def get_running_pid(filename): """"""Retrieve the pid number from the given ``filename``. :param str filename: path to file to read the PID from :return: the PID number of a sopel instance if running, ``None`` otherwise :rtype: integer This function tries to retrieve a PID number from the given ``filename``, as an integer, and returns ``None`` if the file is not found or if the content is not an integer. """""" if not os.path.isfile(filename): return with open(filename, 'r') as pid_file: try: return int(pid_file.read()) except ValueError: pass ","def get_running_pid(filename): """"""Retrieve the PID number from the given ``filename``. :param str filename: path to file to read the PID from :return: the PID number of a sopel instance if running, ``None`` otherwise :rtype: integer This function tries to retrieve a PID number from the given ``filename``, as an integer, and returns ``None`` if the file is not found or if the content is not an integer. """""" if not os.path.isfile(filename): return with open(filename, 'r') as pid_file: try: return int(pid_file.read()) except ValueError: pass " 30120,"def get_exp_probability_nothing_common( mutation_rate, ksize, scaled, n_unique_kmers=None, sequence_len_bp=None ): """""" Given parameters, calculate the expected probability that nothing will be common between a fracminhash sketch of a original sequence and a fracminhash sketch of a mutated sequence. If this is above a threshold, we should suspect that the two sketches may have nothing in common. The threshold needs to be set with proper insights. Arguments: n_unique_kmers, ksize, mutation_rate, scaled Returns: float - expected likelihood that nothing is common between sketches """""" # NTP note: do we have any checks for ksize >=1 in sourmash_args? The rest should be taken care of. 
# assert 0.0 <= mutation_rate <= 1.0 and ksize >= 1 and scaled >= 1 if sequence_len_bp and not n_unique_kmers: n_unique_kmers = sequence_len_to_n_kmers(sequence_len_bp, ksize) f_scaled = 1.0 / float(scaled) if mutation_rate == 1.0: return 1.0 elif mutation_rate == 0.0: return 0.0 return exp( get_expected_log_probability(n_unique_kmers, ksize, mutation_rate, f_scaled) ) ","def get_exp_probability_nothing_common( mutation_rate, ksize, scaled, *, n_unique_kmers=None, sequence_len_bp=None ): """""" Given parameters, calculate the expected probability that nothing will be common between a fracminhash sketch of a original sequence and a fracminhash sketch of a mutated sequence. If this is above a threshold, we should suspect that the two sketches may have nothing in common. The threshold needs to be set with proper insights. Arguments: n_unique_kmers, ksize, mutation_rate, scaled Returns: float - expected likelihood that nothing is common between sketches """""" # NTP note: do we have any checks for ksize >=1 in sourmash_args? The rest should be taken care of. # assert 0.0 <= mutation_rate <= 1.0 and ksize >= 1 and scaled >= 1 if sequence_len_bp and not n_unique_kmers: n_unique_kmers = sequence_len_to_n_kmers(sequence_len_bp, ksize) f_scaled = 1.0 / float(scaled) if mutation_rate == 1.0: return 1.0 elif mutation_rate == 0.0: return 0.0 return exp( get_expected_log_probability(n_unique_kmers, ksize, mutation_rate, f_scaled) ) " 28402,"def unset(update, context): """"""Remove the job if the user changed their mind."""""" chat_id = update.message.chat_id job_removed = remove_job_if_exists(str(chat_id), context) text = 'Timer successfully unset!' if job_removed else 'You have no active timer' update.message.reply_text(text) ","def unset(update, context): """"""Remove the job if the user changed their mind."""""" chat_id = update.message.chat_id job_removed = remove_job_if_exists(str(chat_id), context) text = 'Timer successfully cancelled!' if job_removed else 'You have no active timer.' update.message.reply_text(text) " 5856,"def _masked_arrays_2_sentinel_arrays(samples): # masked arrays in `samples` are converted to regular arrays, and values # corresponding with masked elements are replaced with a sentinel value # return without modifying arrays if none have a mask has_mask = False for sample in samples: mask = getattr(sample, 'mask', False) has_mask = has_mask or np.any(mask) if not has_mask: return samples, None # None means there is no sentinel value # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked) # values are always omitted, but there are different nan policies. dtype = np.result_type(*samples) dtype = dtype if np.issubdtype(dtype, np.number) else np.float64 for i in range(len(samples)): # Things get more complicated if the arrays are of different types. # We could have different sentinel values for each array, but # the purpose of this code is convenience, not efficiency. 
samples[i] = samples[i].astype(dtype, copy=False) inexact = np.issubdtype(dtype, np.inexact) info = np.finfo if inexact else np.iinfo max_possible, min_possible = info(dtype).max, info(dtype).min nextafter = np.nextafter if inexact else (lambda x, _: x - 1) sentinel = max_possible # For simplicity, min_possible/np.infs are not candidate sentinel values while sentinel > min_possible: for sample in samples: if np.any(sample == sentinel): # choose a new sentinel value sentinel = nextafter(sentinel, -np.inf) break else: # when sentinel value is OK, break the while loop break else: message = (""This function replaces masked elements with sentinel "" ""values, but the data contains all distinct values of this "" ""data type. Consider promoting the dtype to `np.float64`."") raise ValueError(message) # replace masked elements with sentinel value out_samples = [] for sample in samples: mask = getattr(sample, 'mask', None) if mask is not None: # turn all masked arrays into sentinel arrays mask = np.broadcast_to(mask, sample.shape) sample = np.asarray(sample) # don't modify original array sample[mask] = sentinel out_samples.append(sample) return out_samples, sentinel ","def _masked_arrays_2_sentinel_arrays(samples): # masked arrays in `samples` are converted to regular arrays, and values # corresponding with masked elements are replaced with a sentinel value # return without modifying arrays if none have a mask has_mask = False for sample in samples: mask = getattr(sample, 'mask', False) has_mask = has_mask or np.any(mask) if not has_mask: return samples, None # None means there is no sentinel value # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked) # values are always omitted, but there are different nan policies. dtype = np.result_type(*samples) dtype = dtype if np.issubdtype(dtype, np.number) else np.float64 for i in range(len(samples)): # Things get more complicated if the arrays are of different types. # We could have different sentinel values for each array, but # the purpose of this code is convenience, not efficiency. samples[i] = samples[i].astype(dtype, copy=False) inexact = np.issubdtype(dtype, np.inexact) info = np.finfo if inexact else np.iinfo max_possible, min_possible = info(dtype).max, info(dtype).min nextafter = np.nextafter if inexact else (lambda x, _: x - 1) sentinel = max_possible # For simplicity, min_possible/np.infs are not candidate sentinel values while sentinel > min_possible: for sample in samples: if np.any(sample == sentinel): # choose a new sentinel value sentinel = nextafter(sentinel, -np.inf) break else: # when sentinel value is OK, break the while loop break else: message = (""This function replaces masked elements with sentinel "" ""values, but the data contains all distinct values of this "" ""data type. Consider promoting the dtype to `np.float64`."") raise ValueError(message) # replace masked elements with sentinel value out_samples = [] for sample in samples: mask = getattr(sample, 'mask', None) if mask is not None: # turn all masked arrays into sentinel arrays mask = np.broadcast_to(mask, sample.shape) sample = np.asarray(sample) # don't modify original array sample[mask] = sentinel out_samples.append(sample) return out_samples, sentinel " 995,"def exception_colors(): """"""Return a color table with fields for exception reporting. The table is an instance of ColorSchemeTable with schemes added for 'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled in. 
Examples: >>> ec = exception_colors() >>> ec.active_scheme_name '' >>> print(ec.active_colors) None Now we activate a color scheme: >>> ec.set_active_scheme('NoColor') >>> ec.active_scheme_name 'NoColor' >>> sorted(ec.active_colors.keys()) ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line', 'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName', 'val', 'valEm'] """""" ex_colors = ColorSchemeTable() # Populate it with color schemes C = TermColors # shorthand and local lookup ex_colors.add_scheme(ColorScheme( 'NoColor', # The color to be used for the top line topline = C.NoColor, # The colors to be used in the traceback filename = C.NoColor, lineno = C.NoColor, name = C.NoColor, vName = C.NoColor, val = C.NoColor, em = C.NoColor, # Emphasized colors for the last frame of the traceback normalEm = C.NoColor, filenameEm = C.NoColor, linenoEm = C.NoColor, nameEm = C.NoColor, valEm = C.NoColor, # Colors for printing the exception excName = C.NoColor, line = C.NoColor, caret = C.NoColor, Normal = C.NoColor )) # make some schemes as instances so we can copy them for modification easily ex_colors.add_scheme(ColorScheme( 'Linux', # The color to be used for the top line topline = C.LightRed, # The colors to be used in the traceback filename = C.Green, lineno = C.Green, name = C.Purple, vName = C.Cyan, val = C.Green, em = C.LightCyan, # Emphasized colors for the last frame of the traceback normalEm = C.LightCyan, filenameEm = C.LightGreen, linenoEm = C.LightGreen, nameEm = C.LightPurple, valEm = C.LightBlue, # Colors for printing the exception excName = C.LightRed, line = C.Yellow, caret = C.White, Normal = C.Normal, ExecutingNode = 'bg:#00005f' )) # For light backgrounds, swap dark/light colors ex_colors.add_scheme(ColorScheme( 'LightBG', # The color to be used for the top line topline = C.Red, # The colors to be used in the traceback filename = C.LightGreen, lineno = C.LightGreen, name = C.LightPurple, vName = C.Cyan, val = C.LightGreen, em = C.Cyan, # Emphasized colors for the last frame of the traceback normalEm = C.Cyan, filenameEm = C.Green, linenoEm = C.Green, nameEm = C.Purple, valEm = C.Blue, # Colors for printing the exception excName = C.Red, #line = C.Brown, # brown often is displayed as yellow line = C.Red, caret = C.Normal, Normal = C.Normal, ExecutingNode = 'bg:#005f00' )) ex_colors.add_scheme(ColorScheme( 'Neutral', # The color to be used for the top line topline = C.Red, # The colors to be used in the traceback filename = C.LightGreen, lineno = C.LightGreen, name = C.LightPurple, vName = C.Cyan, val = C.LightGreen, em = C.Cyan, # Emphasized colors for the last frame of the traceback normalEm = C.Cyan, filenameEm = C.Green, linenoEm = C.Green, nameEm = C.Purple, valEm = C.Blue, # Colors for printing the exception excName = C.Red, #line = C.Brown, # brown often is displayed as yellow line = C.Red, caret = C.Normal, Normal = C.Normal, ExecutingNode = 'bg:#dddddd' )) # Hack: the 'neutral' colours are not very visible on a dark background on # Windows. Since Windows command prompts have a dark background by default, and # relatively few users are likely to alter that, we will use the 'Linux' colours, # designed for a dark background, as the default on Windows. if os.name == ""nt"": ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral')) return ex_colors ","def exception_colors(): """"""Return a color table with fields for exception reporting. 
The table is an instance of ColorSchemeTable with schemes added for 'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled in. Examples: >>> ec = exception_colors() >>> ec.active_scheme_name '' >>> print(ec.active_colors) None Now we activate a color scheme: >>> ec.set_active_scheme('NoColor') >>> ec.active_scheme_name 'NoColor' >>> sorted(ec.active_colors.keys()) ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line', 'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName', 'val', 'valEm'] """""" ex_colors = ColorSchemeTable() # Populate it with color schemes C = TermColors # shorthand and local lookup ex_colors.add_scheme(ColorScheme( 'NoColor', # The color to be used for the top line topline = C.NoColor, # The colors to be used in the traceback filename = C.NoColor, lineno = C.NoColor, name = C.NoColor, vName = C.NoColor, val = C.NoColor, em = C.NoColor, # Emphasized colors for the last frame of the traceback normalEm = C.NoColor, filenameEm = C.NoColor, linenoEm = C.NoColor, nameEm = C.NoColor, valEm = C.NoColor, # Colors for printing the exception excName = C.NoColor, line = C.NoColor, caret = C.NoColor, Normal = C.NoColor )) # make some schemes as instances so we can copy them for modification easily ex_colors.add_scheme(ColorScheme( 'Linux', # The color to be used for the top line topline = C.LightRed, # The colors to be used in the traceback filename = C.Green, lineno = C.Green, name = C.Purple, vName = C.Cyan, val = C.Green, em = C.LightCyan, # Emphasized colors for the last frame of the traceback normalEm = C.LightCyan, filenameEm = C.LightGreen, linenoEm = C.LightGreen, nameEm = C.LightPurple, valEm = C.LightBlue, # Colors for printing the exception excName = C.LightRed, line = C.Yellow, caret = C.White, Normal = C.Normal, ExecutingNode = 'bg:#00005f', )) # For light backgrounds, swap dark/light colors ex_colors.add_scheme(ColorScheme( 'LightBG', # The color to be used for the top line topline = C.Red, # The colors to be used in the traceback filename = C.LightGreen, lineno = C.LightGreen, name = C.LightPurple, vName = C.Cyan, val = C.LightGreen, em = C.Cyan, # Emphasized colors for the last frame of the traceback normalEm = C.Cyan, filenameEm = C.Green, linenoEm = C.Green, nameEm = C.Purple, valEm = C.Blue, # Colors for printing the exception excName = C.Red, #line = C.Brown, # brown often is displayed as yellow line = C.Red, caret = C.Normal, Normal = C.Normal, ExecutingNode = 'bg:#005f00' )) ex_colors.add_scheme(ColorScheme( 'Neutral', # The color to be used for the top line topline = C.Red, # The colors to be used in the traceback filename = C.LightGreen, lineno = C.LightGreen, name = C.LightPurple, vName = C.Cyan, val = C.LightGreen, em = C.Cyan, # Emphasized colors for the last frame of the traceback normalEm = C.Cyan, filenameEm = C.Green, linenoEm = C.Green, nameEm = C.Purple, valEm = C.Blue, # Colors for printing the exception excName = C.Red, #line = C.Brown, # brown often is displayed as yellow line = C.Red, caret = C.Normal, Normal = C.Normal, ExecutingNode = 'bg:#dddddd' )) # Hack: the 'neutral' colours are not very visible on a dark background on # Windows. Since Windows command prompts have a dark background by default, and # relatively few users are likely to alter that, we will use the 'Linux' colours, # designed for a dark background, as the default on Windows. 
if os.name == ""nt"": ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral')) return ex_colors " 41782,"def plot_intermediate_values(study): # type: (Study) -> go.Figure """"""Plot intermediate values of all trials in a study. Example: The following code snippet shows how to plot intermediate values. .. testcode:: import optuna # Derivative function for x**2 def df(x): return 2*x def objective(trial): next_x = 1 # We start the search at x=1 gamma = trial.suggest_loguniform('alpha', 1e-5, 1e-1) # Step size multiplier # Stepping through gradient descent to find the minima of x**2 for step in range(100): current_x = next_x next_x = current_x - gamma * df(current_x) delta = next_x - current_x trial.report(current_x, step) return delta study = optuna.create_study() study.optimize(objective, n_trials=5) optuna.visualization.plot_intermediate_values(study) .. raw:: html Args: study: A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate values. Returns: A :class:`plotly.graph_objs.Figure` object. """""" _check_plotly_availability() return _get_intermediate_plot(study) ","def plot_intermediate_values(study): # type: (Study) -> go.Figure """"""Plot intermediate values of all trials in a study. Example: The following code snippet shows how to plot intermediate values. .. testcode:: import optuna # Derivative function for x**2 def df(x): return 2 * x def objective(trial): next_x = 1 # We start the search at x=1 gamma = trial.suggest_loguniform('alpha', 1e-5, 1e-1) # Step size multiplier # Stepping through gradient descent to find the minima of x**2 for step in range(100): current_x = next_x next_x = current_x - gamma * df(current_x) delta = next_x - current_x trial.report(current_x, step) return delta study = optuna.create_study() study.optimize(objective, n_trials=5) optuna.visualization.plot_intermediate_values(study) .. raw:: html Args: study: A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate values. Returns: A :class:`plotly.graph_objs.Figure` object. 
"""""" _check_plotly_availability() return _get_intermediate_plot(study) " 47983,"def write_csv_result(csv_file, processing_info, metric_results, dataset_size, metrics_meta): new_file = not check_file_existence(csv_file) field_names = [ 'model', 'launcher', 'device', 'dataset', 'tags', 'metric_name', 'metric_type', 'metric_value', 'metric_target', 'metric_scale', 'metric_postfix', 'dataset_size', 'ref', 'abs_threshold', 'rel_threshold'] model, launcher, device, tags, dataset = processing_info main_info = { 'model': model, 'launcher': launcher, 'device': device.upper(), 'tags': ' '.join(tags) if tags else '', 'dataset': dataset, 'dataset_size': dataset_size } with open(csv_file, 'a+', newline='') as f: writer = DictWriter(f, fieldnames=field_names) if new_file: writer.writeheader() for metric_result, metric_meta in zip(metric_results, metrics_meta): writer.writerow({ **main_info, 'metric_name': metric_result['name'], 'metric_type': metric_result['type'], 'metric_value': metric_result['value'], 'metric_target': metric_meta.get('target', 'higher-better'), 'metric_scale': metric_meta.get('scale', 100), 'metric_postfix': metric_meta.get('postfix', '%'), 'ref': metric_result.get('ref', ''), 'abs_threshold': metric_result.get('abs_threshold', ''), 'rel_threshold': metric_result.get('rel_threshold', '') }) ","def write_csv_result(csv_file, processing_info, metric_results, dataset_size, metrics_meta): new_file = not check_file_existence(csv_file) field_names = [ 'model', 'launcher', 'device', 'dataset', 'tags', 'metric_name', 'metric_type', 'metric_value', 'metric_target', 'metric_scale', 'metric_postfix', 'dataset_size', 'ref', 'abs_threshold', 'rel_threshold'] model, launcher, device, tags, dataset = processing_info main_info = { 'model': model, 'launcher': launcher, 'device': device.upper(), 'tags': ' '.join(tags) if tags else '', 'dataset': dataset, 'dataset_size': dataset_size } with open(csv_file, 'a+', newline='') as f: writer = DictWriter(f, fieldnames=field_names) if new_file: writer.writeheader() for metric_result, metric_meta in zip(metric_results, metrics_meta): writer.writerow({ **main_info, 'metric_name': metric_result['name'], 'metric_type': metric_result['type'], 'metric_value': metric_result['value'], 'metric_target': metric_meta.get('target', 'higher-better'), 'metric_scale': metric_meta.get('scale', 100), 'metric_postfix': metric_meta.get('postfix', '%'), 'ref': metric_result.get('ref', ''), 'abs_threshold': metric_result.get('abs_threshold', 0), 'rel_threshold': metric_result.get('rel_threshold', '') }) " 47193,"def get_modified_python_files(): """""" Return a list of python files that have been modified between the current head and the master branch. """""" repo = Repo(""."") print(f""Master is at {repo.refs.master.commit}"") print(f""Current head is at {repo.head.commit}"") branching_commits = repo.merge_base(repo.refs.master, repo.head) for commit in branching_commits: print(f""Branching commit: {commit}"") print(""\n### DIFF ###\n"") code_diff = [] for commit in branching_commits: for diff_obj in commit.diff(repo.head.commit): # We always add new python files if diff_obj.change_type == ""A"" and diff_obj.b_path.endswith("".py""): code_diff.append(diff_obj.b_path) # We check that deleted python files won't break correspondping tests. 
elif diff_obj.change_type == ""D"" and diff_obj.a_path.endswith("".py""): code_diff.append(diff_obj.a_path) # Now for modified files elif diff_obj.change_type == ""M"" and diff_obj.b_path.endswith("".py""): # In case of renames, we'll look at the tests using both the old and new name. if diff_obj.a_path != diff_obj.b_path: code_diff.extend([diff_obj.a_path, diff_obj.b_path]) else: # Otherwise, we check modifications are in code and not docstrings. if diff_is_docstring_only(repo, commit, diff_obj.b_path): print(f""Ignoring diff in {diff_obj.b_path} as it only concerns docstrings."") else: code_diff.append(diff_obj.a_path) return code_diff ","def get_modified_python_files(): """""" Return a list of python files that have been modified between the current head and the master branch. """""" repo = Repo(Path(transformers.__path__[0])) print(f""Master is at {repo.refs.master.commit}"") print(f""Current head is at {repo.head.commit}"") branching_commits = repo.merge_base(repo.refs.master, repo.head) for commit in branching_commits: print(f""Branching commit: {commit}"") print(""\n### DIFF ###\n"") code_diff = [] for commit in branching_commits: for diff_obj in commit.diff(repo.head.commit): # We always add new python files if diff_obj.change_type == ""A"" and diff_obj.b_path.endswith("".py""): code_diff.append(diff_obj.b_path) # We check that deleted python files won't break correspondping tests. elif diff_obj.change_type == ""D"" and diff_obj.a_path.endswith("".py""): code_diff.append(diff_obj.a_path) # Now for modified files elif diff_obj.change_type == ""M"" and diff_obj.b_path.endswith("".py""): # In case of renames, we'll look at the tests using both the old and new name. if diff_obj.a_path != diff_obj.b_path: code_diff.extend([diff_obj.a_path, diff_obj.b_path]) else: # Otherwise, we check modifications are in code and not docstrings. if diff_is_docstring_only(repo, commit, diff_obj.b_path): print(f""Ignoring diff in {diff_obj.b_path} as it only concerns docstrings."") else: code_diff.append(diff_obj.a_path) return code_diff " 45932,"def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor: r""""""Compensate an image for lens distortion. Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`, tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function. Args: image: Input image with shape :math:`(*, N, C, H, W)`. K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`. dist: Distortion coefficients :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`. Returns: Undistorted image with shape :math:`(*, C, H, W)`. Example: >>> img = torch.rand(1, 3, 5, 5) >>> K = torch.eye(3)[None] >>> dist_coeff = torch.rand(4) >>> out = undistort_image(img, K, dist_coeff) >>> out.shape torch.Size([1, 3, 5, 5]) """""" if len(image.shape) < 4: raise ValueError(f""Image shape is invalid. Got: {image.shape}."") if K.shape[-2:] != (3, 3): raise ValueError(f'K matrix shape is invalid. Got {K.shape}.') if dist.shape[-1] not in [4, 5, 8, 12, 14]: raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.') if not image.is_floating_point(): raise ValueError(f'Invalid input image data type. Input should be float. 
Got {image.dtype}.') B_dims, image_dims = image.shape[:-3], image.shape[-3:] B = int(torch.prod(torch.tensor(B_dims))) channels, rows, cols = image_dims # Create point coordinates for each pixel of the image xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype) pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates # Distort points and define maps ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2 mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float # Remap image to undistort out = remap(image.reshape(-1, channels, rows, cols), mapx, mapy, align_corners=True) return out.reshape(B_dims + image_dims) ","def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor: r""""""Compensate an image for lens distortion. Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`, tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function. Args: image: Input image with shape :math:`(*, N, C, H, W)`. K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`. dist: Distortion coefficients :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`. Returns: Undistorted image with shape :math:`(*, C, H, W)`. Example: >>> img = torch.rand(1, 3, 5, 5) >>> K = torch.eye(3)[None] >>> dist_coeff = torch.rand(4) >>> out = undistort_image(img, K, dist_coeff) >>> out.shape torch.Size([1, 3, 5, 5]) """""" if len(image.shape) < 4: raise ValueError(f""Image shape is invalid. Got: {image.shape}."") if K.shape[-2:] != (3, 3): raise ValueError(f'K matrix shape is invalid. Got {K.shape}.') if dist.shape[-1] not in [4, 5, 8, 12, 14]: raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.') if not image.is_floating_point(): raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.') B_dims, image_dims = image.shape[:-3], image.shape[-3:] B = int(torch.prod(torch.tensor(B_dims))) channels, rows, cols = image_dims # Create point coordinates for each pixel of the image xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype) pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates # Distort points and define maps ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2 mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float # Remap image to undistort out = remap(image.reshape(-1, channels, rows, cols), mapx, mapy, align_corners=True) return out.view_as(image) " 24347,"def construct_template_fields(integration_name, repo_choice, **kwargs): normalized_integration_name = normalize_package_name(integration_name) check_name_kebab = kebab_case_name(integration_name) datadog_checks_base_req = 'datadog-checks-base[deps]>=6.6.0' third_party_install_info = f""""""\ To install the {integration_name} check on your host: 1. Install the [developer toolkit] (https://docs.datadoghq.com/developers/integrations/new_check_howto/#developer-toolkit) on any machine. 2. Run `ddev release build {normalized_integration_name}` to build the package. 3. [Download the Datadog Agent](https://app.datadoghq.com/account/settings#agent). 4. 
Upload the build artifact to any host with an Agent and run `datadog-agent integration install -w path/to/{normalized_integration_name}/dist/.whl`. """""" if repo_choice == 'core': check_name = normalized_integration_name author = 'Datadog' email = 'help@datadoghq.com' email_packages = 'packages@datadoghq.com' install_info = ( 'The {integration_name} check is included in the [Datadog Agent][2] package.\n' 'No additional installation is needed on your server.'.format(integration_name=integration_name) ) license_header = ( '# (C) Datadog, Inc. {year}-present\n' '# All rights reserved\n' '# Licensed under a 3-clause BSD style license (see LICENSE)'.format(year=str(datetime.utcnow().year)) ) support_type = 'core' test_dev_dep = '-e ../datadog_checks_dev' tox_base_dep = '-e../datadog_checks_base[deps]' elif repo_choice == 'marketplace': check_name = normalize_package_name(f""{kwargs.get('author')}_{normalized_integration_name}"") # Updated by the kwargs passed in author = '' email = '' email_packages = '' install_info = third_party_install_info # Static fields license_header = '' support_type = 'partner' test_dev_dep = 'datadog-checks-dev' tox_base_dep = datadog_checks_base_req else: check_name = normalized_integration_name author = 'U.N. Owen' email = email_packages = 'friend@datadog.community' install_info = third_party_install_info license_header = '' support_type = 'contrib' test_dev_dep = 'datadog-checks-dev' tox_base_dep = datadog_checks_base_req config = { 'author': author, 'check_class': f""{''.join(part.capitalize() for part in normalized_integration_name.split('_'))}Check"", 'check_name': check_name, 'integration_name': integration_name, 'check_name_kebab': check_name_kebab, 'email': email, 'email_packages': email_packages, 'guid': uuid4(), 'license_header': license_header, 'install_info': install_info, 'repo_choice': repo_choice, 'support_type': support_type, 'test_dev_dep': test_dev_dep, 'tox_base_dep': tox_base_dep, } config.update(kwargs) return config ","def construct_template_fields(integration_name, repo_choice, **kwargs): normalized_integration_name = normalize_package_name(integration_name) check_name_kebab = kebab_case_name(integration_name) datadog_checks_base_req = 'datadog-checks-base[deps]>=6.6.0' third_party_install_info = f""""""\ To install the {integration_name} check on your host: 1. Install the [developer toolkit] (https://docs.datadoghq.com/developers/integrations/new_check_howto/#developer-toolkit) on any machine. 2. Run `ddev release build {normalized_integration_name}` to build the package. 3. [Download the Datadog Agent](https://app.datadoghq.com/account/settings#agent). 4. Upload the build artifact to any host with an Agent and run `datadog-agent integration install -w path/to/{normalized_integration_name}/dist/.whl`."""""" if repo_choice == 'core': check_name = normalized_integration_name author = 'Datadog' email = 'help@datadoghq.com' email_packages = 'packages@datadoghq.com' install_info = ( 'The {integration_name} check is included in the [Datadog Agent][2] package.\n' 'No additional installation is needed on your server.'.format(integration_name=integration_name) ) license_header = ( '# (C) Datadog, Inc. 
{year}-present\n' '# All rights reserved\n' '# Licensed under a 3-clause BSD style license (see LICENSE)'.format(year=str(datetime.utcnow().year)) ) support_type = 'core' test_dev_dep = '-e ../datadog_checks_dev' tox_base_dep = '-e../datadog_checks_base[deps]' elif repo_choice == 'marketplace': check_name = normalize_package_name(f""{kwargs.get('author')}_{normalized_integration_name}"") # Updated by the kwargs passed in author = '' email = '' email_packages = '' install_info = third_party_install_info # Static fields license_header = '' support_type = 'partner' test_dev_dep = 'datadog-checks-dev' tox_base_dep = datadog_checks_base_req else: check_name = normalized_integration_name author = 'U.N. Owen' email = email_packages = 'friend@datadog.community' install_info = third_party_install_info license_header = '' support_type = 'contrib' test_dev_dep = 'datadog-checks-dev' tox_base_dep = datadog_checks_base_req config = { 'author': author, 'check_class': f""{''.join(part.capitalize() for part in normalized_integration_name.split('_'))}Check"", 'check_name': check_name, 'integration_name': integration_name, 'check_name_kebab': check_name_kebab, 'email': email, 'email_packages': email_packages, 'guid': uuid4(), 'license_header': license_header, 'install_info': install_info, 'repo_choice': repo_choice, 'support_type': support_type, 'test_dev_dep': test_dev_dep, 'tox_base_dep': tox_base_dep, } config.update(kwargs) return config " 27838,"def read_dbpedia(tf, split, shrink=1, char_based=False): dataset = [] f = tf.extractfile('dbpedia_csv/{}.csv'.format(split)) if sys.version_info > (3, 0): f = io.TextIOWrapper(f, encoding='utf-8') for i, (label, title, text) in enumerate(csv.reader(f)): if i % shrink != 0: continue label = int(label) - 1 # Index begins from 1 tokens = split_text(normalize_text(text), char_based) dataset.append((tokens, label)) return dataset ","def read_dbpedia(tf, split, shrink=1, char_based=False): dataset = [] f = tf.extractfile('dbpedia_csv/{}.csv'.format(split)) if sys.version_info > (3, 0): f = io.TextIOWrapper(f, encoding='utf-8') for i, (label, title, text) in enumerate(csv.reader(f)): if i % shrink != 0: continue label = int(label) - 1 # Index begins from 1 tokens = split_text(normalize_text(text), char_based) dataset.append((tokens, label)) return dataset " 30623,"def get_results_command(client: Client, args: Dict): task_id = args.get('task_id', '') raw: Dict = client.get_results_request(task_id) status = raw.get('status') results = raw.get('results', {}) if not status: raise Exception(""Invalid response from BPA"") job_checks: List[Dict] = [] if status == 'invalid': raise Exception(""Job ID not valid or doesn't exist"") if status == 'complete': bpa = results.get('bpa', {}) if not bpa: raise Exception(""Invalid response from BPA"") for category_name, features in bpa.items(): for feature_name, feature_contents in features.items(): if not feature_contents: # Empty list, no checks continue checks = get_checks_from_feature(feature_contents[0], feature_name, category_name) job_checks.extend(checks) download_url = results.get('download_url') # check that a report was generated, and can be downloaded if download_url is not None: download_report_handler(client, task_id) context = {'PAN-OS-BPA.JobResults(val.JobID && val.JobID === obj.JobID)': { 'JobID': task_id, 'Checks': job_checks, 'Status': status }} human_readable = tableToMarkdown('BPA Results', job_checks) return human_readable, context, results ","def get_results_command(client: Client, args: Dict): task_id = 
args.get('task_id', '') raw: Dict = client.get_results_request(task_id) status = raw.get('status') results = raw.get('results', {}) if not status: raise Exception(""Invalid response from BPA"") job_checks: List[Dict] = [] if status == 'invalid': raise Exception(""Job ID not valid or doesn't exist"") if status == 'complete': bpa = results.get('bpa', {}) if not bpa: raise Exception(""Invalid response from BPA"") for category_name, features in bpa.items(): for feature_name, feature_contents in features.items(): if not feature_contents: # Empty list, no checks continue checks = get_checks_from_feature(feature_contents[0], feature_name, category_name) job_checks.extend(checks) download_url = results.get('download_url') # check that a report was generated, and can be downloaded if download_url: download_report_handler(client, task_id) context = {'PAN-OS-BPA.JobResults(val.JobID && val.JobID === obj.JobID)': { 'JobID': task_id, 'Checks': job_checks, 'Status': status }} human_readable = tableToMarkdown('BPA Results', job_checks) return human_readable, context, results " 25267,"def single_attribute(attribute_path=None): """"""Creates a projection that extracts the value of an attribute path. Args: attribute_path (str): Extracts values from this path, if given. Returns: Projection[any]: A projection that extracts the value of the given attribute path. """""" return _SingleAttributeProjection(attribute_path) ","def single_attribute(attribute_path): """"""Creates a projection that extracts the value of an attribute path. Args: attribute_path (str): Extracts values from this path, if given. Returns: Projection[any]: A projection that extracts the value of the given attribute path. """""" return _SingleAttributeProjection(attribute_path) " 32063,"def test_get_timeout(): """""" Given - Different valid values for timeout and retries parameters When - before setting aws config instance Then - validates the logic of setting read_timeout and connect_timeout values """""" (read, connect) = AWSClient.get_timeout(None) assert read == 60 and connect == 10 (read, connect) = AWSClient.get_timeout(""100"") assert read == 100 and connect == 10 (read, connect) = AWSClient.get_timeout(""200,2"") assert read == 200 and connect == 2 (read, connect) = AWSClient.get_timeout(60) assert read == 60 and connect == 10 (read, connect) = AWSClient.get_timeout(u""60, 10"") assert read == 60 and connect == 10 ","def test_get_timeout(): """""" Given - Different valid values for timeout and retries parameters When - before setting aws config instance Then - validates the logic of setting read_timeout and connect_timeout values """""" (read, connect) = AWSClient.get_timeout(None) assert read == 60 and connect == 10 (read, connect) = AWSClient.get_timeout(""100"") assert read == 100 and connect == 10 (read, connect) = AWSClient.get_timeout(""200,2"") assert read == 200 and connect == 2 (read, connect) = AWSClient.get_timeout(60) assert read == 60 and connect == 10 (read, connect) = AWSClient.get_timeout(u""60, 10"") assert read == 60 and connect == 10, 'Could not decode unicode timeout' " 40007,"def main(): ep = """""" use --nspath to validate against an extension. If --ns is not specified, validate against all namespaces in namespace file. 
"""""" parser = ArgumentParser(description=""Validate an NWB file"", epilog=ep) parser.add_argument(""paths"", type=str, nargs='+', help=""NWB file paths"") parser.add_argument('-p', '--nspath', type=str, help=""the path to the namespace YAML file"") parser.add_argument(""-n"", ""--ns"", type=str, help=""the namespace to validate against"") parser.add_argument(""-lns"", ""--list-namespaces"", dest=""list_namespaces"", action='store_true', help=""List the available namespaces and exit."") feature_parser = parser.add_mutually_exclusive_group(required=False) feature_parser.add_argument(""--cached-namespace"", dest=""cached_namespace"", action='store_true', help=""Use the cached namespace (default: %(default)s)."", default=True) feature_parser.add_argument('--no-cached-namespace', dest=""cached_namespace"", action='store_false', help=""Don't use the cached namespace."") feature_parser.add_argument('--severity', dest=""severity"", type=int, help=""Report anything with the given severity or higher as error (default: %(default)s)."", default=10, choices=range(0, 11)) args = parser.parse_args() ret = 0 if args.nspath: if not os.path.isfile(args.nspath): print(""The namespace file {} is not a valid file."".format(args.nspath), file=sys.stderr) sys.exit(1) if args.cached_namespace: print(""Turning off validation against cached namespace information "" ""as --nspath was passed."", file=sys.stderr) args.cached_namespace = False for path in args.paths: if not os.path.isfile(path): print(""The file {} does not exist."".format(path), file=sys.stderr) ret = 1 continue if args.cached_namespace: catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace) ns_deps = NWBHDF5IO.load_namespaces(catalog, path) s = set(ns_deps.keys()) # determine which namespaces are the most for k in ns_deps: # specific (i.e. extensions) and validate s -= ns_deps[k].keys() # against those namespaces = list(sorted(s)) if len(namespaces) > 0: tm = TypeMap(catalog) manager = BuildManager(tm) specloc = ""cached namespace information"" else: manager = None namespaces = [CORE_NAMESPACE] specloc = ""pynwb namespace information"" print(""The file {} has no cached namespace information. "" ""Falling back to {}."".format(path, specloc), file=sys.stderr) elif args.nspath: catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace) namespaces = catalog.load_namespaces(args.nspath) if len(namespaces) == 0: print(""Could not load namespaces from file {}."".format(args.nspath), file=sys.stderr) sys.exit(1) tm = TypeMap(catalog) manager = BuildManager(tm) specloc = ""--nspath namespace information"" else: manager = None namespaces = [CORE_NAMESPACE] specloc = ""pynwb namespace information"" if args.list_namespaces: print(""\n"".join(namespaces)) ret = 0 continue if args.ns: if args.ns in namespaces: namespaces = [args.ns] else: print(""The namespace {} could not be found in {} as only {} is present."".format( args.ns, specloc, namespaces), file=sys.stderr) ret = 1 continue with NWBHDF5IO(path, mode='r', manager=manager) as io: for ns in namespaces: print(""Validating {} against {} using namespace {}."".format(path, specloc, ns)) ret = ret or _validate_helper(io=io, namespace=ns, severity=args.severity) sys.exit(ret) ","def main(): ep = """""" use --nspath to validate against an extension. If --ns is not specified, validate against all namespaces in namespace file. 
"""""" parser = ArgumentParser(description=""Validate an NWB file"", epilog=ep) parser.add_argument(""paths"", type=str, nargs='+', help=""NWB file paths"") parser.add_argument('-p', '--nspath', type=str, help=""the path to the namespace YAML file"") parser.add_argument(""-n"", ""--ns"", type=str, help=""the namespace to validate against"") parser.add_argument(""-lns"", ""--list-namespaces"", dest=""list_namespaces"", action='store_true', help=""List the available namespaces and exit."") feature_parser = parser.add_mutually_exclusive_group(required=False) feature_parser.add_argument(""--cached-namespace"", dest=""cached_namespace"", action='store_true', help=""Use the cached namespace (default: %(default)s)."", default=True) feature_parser.add_argument('--no-cached-namespace', dest=""cached_namespace"", action='store_false', help=""Don't use the cached namespace."") feature_parser.add_argument('--severity', dest=""severity"", type=int, help=""Report anything with the given severity or higher as errors (default: %(default))."", default=10, choices=range(0, 11)) args = parser.parse_args() ret = 0 if args.nspath: if not os.path.isfile(args.nspath): print(""The namespace file {} is not a valid file."".format(args.nspath), file=sys.stderr) sys.exit(1) if args.cached_namespace: print(""Turning off validation against cached namespace information "" ""as --nspath was passed."", file=sys.stderr) args.cached_namespace = False for path in args.paths: if not os.path.isfile(path): print(""The file {} does not exist."".format(path), file=sys.stderr) ret = 1 continue if args.cached_namespace: catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace) ns_deps = NWBHDF5IO.load_namespaces(catalog, path) s = set(ns_deps.keys()) # determine which namespaces are the most for k in ns_deps: # specific (i.e. extensions) and validate s -= ns_deps[k].keys() # against those namespaces = list(sorted(s)) if len(namespaces) > 0: tm = TypeMap(catalog) manager = BuildManager(tm) specloc = ""cached namespace information"" else: manager = None namespaces = [CORE_NAMESPACE] specloc = ""pynwb namespace information"" print(""The file {} has no cached namespace information. "" ""Falling back to {}."".format(path, specloc), file=sys.stderr) elif args.nspath: catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace) namespaces = catalog.load_namespaces(args.nspath) if len(namespaces) == 0: print(""Could not load namespaces from file {}."".format(args.nspath), file=sys.stderr) sys.exit(1) tm = TypeMap(catalog) manager = BuildManager(tm) specloc = ""--nspath namespace information"" else: manager = None namespaces = [CORE_NAMESPACE] specloc = ""pynwb namespace information"" if args.list_namespaces: print(""\n"".join(namespaces)) ret = 0 continue if args.ns: if args.ns in namespaces: namespaces = [args.ns] else: print(""The namespace {} could not be found in {} as only {} is present."".format( args.ns, specloc, namespaces), file=sys.stderr) ret = 1 continue with NWBHDF5IO(path, mode='r', manager=manager) as io: for ns in namespaces: print(""Validating {} against {} using namespace {}."".format(path, specloc, ns)) ret = ret or _validate_helper(io=io, namespace=ns, severity=args.severity) sys.exit(ret) " 59618,"def use_alias(**aliases): """""" Decorator to add aliases to keyword arguments of a function. Use this decorator above the argument parsing decorators, usually only below ``fmt_docstring``. Replaces the aliases with their desired names before passing them along to the module function. 
Keywords passed to this decorator are the desired argument name and their value is the alias. Adds a dictionary attribute to the function with the aliases. Use in conjunction with ``fmt_docstring`` to insert a list of valid aliases in your docstring. Examples -------- >>> @use_alias(R='region', J='projection') ... def my_module(**kwargs): ... print('R =', kwargs['R'], 'J =', kwargs['J']) >>> my_module(R='bla', J='meh') R = bla J = meh >>> my_module(region='bla', J='meh') R = bla J = meh >>> my_module(R='bla', projection='meh') R = bla J = meh >>> my_module(region='bla', projection='meh') R = bla J = meh >>> my_module(region='bla', projection='meh', J=""bla"") Traceback (most recent call last): ... pygmt.exceptions.GMTInvalidInput: Arguments in short-form (J) and long-form (projection) can't coexist """""" def alias_decorator(module_func): """""" Decorator that replaces the aliases for arguments. """""" @functools.wraps(module_func) def new_module(*args, **kwargs): """""" New module that parses and replaces the registered aliases. """""" for arg, alias in aliases.items(): if alias in kwargs and arg in kwargs: raise GMTInvalidInput( f""Arguments in short-form ({arg}) and long-form ({alias}) can't coexist"" ) if alias in kwargs: kwargs[arg] = kwargs.pop(alias) return module_func(*args, **kwargs) new_module.aliases = aliases return new_module return alias_decorator ","def use_alias(**aliases): """""" Decorator to add aliases to keyword arguments of a function. Use this decorator above the argument parsing decorators, usually only below ``fmt_docstring``. Replaces the aliases with their desired names before passing them along to the module function. Keywords passed to this decorator are the desired argument name and their value is the alias. Adds a dictionary attribute to the function with the aliases. Use in conjunction with ``fmt_docstring`` to insert a list of valid aliases in your docstring. Examples -------- >>> @use_alias(R='region', J='projection') ... def my_module(**kwargs): ... print('R =', kwargs['R'], 'J =', kwargs['J']) >>> my_module(R='bla', J='meh') R = bla J = meh >>> my_module(region='bla', J='meh') R = bla J = meh >>> my_module(R='bla', projection='meh') R = bla J = meh >>> my_module(region='bla', projection='meh') R = bla J = meh >>> my_module( ... region='bla', projection='meh', J=""bla"" ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... pygmt.exceptions.GMTInvalidInput: Arguments in short-form (J) and long-form (projection) can't coexist """""" def alias_decorator(module_func): """""" Decorator that replaces the aliases for arguments. """""" @functools.wraps(module_func) def new_module(*args, **kwargs): """""" New module that parses and replaces the registered aliases. 
"""""" for arg, alias in aliases.items(): if alias in kwargs and arg in kwargs: raise GMTInvalidInput( f""Arguments in short-form ({arg}) and long-form ({alias}) can't coexist"" ) if alias in kwargs: kwargs[arg] = kwargs.pop(alias) return module_func(*args, **kwargs) new_module.aliases = aliases return new_module return alias_decorator " 25141,"def _looks_like_type_subscript(node): """""" Try to figure out if a Name node is used inside a type related subscript :param node: node to check :type node: nodes.Name :return: true if the node is a Name node inside a type related subscript :rtype: bool """""" if isinstance(node, nodes.Name) and isinstance(node.parent, nodes.Subscript): return node.name == ""type"" return False ","def _looks_like_type_subscript(node: nodes.Name) -> bool: """""" Try to figure out if a Name node is used inside a type related subscript :param node: node to check :type node: nodes.Name :return: true if the node is a Name node inside a type related subscript :rtype: bool """""" if isinstance(node, nodes.Name) and isinstance(node.parent, nodes.Subscript): return node.name == ""type"" return False " 42619,"def asset_from_gemini(symbol: str) -> Asset: """"""May raise: - DeserializationError - UnsupportedAsset - UnknownAsset """""" if not isinstance(symbol, str): raise DeserializationError(f'Got non-string type {type(symbol)} for gemini asset') if symbol in UNSUPPORTED_GEMENI_ASSETS: raise UnsupportedAsset(symbol) name = GEMENI_TO_WORLD.get(symbol, symbol) return symbol_to_asset_or_token(name) ","def asset_from_gemini(symbol: str) -> Asset: """"""May raise: - DeserializationError - UnsupportedAsset - UnknownAsset """""" if not isinstance(symbol, str): raise DeserializationError(f'Got non-string type {type(symbol)} for gemini asset') if symbol in UNSUPPORTED_GEMENI_ASSETS: raise UnsupportedAsset(symbol) name = GEMINI_TO_WORLD.get(symbol, symbol) return symbol_to_asset_or_token(name) " 43129,"def test_predict(): model_endpoint = 'http://localhost:5000/model/predict' file_path = 'samples/a-pen-i-am.jpg' with open(file_path, 'rb') as file: file_form = {'image': (file_path, file, 'image/jpeg')} r = requests.post(url=model_endpoint, files=file_form) assert r.status_code == 200 response = r.json() assert response['status'] == 'ok' # Teddy Bear # assert response['predictions'][0]['label_id'] == '88' assert (response['predictions'][0]['label'] == 'toy' or response['predictions'][0]['label'] == 'pen') # assert response['predictions'][0]['probability'] > 0.95 ","def test_predict(): model_endpoint = 'http://localhost:5000/model/predict' file_path = 'samples/a-pen-i-am.jpg' with open(file_path, 'rb') as file: file_form = {'image': (file_path, file, 'image/jpeg')} r = requests.post(url=model_endpoint, files=file_form) assert r.status_code == 200 response = r.json() assert response['status'] == 'ok' # Teddy Bear # assert response['predictions'][0]['label_id'] == '88' assert response['predictions'][0]['label'] in ('toy', 'pen') # assert response['predictions'][0]['probability'] > 0.95 " 32663,"def make_pd_writer(**kwargs) -> Callable: """"""This returns a pd_writer with the desired arguments. 
Example usage: import pandas as pd from snowflake.connector.pandas_tools import make_pd_writer sf_connector_version_df = pd.DataFrame([('snowflake-connector-python', '1.0')], columns=['NAME', 'NEWEST_VERSION']) sf_connector_version_df.to_sql('driver_versions', engine, index=False, method=make_pd_writer(quote_identifiers=False)) # to keep quote_identifiers=True from functools import partial sf_connector_version_df.to_sql( 'driver_versions', engine, index=False, method=make_pd_writer())) Args: kwargs: A dictionary that specifies which arguments to pass to pd_writer """""" # default argument map default_args = {""quote_identifiers"": True} # overwrite with value in kwargs, if it exists for k in default_args.keys(): if k in kwargs: default_args[k] = kwargs[k] return partial(pd_writer, **default_args) ","def make_pd_writer(**kwargs) -> Callable: """"""This returns a pd_writer with the desired arguments. Example usage: import pandas as pd from snowflake.connector.pandas_tools import make_pd_writer sf_connector_version_df = pd.DataFrame([('snowflake-connector-python', '1.0')], columns=['NAME', 'NEWEST_VERSION']) sf_connector_version_df.to_sql('driver_versions', engine, index=False, method=make_pd_writer(quote_identifiers=False)) # to keep quote_identifiers=True from functools import partial sf_connector_version_df.to_sql( 'driver_versions', engine, index=False, method=make_pd_writer())) Args: kwargs: A dictionary that specifies which arguments to pass to pd_writer """""" # default argument map default_args = {""quote_identifiers"": True} default_args.update(kwargs) return partial(pd_writer, **default_args) " 31362,"def escalation_reasons_command(client: Client) -> CommandResults: """"""Get escalation reasons list from TOPdesk"""""" escalation_reasons = client.get_list(""/incidents/escalation-reasons"") return command_with_all_fields_readable_list(results=escalation_reasons, result_name='escalation reasons', output_prefix='escalation_reason', outputs_key_field='id') ","def escalation_reasons_command(client: Client) -> CommandResults: """"""Get escalation reasons list from TOPdesk"""""" escalation_reasons = client.get_list(""/incidents/escalation-reasons"") return command_with_all_fields_readable_list(results=escalation_reasons, result_name='escalation reasons', output_prefix='EscalationReason', outputs_key_field='id') " 29782,"def add_participant_record(studydir, subject, age, sex): participants_tsv = op.join(studydir, 'participants.tsv') participant_id = 'sub-%s' % subject if not create_file_if_missing(participants_tsv, '\t'.join(['participant_id', 'age', 'sex', 'group']) + '\n'): # check if may be subject record already exists with open(participants_tsv) as f: f.readline() known_subjects = {l.split('\t')[0] for l in f.readlines()} if participant_id in known_subjects: return else: # Populate particpants.json (an optional file to describe column names in # participant.tsv). This auto generation will make BIDS-validator happy. 
participants_json = op.join(studydir, 'participants.json') if not op.lexists(participants_json): save_json(participants_json, OrderedDict([ (""participant_id"", OrderedDict([ (""Description"", ""Participant identifier"")])), (""age"", OrderedDict([ (""Description"", ""Age in years (TODO - verify) as in the initial"" "" session, might not be correct for other sessions"")])), (""sex"", OrderedDict([ (""Description"", ""self-rated by participant, M for male/F for "" ""female (TODO: verify)"")])), (""group"", OrderedDict([ (""Description"", ""(TODO: adjust - by default everyone is in "" ""control group)"")])), ]), sort_keys=False) # Add a new participant with open(participants_tsv, 'a') as f: f.write( '\t'.join(map(str, [participant_id, age.lstrip('0').rstrip('Y') if age else 'N/A', sex if sex else 'N/A', 'control'])) + '\n') ","def add_participant_record(studydir, subject, age, sex): participants_tsv = op.join(studydir, 'participants.tsv') participant_id = 'sub-%s' % subject if not create_file_if_missing(participants_tsv, '\t'.join(['participant_id', 'age', 'sex', 'group']) + '\n'): # check if may be subject record already exists with open(participants_tsv) as f: f.readline() known_subjects = {l.split('\t')[0] for l in f.readlines()} if participant_id in known_subjects: return else: # Populate particpants.json (an optional file to describe column names in # participant.tsv). This auto generation will make BIDS-validator happy. participants_json = op.join(studydir, 'participants.json') if not op.lexists(participants_json): save_json(participants_json, OrderedDict([ (""participant_id"", OrderedDict([ (""Description"", ""Participant identifier"")])), (""age"", OrderedDict([ (""Description"", ""Age in years (TODO - verify) as in the initial"" "" session, might not be correct for other sessions"")])), (""sex"", OrderedDict([ (""Description"", ""self-rated by participant, M for male/F for "" ""female (TODO: verify)"")])), (""group"", OrderedDict([ (""Description"", ""(TODO: adjust - by default everyone is in "" ""control group)"")])), ]), sort_keys=False) # Add a new participant with open(participants_tsv, 'a') as f: f.write( '\t'.join(map(str, [participant_id, age.lstrip('0').rstrip('Y') if age else 'N/A', sex if sex else 'n/a', 'control'])) + '\n') " 30274,"def get_spf(auth, spf): spf_context = {} if auth is None: spf_context[""Validation-Result""] = spf.split("" "")[0].lower() sender_ip = re.findall(r""\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"", spf) else: result = re.search(r""spf=(\w+)"", auth) if result is not None: spf_context[""Validation-Result""] = result.group(1).lower() sender_ip = re.findall(r""\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"", auth) if sender_ip != []: spf_context[""Sender-IP""] = sender_ip[0] if spf is not None: spf_context[""Reason""] = re.findall(r""[(](.+)[)]"", spf)[0] return spf_context ","def get_spf(auth, spf): spf_context = {} if auth is None: spf_context[""Validation-Result""] = spf.split("" "")[0].lower() sender_ip = re.findall(r""\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"", spf) else: result = re.search(r""spf=(\w+)"", auth) if result is not None: spf_context[""Validation-Result""] = result.group(1).lower() sender_ip = re.findall(r""\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"", auth) if sender_ip: spf_context[""Sender-IP""] = sender_ip[0] if spf is not None: spf_context[""Reason""] = re.findall(r""[(](.+)[)]"", spf)[0] return spf_context " 11365,"def _convert_datetime_to_utc_int(expires_on): epoch = time.localtime().tm_gmtoff return time.mktime(expires_on.timetuple()) + epoch ","def 
_convert_datetime_to_utc_int(input_datetime): epoch = time.localtime().tm_gmtoff return time.mktime(expires_on.timetuple()) + epoch " 42373,"def get_category_pages(path: Path) -> dict[str, dict]: """"""Get all page names and their metadata at a category path."""""" # Special handling for tags if path == Path(__file__).parent / ""resources/tags"": tags = {} for tag in get_tags(): content = frontmatter.parse(tag.body)[1] if len(content) > 100: # Trim the preview to a maximum of 100 visible characters # This causes some markdown to break, but we ignore that content = content[:100] + ""..."" tags[tag.name] = { ""title"": tag.name, ""description"": markdown.markdown(content), ""icon"": ""fas fa-tag"" } return {name: tags[name] for name in sorted(tags)} pages = {} for item in path.glob(""*.md""): # Only list page if there is no category with the same name if item.is_file() and not item.with_suffix("""").is_dir(): pages[item.stem] = frontmatter.load(item).metadata return pages ","def get_category_pages(path: Path) -> dict[str, dict]: """"""Get all page names and their metadata at a category path."""""" # Special handling for tags if path == Path(__file__).parent / ""resources/tags"": tags = {} for tag in get_tags(): content = frontmatter.parse(tag.body)[1] if len(content) > 100: # Trim the preview to a maximum of 100 visible characters # This causes some markdown to break, but we ignore that content = content[:100] + ""…"" tags[tag.name] = { ""title"": tag.name, ""description"": markdown.markdown(content), ""icon"": ""fas fa-tag"" } return {name: tags[name] for name in sorted(tags)} pages = {} for item in path.glob(""*.md""): # Only list page if there is no category with the same name if item.is_file() and not item.with_suffix("""").is_dir(): pages[item.stem] = frontmatter.load(item).metadata return pages " 20256,"def assemble_output(): rows = [] for page in EnforcementActionPage.objects.all(): if not page.live: continue url = 'https://consumerfinance.gov' + page.get_url() if 'enforcement/actions' not in url: continue page_categories = ','.join( c.get_name_display() for c in page.categories.all()) content = '' soup = BeautifulSoup(str(page.content), 'html.parser') para = soup.findAll(['p', 'h5']) for p in para: content += p.get_text() link = p.find('a', href=True) if link: content += ': ' content += link['href'] content += '\n' row = { 'Title': page.title, 'Content': content, 'Forum': page_categories, 'Docket Numbers': ','.join( d.docket_number for d in page.docket_numbers.all()), 'Initial Filing Date': page.initial_filing_date, 'Statuses': ','.join( d.status for d in page.statuses.all()), 'Products': ','.join( d.product for d in page.products.all()), 'URL': url } rows.append(row) return rows ","def assemble_output(): rows = [] for page in EnforcementActionPage.objects.all(): if not page.live: continue url = 'https://consumerfinance.gov' + page.get_url() if 'enforcement/actions' not in url: continue page_categories = ','.join( c.get_name_display() for c in page.categories.all()) content = '' soup = BeautifulSoup(str(page.content), 'html.parser') para = soup.findAll(['p', 'h5']) for p in para: content += p.get_text() link = p.find('a', href=True) if link: content += ': ' content += link['href'] content += '\n' row = { 'Title': page.title, 'Content': content, 'Forum': page_categories, 'Docket Numbers': ','.join( d.docket_number for d in page.docket_numbers.all()), 'Initial Filing Date': page.initial_filing_date, 'Statuses': ','.join( d.get_status_display() for d in page.statuses.all()), 'Products': 
','.join( d.product for d in page.products.all()), 'URL': url } rows.append(row) return rows " 26401,"def create_project_iam(dependencies, role_member_list): """""" Grant the shared project IAM permissions. """""" resources = [ { # Get the IAM policy first, so as not to remove # any existing bindings. 'name': 'project-iam-policy', 'type': 'cft-iam_project_member.py', 'properties': { 'projectID': '$(ref.project.projectId)', 'roles': role_member_list }, 'metadata': { 'dependsOn': dependencies, 'runtimePolicy': ['UPDATE_ALWAYS'] } } ] return resources ","def create_project_iam(dependencies, role_member_list): """""" Grant the shared project IAM permissions. """""" resources = [ { # Get the IAM policy first, so as not to remove # any existing bindings. 'name': 'project-iam-policy', 'type': 'cft-iam_project_member.py', 'properties': { 'projectId': '$(ref.project.projectId)', 'roles': role_member_list }, 'metadata': { 'dependsOn': dependencies, 'runtimePolicy': ['UPDATE_ALWAYS'] } } ] return resources " 58813,"def dnnl_conv2d( src, weights, stride, padding, dilation, groups, channel_last=False, out_dtype=""float32"", **kwargs, ): """"""Convolution operator in NCHW layout. Parameters ---------- src : tvm.te.Tensor 4-D with shape [batch, in_channel, in_height, in_width] weights : tvm.te.Tensor 4-D with shape [num_filter, in_channel, filter_height, filter_width] stride : int or a list/tuple of two ints Stride size, or [stride_height, stride_width] padding : int or a list/tuple of 2 or 4 ints padding size, or [pad_height, pad_width] for 2 ints, or [pad_top, pad_left, pad_bottom, pad_right] for 4 ints dilation: int or a list/tuple of two ints dilation size, or [dilation_height, dilation_width] groups: str input data layout: NCHW or NHWC channel_last: bool chose if input/output data format is in channel_last format(NHWC) or in plain format(NCHW) out_dtype: str output datatype: now only support float32 Returns ------- Output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """""" assert isinstance(stride, int) or len(stride) == 2 assert isinstance(dilation, int) or len(dilation) == 2 if isinstance(stride, int): stride_h = stride_w = stride else: stride_h, stride_w = stride if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = dilation pre_cast = False post_cast = False if src.dtype == ""float32"": pre_cast = True elif src.dtype == ""bfloat16"": pre_cast = False if out_dtype == ""float32"": post_cast = True elif out_dtype == ""bfloat16"": post_cast = False if channel_last: batch, in_height, in_width, _ = src.shape kernel_h, kernel_w, _, num_filter = weights.shape else: batch, _, in_height, in_width = src.shape num_filter, _, kernel_h, kernel_w = weights.shape dilated_kernel_h = (kernel_h - 1) * dilation_h + 1 dilated_kernel_w = (kernel_w - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) out_channel = num_filter out_height = (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1 out_width = (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1 if channel_last: out_shape = (batch, out_height, out_width, out_channel) else: out_shape = (batch, out_channel, out_height, out_width) return te.extern( out_shape, [src, weights], lambda ins, outs: tvm.tir.call_packed( ""tvm.contrib.dnnl.conv2d"", ins[0], ins[1], outs[0], pad_top, pad_down, pad_left, pad_right, stride[0], stride[1], groups, channel_last, pre_cast, post_cast, ), name=""C"", 
dtype=out_dtype, **kwargs, ) ","def dnnl_conv2d( src, weights, stride, padding, dilation, groups, channel_last=False, out_dtype=""float32"", **kwargs, ): """"""Convolution operator in NCHW layout. Parameters ---------- src : tvm.te.Tensor 4-D with shape [batch, in_channel, in_height, in_width] weights : tvm.te.Tensor 4-D with shape [num_filter, in_channel, filter_height, filter_width] stride : int or a list/tuple of two ints Stride size, or [stride_height, stride_width] padding : int or a list/tuple of 2 or 4 ints padding size, or [pad_height, pad_width] for 2 ints, or [pad_top, pad_left, pad_bottom, pad_right] for 4 ints dilation: int or a list/tuple of two ints dilation size, or [dilation_height, dilation_width] groups: str input data layout: NCHW or NHWC channel_last: bool chose if input/output data format is in channel_last format(NHWC) or in plain format(NCHW) out_dtype: str output datatype: now only support float32 Returns ------- Output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """""" assert isinstance(stride, int) or len(stride) == 2 assert isinstance(dilation, int) or len(dilation) == 2 if isinstance(stride, int): stride_h = stride_w = stride else: stride_h, stride_w = stride if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = dilation pre_cast = src.dtype == ""float32"" post_cast = out_dtype == ""float32"" if channel_last: batch, in_height, in_width, _ = src.shape kernel_h, kernel_w, _, num_filter = weights.shape else: batch, _, in_height, in_width = src.shape num_filter, _, kernel_h, kernel_w = weights.shape dilated_kernel_h = (kernel_h - 1) * dilation_h + 1 dilated_kernel_w = (kernel_w - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) out_channel = num_filter out_height = (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1 out_width = (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1 if channel_last: out_shape = (batch, out_height, out_width, out_channel) else: out_shape = (batch, out_channel, out_height, out_width) return te.extern( out_shape, [src, weights], lambda ins, outs: tvm.tir.call_packed( ""tvm.contrib.dnnl.conv2d"", ins[0], ins[1], outs[0], pad_top, pad_down, pad_left, pad_right, stride[0], stride[1], groups, channel_last, pre_cast, post_cast, ), name=""C"", dtype=out_dtype, **kwargs, ) " 56538,"def remove_job_if_exists(name, context): """"""Remove job with given name. Returns whether job was removed."""""" current_jobs = context.job_queue.get_jobs_by_name(name) job_removed = False for job in current_jobs: job.schedule_removal() job_removed = True return job_removed ","def remove_job_if_exists(name, context): """"""Remove job with given name. Returns whether job was removed."""""" current_jobs = context.job_queue.get_jobs_by_name(name) if not current_jobs: return False for job in current_jobs: job.schedule_removal() return True " 43443,"def CRot3(a, b, c): r""""""Arbitrary two-qubit controlled rotation using three Euler angles. Args: a,b,c (float): rotation angles Returns: array: unitary 4x4 rotation matrix ``rz(c) @ ry(b) @ rz(a)`` """""" return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.exp(-1*1j*(a+c)/2)*np.cos(b/2), -1*np.exp(1j*(a-c)/2)*np.sin(b/2)], [0, 0, np.exp(-1*1j*(a-c)/2)*np.sin(b/2), np.exp(1j*(a+c)/2)*np.cos(b/2)]]) ","def CRot3(a, b, c): r""""""Arbitrary two-qubit controlled rotation using three Euler angles. 
Args: a,b,c (float): rotation angles Returns: array: unitary 4x4 rotation matrix ``rz(c) @ ry(b) @ rz(a)`` """""" return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.exp(-1j*(a+c)/2)*np.cos(b/2), -np.exp(1j*(a-c)/2)*np.sin(b/2)], [0, 0, np.exp(-1j*(a-c)/2)*np.sin(b/2), np.exp(1j*(a+c)/2)*np.cos(b/2)]]) " 50475,"def test_imwrite_symbol_name(): name = '''#!~@$%^&`-+{};',.() []_=.jpg''' imageio.imwrite(name, np.zeros((128, 128, 3), dtype=np.uint8)) im = imageio.imread(name) assert im.shape == (128, 128, 3) ","def test_imwrite_symbol_name(): if sys.platform == ""linux"": name = '''#!~:@$%^&`-+{};',.()? []_=.jpg''' elif sys.platform == ""win32"": name = '''#!~@$%^&`-+{};',.() []_=.jpg''' elif sys.platform == ""darwin"": name = '''#!~@$%^&`-+{};',.()? []_=.jpg''' else: pytest.skip(""Unknown OS."") imageio.imwrite(name, np.zeros((128, 128, 3), dtype=np.uint8)) im = imageio.imread(name) assert im.shape == (128, 128, 3) " 29347,"def test_function(unused_arg): return OBJECT ","def test_function(unused_arg): return builtins.object " 5038,"def _validate_toolbar(s): s = ValidateInStrings( 'toolbar', ['None', 'toolbar2', 'toolmanager'], ignorecase=True)(s) if s == 'toolmanager': cbook._warn_external( ""Treat the new Tool classes introduced in v1.5 as experimental "" ""for now, the API will likely change in version 2.1 and perhaps "" ""the rcParam as well."") return s ","def _validate_toolbar(s): s = ValidateInStrings( 'toolbar', ['None', 'toolbar2', 'toolmanager'], ignorecase=True)(s) if s == 'toolmanager': cbook._warn_external( ""Treat the new Tool classes introduced in v1.5 as experimental "" ""for now, the API and rcParam may change in future versions."") return s " 17482,"def remove_duplicates(entrypoints): # sort and group entrypoints by name entrypoints = sorted(entrypoints, key=lambda ep: ep.name) entrypoints_grouped = itertools.groupby(entrypoints, key=lambda ep: ep.name) # check if there are multiple entrypoints for the same name unique_entrypoints = [] for name, matches in entrypoints_grouped: matches = list(matches) unique_entrypoints.append(matches[0]) matches_len = len(matches) if matches_len > 1: selected_module_name = matches[0].name all_module_names = [e.name for e in matches] warnings.warn( f""Found {matches_len} entrypoints for the engine name {name}:"" f""\n {all_module_names}.\n It will be used: {selected_module_name}."", RuntimeWarning, ) return unique_entrypoints ","def remove_duplicates(entrypoints): # sort and group entrypoints by name entrypoints = sorted(entrypoints, key=lambda ep: ep.name) entrypoints_grouped = itertools.groupby(entrypoints, key=lambda ep: ep.name) # check if there are multiple entrypoints for the same name unique_entrypoints = [] for name, matches in entrypoints_grouped: matches = list(matches) unique_entrypoints.append(matches[0]) matches_len = len(matches) if matches_len > 1: selected_module_name = matches[0].name all_module_names = [e.name for e in matches] warnings.warn( f""Found {matches_len} entrypoints for the engine name {name}:"" f""\n {all_module_names}.\n The entrypoint {selected_module_name} will be used."", RuntimeWarning, ) return unique_entrypoints " 29571,"def go(): check_python_3() warn(""The distributed version of dask-mpi is depricated. Please use dask-mpi, more information at http://mpi.dask.org"") main() ","def go(): check_python_3() warn(""The dask-mpi command line utility in the `distributed` package is deprecated. Please install the `dask-mpi` package instead. 
More information is available at https://mpi.dask.org"") main() " 22823,"def sensor_string(make, model): if make != 'unknown': # remove duplicate 'make' information in 'model' model = model.replace(make, '') return (make.strip() + ' ' + model.strip()).strip().lower() ","def sensor_string(make, model): if make != 'unknown': # remove duplicate 'make' information in 'model' model = model.replace(make, '') return '{0:s} {1:s}'.format(make.strip(), model.strip()).lower() " 27915,"def clip(a, a_min, a_max): """"""Clips the values of an array to a given interval. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Args: a (~chainerx.ndarray): Array containing elements to clip. a_min (scalar): Maximum value. a_max (scalar): Minimum value. Returns: ~chainerx.ndarray: An array with the elements of ``a``, but where values < ``a_min`` are replaced with ``a_min``, and those > ``a_max`` with ``a_max``. Note: The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are not supported yet. Note: During backpropagation, this function propagates the gradient of the output array to the input array ``a``. .. seealso:: :func:`numpy.clip` """""" if a_min is None: a_min = a.min() if a_max is None: a_max = a.max() return -chainerx.maximum(-chainerx.maximum(a, a_min), -a_max) ","def clip(a, a_min, a_max): """"""Clips the values of an array to a given interval. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Args: a (~chainerx.ndarray): Array containing elements to clip. a_min (scalar): Maximum value. a_max (scalar): Minimum value. Returns: ~chainerx.ndarray: An array with the elements of ``a``, but where values < ``a_min`` are replaced with ``a_min``, and those > ``a_max`` with ``a_max``. Note: The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are not supported yet. Note: During backpropagation, this function propagates the gradient of the output array to the input array ``a``. .. seealso:: :func:`numpy.clip` """""" if a_min is None: a_min = a.min() if a_max is None: return chainerx.maximum(a, a_min) return -chainerx.maximum(-chainerx.maximum(a, a_min), -a_max) " 33102,"def kill_scdaemon(): killed = False try: # Works for Windows. from win32com.client import GetObject from win32api import OpenProcess, CloseHandle, TerminateProcess wmi = GetObject(""winmgmts:"") ps = wmi.InstancesOf(""Win32_Process"") for p in ps: if p.Properties_(""Name"").Value == ""scdaemon.exe"": pid = p.Properties_(""ProcessID"").Value handle = OpenProcess(1, False, pid) TerminateProcess(handle, -1) CloseHandle(handle) killed = True except ImportError: # Works for Linux and OS X. return_code = subprocess.call([shutil.which(""pkill""), ""-9"", ""scdaemon""]) # nosec if return_code == 0: killed = True if killed: sleep(0.1) return killed ","def kill_scdaemon(): killed = False try: # Works for Windows. from win32com.client import GetObject from win32api import OpenProcess, CloseHandle, TerminateProcess wmi = GetObject(""winmgmts:"") ps = wmi.InstancesOf(""Win32_Process"") for p in ps: if p.Properties_(""Name"").Value == ""scdaemon.exe"": pid = p.Properties_(""ProcessID"").Value handle = OpenProcess(1, False, pid) TerminateProcess(handle, -1) CloseHandle(handle) killed = True except ImportError: # Works for Linux and OS X. 
return_code = subprocess.call([""pkill"", ""-9"", ""scdaemon""]) # nosec if return_code == 0: killed = True if killed: sleep(0.1) return killed " 27072,"def replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None: """""" Replaces extra requirement with provider package. The intention here is that when the provider is added as dependency of extra, there is no need to add the dependencies separately. This is not needed and even harmful, because in case of future versions of the provider, the requirements might change, so hard-coding requirements from the version that was available at the release time might cause dependency conflicts in the future. Say for example that you have salesforce provider with those deps: { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] } Initially ['salesforce'] extra has those requirements and it works like that when you install it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when the production installation is used, The dependencies are changed: { 'salesforce': ['apache-airflow-providers-salesforce'] } And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies: ['simple-salesforce>=1.0.0', 'tableauserverclient'] So transitively 'salesforce' extra has all the requirements it needs and in case the provider changes its dependencies, they will transitively change as well. In the constraint mechanism we save both - provider versions and it's dependencies version, which means that installation using constraints is repeatable. For K8s, Celery and Dask which are both ""Core executors"" and ""Providers"" we have to add the base dependencies to the core as well - in order to mitigate problems where newer version of provider will have less strict limits. This should be done for both: extras and their deprecated aliases. This is not a full protection however, the way extras work, this will not add ""hard"" limits for airflow and the user who does not use constraints :param extra: Name of the extra to add providers to :param providers: list of provider ids """""" if extra in ['cncf.kubernetes', 'kubernetes', 'celery']: EXTRAS_REQUIREMENTS[extra].extend( [get_provider_package_from_package_id(package_name) for package_name in providers] ) else: EXTRAS_REQUIREMENTS[extra] = [ get_provider_package_from_package_id(package_name) for package_name in providers ] ","def replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None: """""" Replaces extra requirement with provider package. The intention here is that when the provider is added as dependency of extra, there is no need to add the dependencies separately. This is not needed and even harmful, because in case of future versions of the provider, the requirements might change, so hard-coding requirements from the version that was available at the release time might cause dependency conflicts in the future. Say for example that you have salesforce provider with those deps: { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] } Initially ['salesforce'] extra has those requirements and it works like that when you install it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). 
However, when the production installation is used, The dependencies are changed: { 'salesforce': ['apache-airflow-providers-salesforce'] } And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies: ['simple-salesforce>=1.0.0', 'tableauserverclient'] So transitively 'salesforce' extra has all the requirements it needs and in case the provider changes its dependencies, they will transitively change as well. In the constraint mechanism we save both - provider versions and it's dependencies version, which means that installation using constraints is repeatable. For K8s, Celery and Dask which are both ""Core executors"" and ""Providers"" we have to add the base dependencies to the core as well - in order to mitigate problems where newer version of provider will have less strict limits. This should be done for both extras and their deprecated aliases. This is not a full protection however, the way extras work, this will not add ""hard"" limits for airflow and the user who does not use constraints :param extra: Name of the extra to add providers to :param providers: list of provider ids """""" if extra in ['cncf.kubernetes', 'kubernetes', 'celery']: EXTRAS_REQUIREMENTS[extra].extend( [get_provider_package_from_package_id(package_name) for package_name in providers] ) else: EXTRAS_REQUIREMENTS[extra] = [ get_provider_package_from_package_id(package_name) for package_name in providers ] " 45624,"def parse_domain_upload_file(contents, fname): """""" :param (str) contents: returned by a dcc.Upload 'contents' prop :param (str) fname: the filename associated with the dcc.Upload component :return: formatted protein domain data for the dash_bio.NeedlePlot component """""" data = [] if contents is not None: content_type, content_string = contents.split(',') decoded = base64.b64decode(content_string) if 'json' in fname: # Assume that the user uploaded a CSV file json_data = json.loads(decoded.decode('utf-8')) data = parse_protein_domains_data(json_data) return data ","def parse_domain_upload_file(contents, fname): """""" :param (str) contents: returned by a dcc.Upload 'contents' prop :param (str) fname: the filename associated with the dcc.Upload component :return: formatted protein domain data for the dash_bio.NeedlePlot component """""" data = [] if contents is not None: content_type, content_string = contents.split(',') decoded = base64.b64decode(content_string) if fname.split('.')[-1] == 'json': # Assume that the user uploaded a CSV file json_data = json.loads(decoded.decode('utf-8')) data = parse_protein_domains_data(json_data) return data " 50256,"def remove_the_oldest_user_address_if_address_limit_is_reached(user: ""User""): """"""Remove the oldest user address when max address limit is reached."""""" if not is_user_address_limit_reached(user): return remove_the_oldest_user_address(user) ","def remove_the_oldest_user_address_if_address_limit_is_reached(user: ""User""): """"""Remove the oldest user address when max address limit is reached."""""" if is_user_address_limit_reached(user): remove_the_oldest_user_address(user) " 20023,"def analyze_stem(rgb_img, stem_objects): """""" Calculate angle of segments (in degrees) by fitting a linear regression line to segments. 
Inputs: rgb_img = RGB image to plot debug image stem_objects = List of stem segments (output from segment_sort function) Returns: labeled_img = Stem analysis debugging image :param rgb_img: numpy.ndarray :param stem_objects: list :return labeled_img: numpy.ndarray """""" labeled_img = np.copy(rgb_img) img_x, img_y, _ = np.shape(labeled_img) grouped_stem = np.vstack(stem_objects) # Find vertical height of the stem by measuring bounding box stem_x, stem_y, width, height = cv2.boundingRect(grouped_stem) # Calculate stem angle [vx, vy, x, y] = cv2.fitLine(grouped_stem, cv2.DIST_L2, 0, 0.01, 0.01) slope = -vy / vx # Calculate stem path length stem_length = cv2.arcLength(grouped_stem, False) / 2 outputs.add_observation(variable='stem_height', trait='vertical length of stem segments', method='plantcv.plantcv.morphology.analyze_stem', scale='pixels', datatype=float, value=height, label=None) outputs.add_observation(variable='stem_angle', trait='angle of combined stem object', method='plantcv.plantcv.morphology.analyze_stem', scale='degrees', datatype=float, value=float(slope), label=None) outputs.add_observation(variable='stem_length', trait='path length of combined stem object', method='plantcv.plantcv.morphology.analyze_stem', scale='None', datatype=float, value=stem_length, label=None) if params.debug is not None: params.device += 1 # Draw culm_height cv2.line(labeled_img, (int(stem_x), stem_y), (int(stem_x), stem_y + height), (0, 255, 0), params.line_thickness) # Draw combined stem angle x_min = 0 # Set bounds for regression lines to get drawn x_max = img_x intercept1 = int(((x - x_min) * slope) + y) intercept2 = int(((x - x_max) * slope) + y) if slope > 1000000 or slope < -1000000: print(""Slope is "", slope, "" and cannot be plotted."") else: cv2.line(labeled_img, (x_max - 1, intercept2), (x_min, intercept1), (0, 0, 255), 1) if params.debug == 'print': print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_stem_analysis.png')) elif params.debug == 'plot': plot_image(labeled_img) return labeled_img ","def analyze_stem(rgb_img, stem_objects): """""" Calculate angle of segments (in degrees) by fitting a linear regression line to segments. 
Inputs: rgb_img = RGB image to plot debug image stem_objects = List of stem segments (output from segment_sort function) Returns: labeled_img = Stem analysis debugging image :param rgb_img: numpy.ndarray :param stem_objects: list :return labeled_img: numpy.ndarray """""" params.device += 1 labeled_img = np.copy(rgb_img) img_x, img_y, _ = np.shape(labeled_img) grouped_stem = np.vstack(stem_objects) # Find vertical height of the stem by measuring bounding box stem_x, stem_y, width, height = cv2.boundingRect(grouped_stem) # Calculate stem angle [vx, vy, x, y] = cv2.fitLine(grouped_stem, cv2.DIST_L2, 0, 0.01, 0.01) slope = -vy / vx # Calculate stem path length stem_length = cv2.arcLength(grouped_stem, False) / 2 outputs.add_observation(variable='stem_height', trait='vertical length of stem segments', method='plantcv.plantcv.morphology.analyze_stem', scale='pixels', datatype=float, value=height, label=None) outputs.add_observation(variable='stem_angle', trait='angle of combined stem object', method='plantcv.plantcv.morphology.analyze_stem', scale='degrees', datatype=float, value=float(slope), label=None) outputs.add_observation(variable='stem_length', trait='path length of combined stem object', method='plantcv.plantcv.morphology.analyze_stem', scale='None', datatype=float, value=stem_length, label=None) if params.debug is not None: params.device += 1 # Draw culm_height cv2.line(labeled_img, (int(stem_x), stem_y), (int(stem_x), stem_y + height), (0, 255, 0), params.line_thickness) # Draw combined stem angle x_min = 0 # Set bounds for regression lines to get drawn x_max = img_x intercept1 = int(((x - x_min) * slope) + y) intercept2 = int(((x - x_max) * slope) + y) if slope > 1000000 or slope < -1000000: print(""Slope is "", slope, "" and cannot be plotted."") else: cv2.line(labeled_img, (x_max - 1, intercept2), (x_min, intercept1), (0, 0, 255), 1) if params.debug == 'print': print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_stem_analysis.png')) elif params.debug == 'plot': plot_image(labeled_img) return labeled_img " 40012,"def get_cached_namespaces_to_validate(path): """""" Determine the most specific namespace(s) (i.e., extensions) that are cached in the given NWB file that should be used for validation. Example ------- The following example illustrates how we can use this function to validate against namespaces cached in a file. This is useful, e.g., when a file was created using an extension >>> from pynwb import validate >>> from pynwb.validate import get_cached_namespaces_to_validate >>> path = ""my_nwb_file.nwb"" >>> validate_namespaces, manager, cached_namespaces = get_cached_namespaces_to_validate(path) >>> with NWBHDF5IO(path, ""r"", manager=manager) as reader: >>> errors = [] >>> for ns in validate_namespaces: >>> errors += validate(io=reader, namespace=ns) :param path: Path for the NWB file :return: Tuple with: - List of strings with the most specific namespace(s) to use for validation. - BuildManager object for opening the file for validation - Dict with the full result from NWBHDF5IO.load_namespaces """""" catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace) ns_deps = NWBHDF5IO.load_namespaces(catalog, path) # determine which namespaces are the most specific (i.e. 
extensions) and validate against those s = set(ns_deps.keys()) for k in ns_deps: s -= ns_deps[k].keys() # TODO remove this workaround for issue https://github.com/NeurodataWithoutBorders/pynwb/issues/1357 if 'hdmf-experimental' in s: s.remove('hdmf-experimental') # remove validation of hdmf-experimental for now namespaces = list(sorted(s)) if len(namespaces) > 0: tm = TypeMap(catalog) manager = BuildManager(tm) else: manager = None return namespaces, manager, ns_deps ","def get_cached_namespaces_to_validate(path): """""" Determine the most specific namespace(s) (i.e., extensions) that are cached in the given NWB file that should be used for validation. Example ------- The following example illustrates how we can use this function to validate against namespaces cached in a file. This is useful, e.g., when a file was created using an extension >>> from pynwb import validate >>> from pynwb.validate import get_cached_namespaces_to_validate >>> path = ""my_nwb_file.nwb"" >>> validate_namespaces, manager, cached_namespaces = get_cached_namespaces_to_validate(path) >>> with NWBHDF5IO(path, ""r"", manager=manager) as reader: >>> errors = [] >>> for ns in validate_namespaces: >>> errors += validate(io=reader, namespace=ns) :param path: Path for the NWB file :return: Tuple with: - List of strings with the most specific namespace(s) to use for validation. - BuildManager object for opening the file for validation - Dict with the full result from NWBHDF5IO.load_namespaces """""" catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace) ns_deps = NWBHDF5IO.load_namespaces(catalog, path) # determine which namespaces are the most specific (i.e. extensions) and validate against those s = set(ns_deps.keys()) for k in ns_deps: s -= ns_deps[k].keys() # TODO remove this workaround for issue https://github.com/NeurodataWithoutBorders/pynwb/issues/1357 s.discard('hdmf-experimental') # remove validation of hdmf-experimental for now namespaces = list(sorted(s)) if len(namespaces) > 0: tm = TypeMap(catalog) manager = BuildManager(tm) else: manager = None return namespaces, manager, ns_deps " 31115,"def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any, build_number: str, private_packs: list, current_commit_hash: str, index_generation: int, is_private: bool = False, force_upload: bool = False, previous_commit_hash: str = None): """""" Upload updated index zip to cloud storage. :param index_folder_path: index folder full path. :param extract_destination_path: extract folder full path. :param index_blob: google cloud storage object that represents index.zip blob. :param build_number: circleCI build number, used as an index revision. :param private_packs: List of private packs and their price. :param current_commit_hash: last commit hash of head. :param index_generation: downloaded index generation. :param is_private: Indicates if upload is private. :param force_upload: Indicates if force upload or not. :param previous_commit_hash: The previous commit hash to diff with. :returns None. 
"""""" if force_upload: # If we force upload we don't want to update the commit in the index.json file, # this is to be able to identify all changed packs in the next upload commit = previous_commit_hash logging.info('Force upload flow - Index commit hash shuould not be changed') logging.debug('commit hash is: {commit}') else: # Otherwise, update the index with the current commit hash (the commit of the upload) commit = current_commit_hash logging.info('Updating production index commit hash to master last commit hash') logging.debug('commit hash is: {commit}') with open(os.path.join(index_folder_path, f""{GCPConfig.INDEX_NAME}.json""), ""w+"") as index_file: index = { 'revision': build_number, 'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT), 'packs': private_packs, 'commit': commit } json.dump(index, index_file, indent=4) index_zip_name = os.path.basename(index_folder_path) index_zip_path = shutil.make_archive(base_name=index_folder_path, format=""zip"", root_dir=extract_destination_path, base_dir=index_zip_name) try: index_blob.reload() current_index_generation = index_blob.generation index_blob.cache_control = ""no-cache,max-age=0"" # disabling caching for index blob if is_private or current_index_generation == index_generation: index_blob.upload_from_filename(index_zip_path) logging.success(f""Finished uploading {GCPConfig.INDEX_NAME}.zip to storage."") else: logging.critical(f""Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation"") logging.critical(f""Downloaded index generation: {index_generation}"") logging.critical(f""Current index generation: {current_index_generation}"") sys.exit(0) except Exception: logging.exception(f""Failed in uploading {GCPConfig.INDEX_NAME}."") sys.exit(1) finally: shutil.rmtree(index_folder_path) ","def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any, build_number: str, private_packs: list, current_commit_hash: str, index_generation: int, is_private: bool = False, force_upload: bool = False, previous_commit_hash: str = None): """""" Upload updated index zip to cloud storage. :param index_folder_path: index folder full path. :param extract_destination_path: extract folder full path. :param index_blob: google cloud storage object that represents index.zip blob. :param build_number: circleCI build number, used as an index revision. :param private_packs: List of private packs and their price. :param current_commit_hash: last commit hash of head. :param index_generation: downloaded index generation. :param is_private: Indicates if upload is private. :param force_upload: Indicates if force upload or not. :param previous_commit_hash: The previous commit hash to diff with. :returns None. 
"""""" if force_upload: # If we force upload we don't want to update the commit in the index.json file, # this is to be able to identify all changed packs in the next upload commit = previous_commit_hash logging.info('Force upload flow - Index commit hash shuould not be changed') logging.debug(f'commit hash is: {commit}') else: # Otherwise, update the index with the current commit hash (the commit of the upload) commit = current_commit_hash logging.info('Updating production index commit hash to master last commit hash') logging.debug('commit hash is: {commit}') with open(os.path.join(index_folder_path, f""{GCPConfig.INDEX_NAME}.json""), ""w+"") as index_file: index = { 'revision': build_number, 'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT), 'packs': private_packs, 'commit': commit } json.dump(index, index_file, indent=4) index_zip_name = os.path.basename(index_folder_path) index_zip_path = shutil.make_archive(base_name=index_folder_path, format=""zip"", root_dir=extract_destination_path, base_dir=index_zip_name) try: index_blob.reload() current_index_generation = index_blob.generation index_blob.cache_control = ""no-cache,max-age=0"" # disabling caching for index blob if is_private or current_index_generation == index_generation: index_blob.upload_from_filename(index_zip_path) logging.success(f""Finished uploading {GCPConfig.INDEX_NAME}.zip to storage."") else: logging.critical(f""Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation"") logging.critical(f""Downloaded index generation: {index_generation}"") logging.critical(f""Current index generation: {current_index_generation}"") sys.exit(0) except Exception: logging.exception(f""Failed in uploading {GCPConfig.INDEX_NAME}."") sys.exit(1) finally: shutil.rmtree(index_folder_path) " 32515,"def create_events_search(client: Client, fetch_mode: str, events_columns: str, events_limit: int, offense_id: int, offense_start_time: str = None, return_raw_response: bool = False, ) -> str: additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \ if fetch_mode == FetchMode.correlations_events_only.value else '' now: int = int(datetime.now().timestamp() * 1000) try: # Get all the events starting from one hour after epoch if not offense_start_time: offense = client.offenses_list(offense_id=offense_id) offense_start_time = offense['start_time'] query_expression = ( f'SELECT {events_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} ' f'START {offense_start_time} STOP {now}' ) print_debug_msg(f'Creating search for offense ID: {offense_id}, ' f'query_expression: {query_expression}') search_response = client.search_create(query_expression) print_debug_msg(f'Created search for offense ID: {offense_id}, ' f'Start Time: {offense_start_time}, ' f'events_limit: {events_limit}, ' f'ret_value: {search_response}.') if return_raw_response: return search_response return search_response['search_id'] if search_response['search_id'] else QueryStatus.ERROR.value except Exception as e: print_debug_msg(f'Search for {offense_id} failed. 
Error: {e}') time.sleep(FAILURE_SLEEP) return QueryStatus.ERROR.value ","def create_events_search(client: Client, fetch_mode: str, events_columns: str, events_limit: int, offense_id: int, offense_start_time: str = None, return_raw_response: bool = False, ) -> str: additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \ if fetch_mode == FetchMode.correlations_events_only.value else '' now_millis: int = int(datetime.now().timestamp() * 1000) try: # Get all the events starting from one hour after epoch if not offense_start_time: offense = client.offenses_list(offense_id=offense_id) offense_start_time = offense['start_time'] query_expression = ( f'SELECT {events_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} ' f'START {offense_start_time} STOP {now}' ) print_debug_msg(f'Creating search for offense ID: {offense_id}, ' f'query_expression: {query_expression}') search_response = client.search_create(query_expression) print_debug_msg(f'Created search for offense ID: {offense_id}, ' f'Start Time: {offense_start_time}, ' f'events_limit: {events_limit}, ' f'ret_value: {search_response}.') if return_raw_response: return search_response return search_response['search_id'] if search_response['search_id'] else QueryStatus.ERROR.value except Exception as e: print_debug_msg(f'Search for {offense_id} failed. Error: {e}') time.sleep(FAILURE_SLEEP) return QueryStatus.ERROR.value " 4552,"def _permuted_ols_on_chunk(scores_original_data, tested_vars, target_vars, thread_id, confounding_vars=None, n_perm=10000, n_perm_chunk=10000, intercept_test=True, two_sided_test=True, random_state=None, verbose=0): """"""Massively univariate group analysis with permuted OLS on a data chunk. To be used in a parallel computing context. Parameters ---------- scores_original_data : array-like, shape=(n_descriptors, n_regressors) t-scores obtained for the original (non-permuted) data. tested_vars : array-like, shape=(n_samples, n_regressors) Explanatory variates. target_vars : array-like, shape=(n_samples, n_targets) fMRI data. F-ordered for efficient computations. thread_id : int process id, used for display. confounding_vars : array-like, shape=(n_samples, n_covars) Clinical data (covariates). n_perm : int, Tomtal number of permutations to perform, only used for display in this function. n_perm_chunk : int, Number of permutations to be performed. intercept_test : boolean, Change the permutation scheme (swap signs for intercept, switch labels otherwise). See reference two_sided_test : boolean, If True, performs an unsigned t-test. Both positive and negative effects are considered; the null hypothesis is that the effect is zero. If False, only positive effects are considered as relevant. The null hypothesis is that the effect is zero or negative. random_state : int or None, Seed for random number generator, to have the same permutations in each computing units. verbose: int, optional, default is 0. Defines the verbosity level. Returns ------- h0_fmax_part : array-like, shape=(n_perm_chunk, ) Distribution of the (max) t-statistic under the null hypothesis (limited to this permutation chunk). References ---------- * Fisher, R. A. (1935). The design of experiments. 
"""""" # initialize the seed of the random generator rng = check_random_state(random_state) n_samples, n_regressors = tested_vars.shape n_descriptors = target_vars.shape[1] # run the permutations t0 = time.time() h0_fmax_part = np.empty((n_perm_chunk, n_regressors)) scores_as_ranks_part = np.zeros((n_regressors, n_descriptors)) for i in range(n_perm_chunk): if intercept_test: # sign swap (random multiplication by 1 or -1) target_vars = (target_vars * (rng.randint(2, size=(n_samples, 1)) * 2 - 1)) else: # shuffle data # Regarding computation costs, we choose to shuffle testvars # and covars rather than fmri_signal. # Also, it is important to shuffle tested_vars and covars # jointly to simplify t-scores computation (null dot product). shuffle_idx = rng.permutation(n_samples) tested_vars = tested_vars[shuffle_idx] if confounding_vars is not None: confounding_vars = confounding_vars[shuffle_idx] # OLS regression on randomized data perm_scores = np.asfortranarray( _t_score_with_covars_and_normalized_design(tested_vars, target_vars, confounding_vars)) if two_sided_test: perm_scores = np.fabs(perm_scores) h0_fmax_part[i] = np.nanmax(perm_scores, axis=0) # find the rank of the original scores in h0_part # (when n_descriptors or n_perm are large, it can be quite long to # find the rank of the original scores into the whole H0 distribution. # Here, it is performed in parallel by the workers involded in the # permutation computation) scores_as_ranks_part += (h0_fmax_part[i].reshape((-1, 1)) < scores_original_data.T) if verbose > 0: step = 11 - min(verbose, 10) if i % step == 0: # If there is only one job, progress information is fixed if n_perm == n_perm_chunk: crlf = ""\r"" else: crlf = ""\n"" percent = float(i) / n_perm_chunk percent = round(percent * 100, 2) dt = time.time() - t0 remaining = (100. - percent) / max(0.01, percent) * dt sys.stderr.write( ""Job #%d, processed %d/%d permutations "" ""(%0.2f%%, %i seconds remaining)%s"" % (thread_id, i, n_perm_chunk, percent, remaining, crlf)) return scores_as_ranks_part, h0_fmax_part.T ","def _permuted_ols_on_chunk(scores_original_data, tested_vars, target_vars, thread_id, confounding_vars=None, n_perm=10000, n_perm_chunk=10000, intercept_test=True, two_sided_test=True, random_state=None, verbose=0): """"""Massively univariate group analysis with permuted OLS on a data chunk. To be used in a parallel computing context. Parameters ---------- scores_original_data : array-like, shape=(n_descriptors, n_regressors) t-scores obtained for the original (non-permuted) data. tested_vars : array-like, shape=(n_samples, n_regressors) Explanatory variates. target_vars : array-like, shape=(n_samples, n_targets) fMRI data. F-ordered for efficient computations. thread_id : int process id, used for display. confounding_vars : array-like, shape=(n_samples, n_covars) Clinical data (covariates). n_perm : int, Total number of permutations to perform, only used for display in this function. n_perm_chunk : int, Number of permutations to be performed. intercept_test : boolean, Change the permutation scheme (swap signs for intercept, switch labels otherwise). See reference two_sided_test : boolean, If True, performs an unsigned t-test. Both positive and negative effects are considered; the null hypothesis is that the effect is zero. If False, only positive effects are considered as relevant. The null hypothesis is that the effect is zero or negative. random_state : int or None, Seed for random number generator, to have the same permutations in each computing units. 
verbose: int, optional, default is 0. Defines the verbosity level. Returns ------- h0_fmax_part : array-like, shape=(n_perm_chunk, ) Distribution of the (max) t-statistic under the null hypothesis (limited to this permutation chunk). References ---------- * Fisher, R. A. (1935). The design of experiments. """""" # initialize the seed of the random generator rng = check_random_state(random_state) n_samples, n_regressors = tested_vars.shape n_descriptors = target_vars.shape[1] # run the permutations t0 = time.time() h0_fmax_part = np.empty((n_perm_chunk, n_regressors)) scores_as_ranks_part = np.zeros((n_regressors, n_descriptors)) for i in range(n_perm_chunk): if intercept_test: # sign swap (random multiplication by 1 or -1) target_vars = (target_vars * (rng.randint(2, size=(n_samples, 1)) * 2 - 1)) else: # shuffle data # Regarding computation costs, we choose to shuffle testvars # and covars rather than fmri_signal. # Also, it is important to shuffle tested_vars and covars # jointly to simplify t-scores computation (null dot product). shuffle_idx = rng.permutation(n_samples) tested_vars = tested_vars[shuffle_idx] if confounding_vars is not None: confounding_vars = confounding_vars[shuffle_idx] # OLS regression on randomized data perm_scores = np.asfortranarray( _t_score_with_covars_and_normalized_design(tested_vars, target_vars, confounding_vars)) if two_sided_test: perm_scores = np.fabs(perm_scores) h0_fmax_part[i] = np.nanmax(perm_scores, axis=0) # find the rank of the original scores in h0_part # (when n_descriptors or n_perm are large, it can be quite long to # find the rank of the original scores into the whole H0 distribution. # Here, it is performed in parallel by the workers involded in the # permutation computation) scores_as_ranks_part += (h0_fmax_part[i].reshape((-1, 1)) < scores_original_data.T) if verbose > 0: step = 11 - min(verbose, 10) if i % step == 0: # If there is only one job, progress information is fixed if n_perm == n_perm_chunk: crlf = ""\r"" else: crlf = ""\n"" percent = float(i) / n_perm_chunk percent = round(percent * 100, 2) dt = time.time() - t0 remaining = (100. - percent) / max(0.01, percent) * dt sys.stderr.write( ""Job #%d, processed %d/%d permutations "" ""(%0.2f%%, %i seconds remaining)%s"" % (thread_id, i, n_perm_chunk, percent, remaining, crlf)) return scores_as_ranks_part, h0_fmax_part.T " 58881,"def convert_color_to_dtype(data, output_dtype): assert output_dtype in [""uint8"", ""uint16""] column_names = [""red"", ""green"", ""blue""] input_dtype = get_color_dtype(data, column_names) if input_dtype is not None: # Color information in las/laz files is stored as uint8 or uint16 assert input_dtype in [""uint8"", ""uint16""] if input_dtype == ""uint8"" and output_dtype == ""uint16"": data[""points""].loc[:, column_names] *= 256 elif input_dtype == ""uint16"" and output_dtype == ""uint8"": data[""points""].loc[:, column_names] /= 256 data[""points""] = data[""points""].astype( {""red"": output_dtype, ""green"": output_dtype, ""blue"": output_dtype}) return data ","def convert_color_to_dtype(data, output_dtype): assert output_dtype in [""uint8"", ""uint16""] column_names = [""red"", ""green"", ""blue""] input_dtype = get_color_dtype(data, column_names) if input_dtype is not None: # Color information in las/laz files is stored as uint8 or uint16 if input_dtype not in [""uint8"", ""uint16""]: raise ValueError(f""Invalid color dtype. 
Expected one of ['uint8', 'uint16'] but got {input_dtype}"") if input_dtype == ""uint8"" and output_dtype == ""uint16"": data[""points""].loc[:, column_names] *= 256 elif input_dtype == ""uint16"" and output_dtype == ""uint8"": data[""points""].loc[:, column_names] /= 256 data[""points""] = data[""points""].astype( {""red"": output_dtype, ""green"": output_dtype, ""blue"": output_dtype}) return data " 30665,"def module_test_command(client: Client, args: dict): """""" Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Args: client(Client): Autofocus Feed client args(Dict): The instance parameters Returns: 'ok' if test passed, anything else will fail the test. """""" indicator_feeds = client.indicator_feeds exception_list = [] # type:List if 'Daily Threat Feed' in indicator_feeds: raise Exception(""Daily Thread is no longer supported by this feed,"" "" please configure the FeedAutoFocusDaily for this action"") if 'Custom Feed' in indicator_feeds: client.indicator_feeds = ['Custom Feed'] url_list = client.custom_feed_url_list for url in url_list: client.custom_feed_url_list = [url] try: client.build_iterator(1, 0) except Exception: exception_list.append(f""Could not fetch Custom Feed {url}\n"" f""\nCheck your API key the URL for the feed and Check "" f""if they are Enabled in AutoFocus."") if 'Samples Feed' in indicator_feeds: client.indicator_feeds = ['Samples Feed'] try: client.build_iterator(1, 0) except Exception: exception_list.append(""Could not fetch Samples Feed\n"" ""\nCheck your instance configuration and your connection to AutoFocus."") if len(exception_list) > 0: raise Exception(""\n"".join(exception_list)) return 'ok', {}, {} ","def module_test_command(client: Client, args: dict): """""" Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Args: client(Client): Autofocus Feed client args(Dict): The instance parameters Returns: 'ok' if test passed, anything else will fail the test. """""" indicator_feeds = client.indicator_feeds exception_list = [] # type:List if 'Daily Threat Feed' in indicator_feeds: raise Exception(""Daily Thread is no longer supported by this feed,"" "" please configure the AutoFocus Daily Feed for this action"") if 'Custom Feed' in indicator_feeds: client.indicator_feeds = ['Custom Feed'] url_list = client.custom_feed_url_list for url in url_list: client.custom_feed_url_list = [url] try: client.build_iterator(1, 0) except Exception: exception_list.append(f""Could not fetch Custom Feed {url}\n"" f""\nCheck your API key the URL for the feed and Check "" f""if they are Enabled in AutoFocus."") if 'Samples Feed' in indicator_feeds: client.indicator_feeds = ['Samples Feed'] try: client.build_iterator(1, 0) except Exception: exception_list.append(""Could not fetch Samples Feed\n"" ""\nCheck your instance configuration and your connection to AutoFocus."") if len(exception_list) > 0: raise Exception(""\n"".join(exception_list)) return 'ok', {}, {} " 42929,"def _node_coords(graph: nx.Graph, l: dict) -> Tuple: """""" Provides the coordinates for the graph nodes when given an input graph layout. 
Args: graph (nx.Graph): input graph l (dict): dictionary of nodes and their respective coordinates Returns: Tuple: x and y coordinates for each node """""" n_x = [] n_y = [] for n in graph.nodes(): n_x.append(l[n][0]) n_y.append(l[n][1]) return {""x"": n_x, ""y"": n_y} ","def _node_coords(graph: nx.Graph, l: dict) -> Tuple: """"""Provides the coordinates for the graph nodes when given an input graph layout. Args: graph (nx.Graph): input graph l (dict): dictionary of nodes and their respective coordinates Returns: Tuple: x and y coordinates for each node """""" n_x = [] n_y = [] for n in graph.nodes(): n_x.append(l[n][0]) n_y.append(l[n][1]) return {""x"": n_x, ""y"": n_y} " 52128,"def _update_labels(sm, deployment): """""" Updating the deployment's labels. This function replaces the existing deployment's lables with the new labels that were passed in the request. If a new label already exists, it won't be created again. If an existing label is not in the new labels list, it will be deleted. """""" new_labels = _get_labels(request.json) if new_labels is None: return rm = get_resource_manager() new_labels_set = set(new_labels) existing_labels = sm.list( models.DeploymentLabel, filters={'_deployment_fk': deployment._storage_id} ) existing_labels_tup = set( (label.key, label.value) for label in existing_labels) labels_to_create = new_labels_set - existing_labels_tup raw_labels_to_delete = existing_labels_tup - new_labels_set labels_to_delete = [ label for label in existing_labels if (label.key, label.value) in raw_labels_to_delete] for label in labels_to_delete: sm.delete(label) rm.create_deployment_labels(deployment, labels_to_create) ","def _update_labels(sm, deployment): """""" Updating the deployment's labels. This function replaces the existing deployment's lables with the new labels that were passed in the request. If a new label already exists, it won't be created again. If an existing label is not in the new labels list, it will be deleted. """""" new_labels = _get_labels(request.json) if new_labels is None: return rm = get_resource_manager() new_labels_set = set(new_labels) existing_labels = sm.list( models.DeploymentLabel, filters={'_deployment_fk': deployment._storage_id} ) existing_labels_tup = set( (label.key, label.value) for label in existing_labels) labels_to_create = new_labels_set - existing_labels_tup labels_to_delete = [ label for label in existing_labels if (label.key, label.value) not in new_labels_set] for label in labels_to_delete: sm.delete(label) rm.create_deployment_labels(deployment, labels_to_create) " 43711,"def one_particle(t_matrix_elements, core=None, active=None, cutoff=1.0e-12): r""""""Generates the table of matrix elements of a given one-particle operator required to build many-body qubit observables. Second quantized one-particle operators are expanded in the basis of single-particle states as .. math:: \hat{T} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t} \vert \beta \rangle [\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} + \hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}]. In the equation above the indices :math:`\alpha, \beta` run over the basis of spatial orbitals :math:`\vert \alpha \rangle = \phi_\alpha(r)`. Since the operator :math:`t` acts only on the spatial coordinates, the spin quantum numbers are indicated explicitly with the up/down arrows. 
The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the particle creation and annihilation operators, respectively, and :math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix elements of the operator :math:`\hat{t}` .. math:: \langle \alpha \vert \hat{t} \vert \beta \rangle = \int dr ~ \phi_\alpha^*(r) \hat{t}(r) \phi_\beta(r). If an active space is defined (see :func:`~.active_space`), the summation indices run over the active orbitals and the contribution due to core orbitals is computed as :math:`T_\mathrm{core} = 2 \sum_{\alpha\in \mathrm{core}} \langle \alpha \vert \hat{t} \vert \beta \rangle`. Args: t_matrix_elements (array[float]): 2D NumPy array with the matrix elements :math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` core (list): indices of core orbitals, i.e., the orbitals that are not correlated in the many-body wave function active (list): indices of active orbitals, i.e., the orbitals used to build the correlated many-body wave function cutoff (float): Cutoff value for including matrix elements. The matrix elements with absolute value less than ``cutoff`` are neglected. Returns: tuple: Table of indices and matrix elements of the one-particle operator and the contribution due to core orbitals. The returned table is a 2D Numpy array where each row contains three elements, the *spin*-orbital indices :math:`\alpha, \beta` and the matrix element :math:`\langle \alpha \vert \hat{t} \vert \beta \rangle`. **Example** >>> t_matrix_elements = np.array([[-4.72739313e+00, -1.05499666e-01, -1.66961416e-01, ... 6.18014041e-16, 2.86964662e-16, -3.46772026e-02], ... [-1.05499666e-01, -1.49264622e+00, 3.28928073e-02, ... -2.20398308e-16, 1.93277291e-16, 5.27078882e-02], ... [-1.66961416e-01, 3.28928073e-02, -1.12554473e+00, ... -2.82912389e-17, 2.55224784e-16, -3.04455743e-02], ... [ 6.18014041e-16, -2.20398308e-16, -2.82912389e-17, ... -1.13579985e+00, -1.94289029e-16, -2.36158697e-16], ... [ 2.86964662e-16, 1.93277291e-16, 2.55224784e-16, ... -2.77555756e-16, -1.13579985e+00, 2.06665432e-16], ... [-3.46772026e-02, 5.27078882e-02, -3.04455743e-02, ... -2.36158697e-16, 2.06665432e-16, -9.50966595e-01]] >>> t_table, t_core = one_particle(t_matrix_elements, core=[0], active=[1, 2]) >>> print(t_table) [[ 0. 0. -1.49264622] [ 1. 1. -1.49264622] [ 0. 2. 0.03289281] [ 1. 3. 0.03289281] [ 2. 0. 0.03289281] [ 3. 1. 0.03289281] [ 2. 2. -1.12554473] [ 3. 3. 
-1.12554473]] >>> print(t_core) -9.45478626 """""" orbitals = t_matrix_elements.shape[0] if t_matrix_elements.ndim != 2: raise ValueError( ""'t_matrix_elements' must be a 2D array; got t_matrix_elements.ndim = {}"".format( t_matrix_elements.ndim ) ) if core is None: t_core = 0 else: if True in [i > orbitals - 1 or i < 0 for i in core]: raise ValueError( ""Indices of core orbitals must be between 0 and {}; got core = {}"".format( orbitals, core ) ) # Compute contribution due to core orbitals t_core = 2 * sum([t_matrix_elements[alpha, alpha] for alpha in core]) if active is None: if core is None: active = list(range(orbitals)) else: active = [i for i in range(orbitals) if i not in core] if True in [i > orbitals - 1 or i < 0 for i in active]: raise ValueError( ""Indices of active orbitals must be between 0 and {}; got active = {}"".format( orbitals, active ) ) # Indices of the matrix elements with absolute values >= cutoff indices = np.nonzero(np.abs(t_matrix_elements) >= cutoff) # Single out the indices of active orbitals num_indices = len(indices[0]) pairs = [ [indices[0][i], indices[1][i]] for i in range(num_indices) if all(indices[j][i] in active for j in range(len(indices))) ] # Building the table of indices and matrix elements t_table = np.zeros((2 * len(pairs), 3)) for i, pair in enumerate(pairs): alpha, beta = pair element = t_matrix_elements[alpha, beta] # spin-up term t_table[2 * i, 0] = 2 * active.index(alpha) t_table[2 * i, 1] = 2 * active.index(beta) t_table[2 * i, 2] = element # spin-down term t_table[2 * i + 1, 0] = 2 * active.index(alpha) + 1 t_table[2 * i + 1, 1] = 2 * active.index(beta) + 1 t_table[2 * i + 1, 2] = element return t_table, t_core ","def one_particle(t_matrix_elements, core=None, active=None, cutoff=1.0e-12): r""""""Generates the table of matrix elements of a given one-particle operator required to build many-body qubit observables. Second quantized one-particle operators are expanded in the basis of single-particle states as .. math:: \hat{T} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t} \vert \beta \rangle [\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} + \hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}]. In the equation above the indices :math:`\alpha, \beta` run over the basis of spatial orbitals :math:`\vert \alpha \rangle = \phi_\alpha(r)`. Since the operator :math:`t` acts only on the spatial coordinates, the spin quantum numbers are indicated explicitly with the up/down arrows. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the particle creation and annihilation operators, respectively, and :math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix elements of the operator :math:`\hat{t}` .. math:: \langle \alpha \vert \hat{t} \vert \beta \rangle = \int dr ~ \phi_\alpha^*(r) \hat{t}(r) \phi_\beta(r). If an active space is defined (see :func:`~.active_space`), the summation indices run over the active orbitals and the contribution due to core orbitals is computed as :math:`T_\mathrm{core} = 2 \sum_{\alpha\in \mathrm{core}} \langle \alpha \vert \hat{t} \vert \beta \rangle`. Args: t_matrix_elements (array[float]): 2D NumPy array with the matrix elements :math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` core (list): indices of core orbitals, i.e., the orbitals that are not correlated in the many-body wave function active (list): indices of active orbitals, i.e., the orbitals used to build the correlated many-body wave function cutoff (float): Cutoff value for including matrix elements. 
The matrix elements with absolute value less than ``cutoff`` are neglected. Returns: tuple: Table of indices and matrix elements of the one-particle operator and the contribution due to core orbitals. The returned table is a 2D Numpy array where each row contains three elements, the *spin*-orbital indices :math:`\alpha, \beta` and the matrix element :math:`\langle \alpha \vert \hat{t} \vert \beta \rangle`. **Example** >>> matrix_elements = np.array([[-4.72739313e+00, -1.05499666e-01, -1.66961416e-01, ... 6.18014041e-16, 2.86964662e-16, -3.46772026e-02], ... [-1.05499666e-01, -1.49264622e+00, 3.28928073e-02, ... -2.20398308e-16, 1.93277291e-16, 5.27078882e-02], ... [-1.66961416e-01, 3.28928073e-02, -1.12554473e+00, ... -2.82912389e-17, 2.55224784e-16, -3.04455743e-02], ... [ 6.18014041e-16, -2.20398308e-16, -2.82912389e-17, ... -1.13579985e+00, -1.94289029e-16, -2.36158697e-16], ... [ 2.86964662e-16, 1.93277291e-16, 2.55224784e-16, ... -2.77555756e-16, -1.13579985e+00, 2.06665432e-16], ... [-3.46772026e-02, 5.27078882e-02, -3.04455743e-02, ... -2.36158697e-16, 2.06665432e-16, -9.50966595e-01]] >>> t_table, t_core = one_particle(t_matrix_elements, core=[0], active=[1, 2]) >>> print(t_table) [[ 0. 0. -1.49264622] [ 1. 1. -1.49264622] [ 0. 2. 0.03289281] [ 1. 3. 0.03289281] [ 2. 0. 0.03289281] [ 3. 1. 0.03289281] [ 2. 2. -1.12554473] [ 3. 3. -1.12554473]] >>> print(t_core) -9.45478626 """""" orbitals = t_matrix_elements.shape[0] if t_matrix_elements.ndim != 2: raise ValueError( ""'t_matrix_elements' must be a 2D array; got t_matrix_elements.ndim = {}"".format( t_matrix_elements.ndim ) ) if core is None: t_core = 0 else: if True in [i > orbitals - 1 or i < 0 for i in core]: raise ValueError( ""Indices of core orbitals must be between 0 and {}; got core = {}"".format( orbitals, core ) ) # Compute contribution due to core orbitals t_core = 2 * sum([t_matrix_elements[alpha, alpha] for alpha in core]) if active is None: if core is None: active = list(range(orbitals)) else: active = [i for i in range(orbitals) if i not in core] if True in [i > orbitals - 1 or i < 0 for i in active]: raise ValueError( ""Indices of active orbitals must be between 0 and {}; got active = {}"".format( orbitals, active ) ) # Indices of the matrix elements with absolute values >= cutoff indices = np.nonzero(np.abs(t_matrix_elements) >= cutoff) # Single out the indices of active orbitals num_indices = len(indices[0]) pairs = [ [indices[0][i], indices[1][i]] for i in range(num_indices) if all(indices[j][i] in active for j in range(len(indices))) ] # Building the table of indices and matrix elements t_table = np.zeros((2 * len(pairs), 3)) for i, pair in enumerate(pairs): alpha, beta = pair element = t_matrix_elements[alpha, beta] # spin-up term t_table[2 * i, 0] = 2 * active.index(alpha) t_table[2 * i, 1] = 2 * active.index(beta) t_table[2 * i, 2] = element # spin-down term t_table[2 * i + 1, 0] = 2 * active.index(alpha) + 1 t_table[2 * i + 1, 1] = 2 * active.index(beta) + 1 t_table[2 * i + 1, 2] = element return t_table, t_core " 28575,"def plot_forest( data, kind=""forestplot"", model_names=None, var_names=None, filter_vars=None, transform=None, coords=None, combined=False, hdi_prob=None, rope=None, quartiles=True, ess=False, r_hat=False, colors=""cycle"", textsize=None, linewidth=None, markersize=None, legend=True, labeller=None, ridgeplot_alpha=None, ridgeplot_overlap=2, ridgeplot_kind=""auto"", ridgeplot_truncate=True, ridgeplot_quantiles=None, figsize=None, ax=None, backend=None, backend_config=None, 
backend_kwargs=None, show=None, ): """"""Forest plot to compare HDI intervals from a number of distributions. Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces. Parameters ---------- data: obj or list[obj] Any object that can be converted to an :class:`arviz.InferenceData` object Refer to documentation of :func:`arviz.convert_to_dataset` for details kind: str Choose kind of plot for main axis. Supports ""forestplot"" or ""ridgeplot"" model_names: list[str], optional List with names for the models in the list of data. Useful when plotting more that one dataset. var_names: list[str], optional List of variables to plot (defaults to None, which results in all variables plotted) Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. transform: callable Function to transform data (defaults to None i.e.the identity function) coords: dict, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel`. combined: bool Flag for combining multiple chains into a single chain. If ``False``(default), chains will be plotted separately. hdi_prob: float, optional Plots highest posterior density interval for chosen percentage of density. Defaults to `0.94`. rope: tuple or dictionary of tuples Lower and upper values of the Region Of Practical Equivalence. If a list with one interval only is provided, the **rope** will be displayed across the y-axis. If more than one interval is provided the length of the list should match the number of variables. quartiles: bool, optional Flag for plotting the interquartile range, in addition to the hdi_prob intervals. Defaults to ``True``. r_hat: bool, optional Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False ess: bool, optional Flag for plotting the effective sample size. Defaults to ``False``. colors: list or string, optional list with valid matplotlib colors, one color per model. Alternative a string can be passed. If the string is `cycle`, it will automatically chose a color per model from the matplotlibs cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all models. Defaults to 'cycle'. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. linewidth: int Line width throughout. If None it will be autoscaled based on ``figsize``. markersize: int Markersize throughout. If None it will be autoscaled based on ``figsize``. legend : bool, optional Show a legend with the color encoded model information. Defaults to ``True`` if there are multiple models labeller : labeller instance, optional Class providing the method ``make_model_label`` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ridgeplot_alpha: float Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise a `black` outline is used. ridgeplot_overlap: float Overlap height for ridgeplots. ridgeplot_kind: string By default (""auto"") continuous variables are plotted using KDEs and discrete ones using histograms. 
To override this use ""hist"" to plot histograms and ""density"" for KDEs ridgeplot_truncate: bool Whether to truncate densities according to the value of hdi_prop. Defaults to ``True``. ridgeplot_quantiles: list Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles. Defaults to ``None``. figsize: tuple Figure size. If ``None``, it will be defined automatically. ax: axes, optional :class:`matplotlib.axes` or :class:`bokeh.plotting.figure`. backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default to ""matplotlib"". backend_config: dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``. backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. show: bool, optional Call backend show function. Returns ------- gridspec: matplotlib GridSpec or bokeh figures See Also -------- plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book. plot_density: Generate KDE plots for continuous variables and histograms for discrete ones. Examples -------- Forestpĺot .. plot:: :context: close-figs >>> import arviz as az >>> non_centered_data = az.load_arviz_data('non_centered_eight') >>> axes = az.plot_forest(non_centered_data, >>> kind='forestplot', >>> var_names=[""^the""], >>> filter_vars=""regex"", >>> combined=True, >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') Forestplot with multiple datasets .. plot:: :context: close-figs >>> centered_data = az.load_arviz_data('centered_eight') >>> axes = az.plot_forest([non_centered_data, centered_data], >>> model_names = [""non centered eight"", ""centered eight""], >>> kind='forestplot', >>> var_names=[""^the""], >>> filter_vars=""regex"", >>> combined=True, >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools models') Forestpĺot with ropes .. plot:: :context: close-figs >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]} >>> axes = az.plot_forest(non_centered_data, >>> rope=rope, >>> var_names='~tau', >>> combined=True, >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') Ridgeplot .. plot:: :context: close-figs >>> axes = az.plot_forest(non_centered_data, >>> kind='ridgeplot', >>> var_names=['theta'], >>> combined=True, >>> ridgeplot_overlap=3, >>> colors='white', >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') Ridgeplot non-truncated and with quantiles .. 
plot:: :context: close-figs >>> axes = az.plot_forest(non_centered_data, >>> kind='ridgeplot', >>> var_names=['theta'], >>> combined=True, >>> ridgeplot_truncate=False, >>> ridgeplot_quantiles=[.25, .5, .75], >>> ridgeplot_overlap=0.7, >>> colors='white', >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') """""" if not isinstance(data, (list, tuple)): data = [data] if len(data) == 1: legend = False if coords is None: coords = {} if labeller is None: labeller = NoModelLabeller() if legend else BaseLabeller() datasets = [convert_to_dataset(datum) for datum in reversed(data)] if transform is not None: datasets = [transform(dataset) for dataset in datasets] datasets = get_coords( datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords ) var_names = _var_names(var_names, datasets, filter_vars) ncols, width_ratios = 1, [3] if ess: ncols += 1 width_ratios.append(1) if r_hat: ncols += 1 width_ratios.append(1) if hdi_prob is None: hdi_prob = rcParams[""stats.hdi_prob""] else: if not 1 >= hdi_prob > 0: raise ValueError(""The value of hdi_prob should be in the interval (0, 1]"") plot_forest_kwargs = dict( ax=ax, datasets=datasets, var_names=var_names, model_names=model_names, combined=combined, colors=colors, figsize=figsize, width_ratios=width_ratios, linewidth=linewidth, markersize=markersize, kind=kind, ncols=ncols, hdi_prob=hdi_prob, quartiles=quartiles, rope=rope, ridgeplot_overlap=ridgeplot_overlap, ridgeplot_alpha=ridgeplot_alpha, ridgeplot_kind=ridgeplot_kind, ridgeplot_truncate=ridgeplot_truncate, ridgeplot_quantiles=ridgeplot_quantiles, textsize=textsize, legend=legend, labeller=labeller, ess=ess, r_hat=r_hat, backend_kwargs=backend_kwargs, backend_config=backend_config, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_forest"", ""forestplot"", backend) axes = plot(**plot_forest_kwargs) return axes ","def plot_forest( data, kind=""forestplot"", model_names=None, var_names=None, filter_vars=None, transform=None, coords=None, combined=False, hdi_prob=None, rope=None, quartiles=True, ess=False, r_hat=False, colors=""cycle"", textsize=None, linewidth=None, markersize=None, legend=True, labeller=None, ridgeplot_alpha=None, ridgeplot_overlap=2, ridgeplot_kind=""auto"", ridgeplot_truncate=True, ridgeplot_quantiles=None, figsize=None, ax=None, backend=None, backend_config=None, backend_kwargs=None, show=None, ): """"""Forest plot to compare HDI intervals from a number of distributions. Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces. Parameters ---------- data: obj or list[obj] Any object that can be converted to an :class:`arviz.InferenceData` object Refer to documentation of :func:`arviz.convert_to_dataset` for details kind: str Choose kind of plot for main axis. Supports ""forestplot"" or ""ridgeplot"" model_names: list[str], optional List with names for the models in the list of data. Useful when plotting more that one dataset. var_names: list[str], optional List of variables to plot (defaults to None, which results in all variables plotted) Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. 
If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. transform: callable Function to transform data (defaults to None i.e.the identity function) coords: dict, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel`. combined: bool Flag for combining multiple chains into a single chain. If ``False``(default), chains will be plotted separately. hdi_prob: float, optional Plots highest posterior density interval for chosen percentage of density. Defaults to `0.94`. rope: tuple or dictionary of tuples Lower and upper values of the Region Of Practical Equivalence. If a list with one interval only is provided, the **rope** will be displayed across the y-axis. If more than one interval is provided the length of the list should match the number of variables. quartiles: bool, optional Flag for plotting the interquartile range, in addition to the hdi_prob intervals. Defaults to ``True``. r_hat: bool, optional Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False ess: bool, optional Flag for plotting the effective sample size. Defaults to ``False``. colors: list or string, optional list with valid matplotlib colors, one color per model. Alternative a string can be passed. If the string is `cycle`, it will automatically chose a color per model from the matplotlibs cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all models. Defaults to 'cycle'. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. linewidth: int Line width throughout. If None it will be autoscaled based on ``figsize``. markersize: int Markersize throughout. If None it will be autoscaled based on ``figsize``. legend : bool, optional Show a legend with the color encoded model information. Defaults to ``True`` if there are multiple models labeller : labeller instance, optional Class providing the method ``make_model_label`` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ridgeplot_alpha: float Transparency for ridgeplot fill. If **0**, border is colored by model, otherwise a `black` outline is used. ridgeplot_overlap: float Overlap height for ridgeplots. ridgeplot_kind: string By default (""auto"") continuous variables are plotted using KDEs and discrete ones using histograms. To override this use ""hist"" to plot histograms and ""density"" for KDEs ridgeplot_truncate: bool Whether to truncate densities according to the value of hdi_prop. Defaults to ``True``. ridgeplot_quantiles: list Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles. Defaults to ``None``. figsize: tuple Figure size. If ``None``, it will be defined automatically. ax: axes, optional :class:`matplotlib.axes.Axes` or :class:`bokeh.plotting.Figure`. backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default to ""matplotlib"". backend_config: dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``. backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. show: bool, optional Call backend show function. 
Returns ------- gridspec: matplotlib GridSpec or bokeh figures See Also -------- plot_posterior: Plot Posterior densities in the style of John K. Kruschke’s book. plot_density: Generate KDE plots for continuous variables and histograms for discrete ones. Examples -------- Forestpĺot .. plot:: :context: close-figs >>> import arviz as az >>> non_centered_data = az.load_arviz_data('non_centered_eight') >>> axes = az.plot_forest(non_centered_data, >>> kind='forestplot', >>> var_names=[""^the""], >>> filter_vars=""regex"", >>> combined=True, >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') Forestplot with multiple datasets .. plot:: :context: close-figs >>> centered_data = az.load_arviz_data('centered_eight') >>> axes = az.plot_forest([non_centered_data, centered_data], >>> model_names = [""non centered eight"", ""centered eight""], >>> kind='forestplot', >>> var_names=[""^the""], >>> filter_vars=""regex"", >>> combined=True, >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools models') Forestpĺot with ropes .. plot:: :context: close-figs >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]} >>> axes = az.plot_forest(non_centered_data, >>> rope=rope, >>> var_names='~tau', >>> combined=True, >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') Ridgeplot .. plot:: :context: close-figs >>> axes = az.plot_forest(non_centered_data, >>> kind='ridgeplot', >>> var_names=['theta'], >>> combined=True, >>> ridgeplot_overlap=3, >>> colors='white', >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') Ridgeplot non-truncated and with quantiles .. plot:: :context: close-figs >>> axes = az.plot_forest(non_centered_data, >>> kind='ridgeplot', >>> var_names=['theta'], >>> combined=True, >>> ridgeplot_truncate=False, >>> ridgeplot_quantiles=[.25, .5, .75], >>> ridgeplot_overlap=0.7, >>> colors='white', >>> figsize=(9, 7)) >>> axes[0].set_title('Estimated theta for 8 schools model') """""" if not isinstance(data, (list, tuple)): data = [data] if len(data) == 1: legend = False if coords is None: coords = {} if labeller is None: labeller = NoModelLabeller() if legend else BaseLabeller() datasets = [convert_to_dataset(datum) for datum in reversed(data)] if transform is not None: datasets = [transform(dataset) for dataset in datasets] datasets = get_coords( datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords ) var_names = _var_names(var_names, datasets, filter_vars) ncols, width_ratios = 1, [3] if ess: ncols += 1 width_ratios.append(1) if r_hat: ncols += 1 width_ratios.append(1) if hdi_prob is None: hdi_prob = rcParams[""stats.hdi_prob""] else: if not 1 >= hdi_prob > 0: raise ValueError(""The value of hdi_prob should be in the interval (0, 1]"") plot_forest_kwargs = dict( ax=ax, datasets=datasets, var_names=var_names, model_names=model_names, combined=combined, colors=colors, figsize=figsize, width_ratios=width_ratios, linewidth=linewidth, markersize=markersize, kind=kind, ncols=ncols, hdi_prob=hdi_prob, quartiles=quartiles, rope=rope, ridgeplot_overlap=ridgeplot_overlap, ridgeplot_alpha=ridgeplot_alpha, ridgeplot_kind=ridgeplot_kind, ridgeplot_truncate=ridgeplot_truncate, ridgeplot_quantiles=ridgeplot_quantiles, textsize=textsize, legend=legend, labeller=labeller, ess=ess, r_hat=r_hat, backend_kwargs=backend_kwargs, backend_config=backend_config, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: 
Add backend kwargs plot = get_plotting_function(""plot_forest"", ""forestplot"", backend) axes = plot(**plot_forest_kwargs) return axes " 56225,"def convert_to_onnx(model, input_shape, output_file, input_names, output_names): """"""Convert PyTorch model to ONNX and check the resulting onnx model"""""" output_file.parent.mkdir(parents=True, exist_ok=True) model.eval() dummy_input = torch.randn(input_shape) model(dummy_input) torch.onnx.export(model, dummy_input, str(output_file), verbose=False, opset_version=9, input_names=input_names.split(','), output_names=output_names.split(',')) model = onnx.load(str(output_file)) # Model Optimizer takes output names from ONNX node names if they exist. # However, the names PyTorch assigns to the ONNX nodes are generic and # non-descriptive (e.g. ""Gemm_151""). By deleting these names, we make # MO fall back to the ONNX output names, which we can set to whatever we want. for node in model.graph.node: node.ClearField('name') try: onnx.checker.check_model(model) print('ONNX check passed successfully.') except onnx.onnx_cpp2py_export.checker.ValidationError as exc: sys.exit('ONNX check failed with error: ' + str(exc)) onnx.save(model, str(output_file)) ","def convert_to_onnx(model, input_shape, output_file, input_names, output_names): """"""Convert PyTorch model to ONNX and check the resulting onnx model"""""" output_file.parent.mkdir(parents=True, exist_ok=True) model.eval() dummy_input = torch.randn(input_shape) model(dummy_input) torch.onnx.export(model, dummy_input, str(output_file), verbose=False, opset_version=9, input_names=input_names.split(','), output_names=output_names.split(',')) model = onnx.load(str(output_file)) # Model Optimizer takes output names from ONNX node names if they exist. # However, the names PyTorch assigns to the ONNX nodes since version 1.5 are generic # and non-descriptive (e.g. ""Gemm_151""). By deleting these names, we make # MO fall back to the ONNX output names, which we can set to whatever we want. for node in model.graph.node: node.ClearField('name') try: onnx.checker.check_model(model) print('ONNX check passed successfully.') except onnx.onnx_cpp2py_export.checker.ValidationError as exc: sys.exit('ONNX check failed with error: ' + str(exc)) onnx.save(model, str(output_file)) " 4139,"def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, exclude_failures=False, show_all_warnings=False, **options): """""" Compile a set of source modules into C/C++ files and return a list of distutils Extension objects for them. :param module_list: As module list, pass either a glob pattern, a list of glob patterns or a list of Extension objects. The latter allows you to configure the extensions separately through the normal distutils options. You can also pass Extension objects that have glob patterns as their sources. Then, cythonize will resolve the pattern and create a copy of the Extension for every matching file. :param exclude: When passing glob patterns as ``module_list``, you can exclude certain module names explicitly by passing them into the ``exclude`` option. :param nthreads: The number of concurrent builds for parallel compilation (requires the ``multiprocessing`` module). :param aliases: If you want to use compiler directives like ``# distutils: ...`` but can only know at compile time (when running the ``setup.py``) which values to use, you can use aliases and pass a dictionary mapping those aliases to Python strings when calling :func:`cythonize`. 
As an example, say you want to use the compiler directive ``# distutils: include_dirs = ../static_libs/include/`` but this path isn't always fixed and you want to find it when running the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python variable called ``foo`` as a string, and then call ``cythonize(..., aliases={'MY_HEADERS': foo})``. :param quiet: If True, Cython won't print error, warning, or status messages during the compilation. :param force: Forces the recompilation of the Cython modules, even if the timestamps don't indicate that a recompilation is necessary. :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this will be determined at a per-file level based on compiler directives. This affects only modules found based on file names. Extension instances passed into :func:`cythonize` will not be changed. It is recommended to rather use the compiler directive ``# distutils: language = c++`` than this option. :param exclude_failures: For a broad 'try to compile' mode that ignores compilation failures and simply excludes the failed extensions, pass ``exclude_failures=True``. Note that this only really makes sense for compiling ``.py`` files which can also be used without compilation. :param show_all_warnings: By default, not all Cython warnings are printed. Set to true to show all warnings. :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` files compiled. The HTML file gives an indication of how much Python interaction there is in each of the source code lines, compared to plain C code. It also allows you to see the C/C++ code generated for each line of Cython code. This report is invaluable when optimizing a function for speed, and for determining when to :ref:`release the GIL `: in general, a ``nogil`` block may contain only ""white"" code. See examples in :ref:`determining_where_to_add_types` or :ref:`primes`. :param annotate-fullc: If ``True`` will produce a colorized HTML version of the source which includes entire generated C/C++-code. :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: ``compiler_directives={'embedsignature': True}``. See :ref:`compiler-directives`. :param depfile: produce depfiles for the sources if True. 
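A minimal ``setup.py`` sketch of the *aliases* pattern described above; the module name, the header path, and the ``nthreads`` value are placeholders, not part of the documented API::

    # fast.pyx starts with:  # distutils: include_dirs = MY_HEADERS
    from setuptools import setup
    from Cython.Build import cythonize

    foo = '../static_libs/include/'   # located at setup time
    setup(ext_modules=cythonize('fast.pyx', aliases={'MY_HEADERS': foo}, nthreads=2))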
"""""" if exclude is None: exclude = [] if 'include_path' not in options: options['include_path'] = ['.'] if 'common_utility_include_dir' in options: safe_makedirs(options['common_utility_include_dir']) depfile = options.pop('depfile', None) if pythran is None: pythran_options = None else: pythran_options = CompilationOptions(**options) pythran_options.cplus = True pythran_options.np_pythran = True c_options = CompilationOptions(**options) cpp_options = CompilationOptions(**options); cpp_options.cplus = True ctx = Context.from_options(c_options) options = c_options module_list, module_metadata = create_extension_list( module_list, exclude=exclude, ctx=ctx, quiet=quiet, exclude_failures=exclude_failures, language=language, aliases=aliases) fix_windows_unicode_modules(module_list) deps = create_dependency_tree(ctx, quiet=quiet) build_dir = getattr(options, 'build_dir', None) def copy_to_build_dir(filepath, root=os.getcwd()): filepath_abs = os.path.abspath(filepath) if os.path.isabs(filepath): filepath = filepath_abs if filepath_abs.startswith(root): # distutil extension depends are relative to cwd mod_dir = join_path(build_dir, os.path.dirname(_relpath(filepath, root))) copy_once_if_newer(filepath_abs, mod_dir) modules_by_cfile = collections.defaultdict(list) to_compile = [] for m in module_list: if build_dir: for dep in m.depends: copy_to_build_dir(dep) cy_sources = [ source for source in m.sources if os.path.splitext(source)[1] in ('.pyx', '.py')] if len(cy_sources) == 1: # normal ""special"" case: believe the Extension module name to allow user overrides full_module_name = m.name else: # infer FQMN from source files full_module_name = None new_sources = [] for source in m.sources: base, ext = os.path.splitext(source) if ext in ('.pyx', '.py'): if m.np_pythran: c_file = base + '.cpp' options = pythran_options elif m.language == 'c++': c_file = base + '.cpp' options = cpp_options else: c_file = base + '.c' options = c_options # setup for out of place build directory if enabled if build_dir: if os.path.isabs(c_file): c_file = os.path.splitdrive(c_file)[1] c_file = c_file.split(os.sep, 1)[1] c_file = os.path.join(build_dir, c_file) dir = os.path.dirname(c_file) safe_makedirs_once(dir) # write out the depfile, if requested if depfile: src_base_dir, _ = os.path.split(source) relpaths = [os.path.relpath(fname, src_base_dir) for fname in deps.all_dependencies(source) ] depline = os.path.split(c_file)[1] + "": "" depline += "" \ \n"".join(relpaths) + ""\n"" with open(c_file+'.dep', 'w') as outfile: outfile.write(depline) if os.path.exists(c_file): c_timestamp = os.path.getmtime(c_file) else: c_timestamp = -1 # Priority goes first to modified files, second to direct # dependents, and finally to indirect dependents. 
if c_timestamp < deps.timestamp(source): dep_timestamp, dep = deps.timestamp(source), source priority = 0 else: dep_timestamp, dep = deps.newest_dependency(source) priority = 2 - (dep in deps.immediate_dependencies(source)) if force or c_timestamp < dep_timestamp: if not quiet and not force: if source == dep: print(u""Compiling %s because it changed."" % Utils.decode_filename(source)) else: print(u""Compiling %s because it depends on %s."" % ( Utils.decode_filename(source), Utils.decode_filename(dep), )) if not force and options.cache: fingerprint = deps.transitive_fingerprint(source, m, options) else: fingerprint = None to_compile.append(( priority, source, c_file, fingerprint, quiet, options, not exclude_failures, module_metadata.get(m.name), full_module_name, show_all_warnings)) new_sources.append(c_file) modules_by_cfile[c_file].append(m) else: new_sources.append(source) if build_dir: copy_to_build_dir(source) m.sources = new_sources if options.cache: if not os.path.exists(options.cache): os.makedirs(options.cache) to_compile.sort() # Drop ""priority"" component of ""to_compile"" entries and add a # simple progress indicator. N = len(to_compile) progress_fmt = ""[{0:%d}/{1}] "" % len(str(N)) for i in range(N): progress = progress_fmt.format(i+1, N) to_compile[i] = to_compile[i][1:] + (progress,) if N <= 1: nthreads = 0 if nthreads: import multiprocessing pool = multiprocessing.Pool( nthreads, initializer=_init_multiprocessing_helper) # This is a bit more involved than it should be, because KeyboardInterrupts # break the multiprocessing workers when using a normal pool.map(). # See, for example: # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt try: result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1) pool.close() while not result.ready(): try: result.get(99999) # seconds except multiprocessing.TimeoutError: pass except KeyboardInterrupt: pool.terminate() raise pool.join() else: for args in to_compile: cythonize_one(*args) if exclude_failures: failed_modules = set() for c_file, modules in modules_by_cfile.items(): if not os.path.exists(c_file): failed_modules.update(modules) elif os.path.getsize(c_file) < 200: f = io_open(c_file, 'r', encoding='iso8859-1') try: if f.read(len('#error ')) == '#error ': # dead compilation result failed_modules.update(modules) finally: f.close() if failed_modules: for module in failed_modules: module_list.remove(module) print(u""Failed compilations: %s"" % ', '.join(sorted([ module.name for module in failed_modules]))) if options.cache: cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100)) # cythonize() is often followed by the (non-Python-buffered) # compiler output, flush now to avoid interleaving output. sys.stdout.flush() return module_list ","def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, exclude_failures=False, show_all_warnings=False, **options): """""" Compile a set of source modules into C/C++ files and return a list of distutils Extension objects for them. :param module_list: As module list, pass either a glob pattern, a list of glob patterns or a list of Extension objects. The latter allows you to configure the extensions separately through the normal distutils options. You can also pass Extension objects that have glob patterns as their sources. Then, cythonize will resolve the pattern and create a copy of the Extension for every matching file. 
:param exclude: When passing glob patterns as ``module_list``, you can exclude certain module names explicitly by passing them into the ``exclude`` option. :param nthreads: The number of concurrent builds for parallel compilation (requires the ``multiprocessing`` module). :param aliases: If you want to use compiler directives like ``# distutils: ...`` but can only know at compile time (when running the ``setup.py``) which values to use, you can use aliases and pass a dictionary mapping those aliases to Python strings when calling :func:`cythonize`. As an example, say you want to use the compiler directive ``# distutils: include_dirs = ../static_libs/include/`` but this path isn't always fixed and you want to find it when running the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python variable called ``foo`` as a string, and then call ``cythonize(..., aliases={'MY_HEADERS': foo})``. :param quiet: If True, Cython won't print error, warning, or status messages during the compilation. :param force: Forces the recompilation of the Cython modules, even if the timestamps don't indicate that a recompilation is necessary. :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this will be determined at a per-file level based on compiler directives. This affects only modules found based on file names. Extension instances passed into :func:`cythonize` will not be changed. It is recommended to rather use the compiler directive ``# distutils: language = c++`` than this option. :param exclude_failures: For a broad 'try to compile' mode that ignores compilation failures and simply excludes the failed extensions, pass ``exclude_failures=True``. Note that this only really makes sense for compiling ``.py`` files which can also be used without compilation. :param show_all_warnings: By default, not all Cython warnings are printed. Set to true to show all warnings. :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` files compiled. The HTML file gives an indication of how much Python interaction there is in each of the source code lines, compared to plain C code. It also allows you to see the C/C++ code generated for each line of Cython code. This report is invaluable when optimizing a function for speed, and for determining when to :ref:`release the GIL `: in general, a ``nogil`` block may contain only ""white"" code. See examples in :ref:`determining_where_to_add_types` or :ref:`primes`. :param annotate-fullc: If ``True`` will produce a colorized HTML version of the source which includes entire generated C/C++-code. :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: ``compiler_directives={'embedsignature': True}``. See :ref:`compiler-directives`. :param depfile: produce depfiles for the sources if True. 
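A hedged illustration of the *depfile* option (the file names are hypothetical): for each compiled source, a Make-style ``<c_file>.dep`` is written next to the generated C file, joining the dependency paths (relative to the source directory) with backslash continuations, e.g.

>>> print('fast.c: ' + ' \\\n '.join(['fast.pxd', 'helpers.pxd']))
fast.c: fast.pxd \
 helpers.pxd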
"""""" if exclude is None: exclude = [] if 'include_path' not in options: options['include_path'] = ['.'] if 'common_utility_include_dir' in options: safe_makedirs(options['common_utility_include_dir']) depfile = options.pop('depfile', None) if pythran is None: pythran_options = None else: pythran_options = CompilationOptions(**options) pythran_options.cplus = True pythran_options.np_pythran = True c_options = CompilationOptions(**options) cpp_options = CompilationOptions(**options); cpp_options.cplus = True ctx = Context.from_options(c_options) options = c_options module_list, module_metadata = create_extension_list( module_list, exclude=exclude, ctx=ctx, quiet=quiet, exclude_failures=exclude_failures, language=language, aliases=aliases) fix_windows_unicode_modules(module_list) deps = create_dependency_tree(ctx, quiet=quiet) build_dir = getattr(options, 'build_dir', None) def copy_to_build_dir(filepath, root=os.getcwd()): filepath_abs = os.path.abspath(filepath) if os.path.isabs(filepath): filepath = filepath_abs if filepath_abs.startswith(root): # distutil extension depends are relative to cwd mod_dir = join_path(build_dir, os.path.dirname(_relpath(filepath, root))) copy_once_if_newer(filepath_abs, mod_dir) modules_by_cfile = collections.defaultdict(list) to_compile = [] for m in module_list: if build_dir: for dep in m.depends: copy_to_build_dir(dep) cy_sources = [ source for source in m.sources if os.path.splitext(source)[1] in ('.pyx', '.py')] if len(cy_sources) == 1: # normal ""special"" case: believe the Extension module name to allow user overrides full_module_name = m.name else: # infer FQMN from source files full_module_name = None new_sources = [] for source in m.sources: base, ext = os.path.splitext(source) if ext in ('.pyx', '.py'): if m.np_pythran: c_file = base + '.cpp' options = pythran_options elif m.language == 'c++': c_file = base + '.cpp' options = cpp_options else: c_file = base + '.c' options = c_options # setup for out of place build directory if enabled if build_dir: if os.path.isabs(c_file): c_file = os.path.splitdrive(c_file)[1] c_file = c_file.split(os.sep, 1)[1] c_file = os.path.join(build_dir, c_file) dir = os.path.dirname(c_file) safe_makedirs_once(dir) # write out the depfile, if requested if depfile: src_base_dir, _ = os.path.split(source) relpaths = [os.path.relpath(fname, src_base_dir) for fname in deps.all_dependencies(source) ] depline = os.path.split(c_file)[1] + "": "" depline += "" \\\n "".join(relpaths) + ""\n"" with open(c_file+'.dep', 'w') as outfile: outfile.write(depline) if os.path.exists(c_file): c_timestamp = os.path.getmtime(c_file) else: c_timestamp = -1 # Priority goes first to modified files, second to direct # dependents, and finally to indirect dependents. 
if c_timestamp < deps.timestamp(source): dep_timestamp, dep = deps.timestamp(source), source priority = 0 else: dep_timestamp, dep = deps.newest_dependency(source) priority = 2 - (dep in deps.immediate_dependencies(source)) if force or c_timestamp < dep_timestamp: if not quiet and not force: if source == dep: print(u""Compiling %s because it changed."" % Utils.decode_filename(source)) else: print(u""Compiling %s because it depends on %s."" % ( Utils.decode_filename(source), Utils.decode_filename(dep), )) if not force and options.cache: fingerprint = deps.transitive_fingerprint(source, m, options) else: fingerprint = None to_compile.append(( priority, source, c_file, fingerprint, quiet, options, not exclude_failures, module_metadata.get(m.name), full_module_name, show_all_warnings)) new_sources.append(c_file) modules_by_cfile[c_file].append(m) else: new_sources.append(source) if build_dir: copy_to_build_dir(source) m.sources = new_sources if options.cache: if not os.path.exists(options.cache): os.makedirs(options.cache) to_compile.sort() # Drop ""priority"" component of ""to_compile"" entries and add a # simple progress indicator. N = len(to_compile) progress_fmt = ""[{0:%d}/{1}] "" % len(str(N)) for i in range(N): progress = progress_fmt.format(i+1, N) to_compile[i] = to_compile[i][1:] + (progress,) if N <= 1: nthreads = 0 if nthreads: import multiprocessing pool = multiprocessing.Pool( nthreads, initializer=_init_multiprocessing_helper) # This is a bit more involved than it should be, because KeyboardInterrupts # break the multiprocessing workers when using a normal pool.map(). # See, for example: # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt try: result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1) pool.close() while not result.ready(): try: result.get(99999) # seconds except multiprocessing.TimeoutError: pass except KeyboardInterrupt: pool.terminate() raise pool.join() else: for args in to_compile: cythonize_one(*args) if exclude_failures: failed_modules = set() for c_file, modules in modules_by_cfile.items(): if not os.path.exists(c_file): failed_modules.update(modules) elif os.path.getsize(c_file) < 200: f = io_open(c_file, 'r', encoding='iso8859-1') try: if f.read(len('#error ')) == '#error ': # dead compilation result failed_modules.update(modules) finally: f.close() if failed_modules: for module in failed_modules: module_list.remove(module) print(u""Failed compilations: %s"" % ', '.join(sorted([ module.name for module in failed_modules]))) if options.cache: cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100)) # cythonize() is often followed by the (non-Python-buffered) # compiler output, flush now to avoid interleaving output. 
sys.stdout.flush() return module_list " 32142,"def verify_labels(pr_label_names): """""" Verify that the external PR contains the following labels: 'Contribution Form Filled' and either one of 'Community'/'Partner'/'internal' labels """""" is_contribution_form_filled_label_exist = 'Contribution Form Filled' in pr_label_names is_community_label_exist = 'Community' in pr_label_names is_partner_label_exist = 'Partner' in pr_label_names is_internal_label_exist = 'internal' in pr_label_names if is_contribution_form_filled_label_exist: if is_community_label_exist and not is_partner_label_exist and not is_internal_label_exist: return True elif not is_community_label_exist and is_partner_label_exist and not is_internal_label_exist: return True elif not is_community_label_exist and not is_partner_label_exist and is_internal_label_exist: return True else: return False return False ","def verify_labels(pr_label_names): """""" Verify that the external PR contains the following labels: 'Contribution Form Filled' and either one of 'Community'/'Partner'/'internal' labels """""" is_contribution_form_filled_label_exist = 'Contribution Form Filled' in pr_label_names is_community_label_exist = 'Community' in pr_label_names is_partner_label_exist = 'Partner' in pr_label_names is_internal_label_exist = 'internal' in pr_label_names if is_contribution_form_filled_label_exist: return is_community_label_exist ^ is_partner_label_exist ^ is_internal_label_exist return False " 49664,"def attrs( maybe_cls=None, these=None, repr_ns=None, repr=None, cmp=None, hash=None, init=None, slots=False, frozen=False, weakref_slot=True, str=False, auto_attribs=False, kw_only=False, cache_hash=False, auto_exc=False, eq=None, order=None, auto_detect=False, collect_by_mro=False, getstate_setstate=None, on_setattr=None, field_transformer=None, match_args=True, ): r"""""" A class decorator that adds `dunder `_\ -methods according to the specified attributes using `attr.ib` or the *these* argument. :param these: A dictionary of name to `attr.ib` mappings. This is useful to avoid the definition of your attributes within the class body because you can't (e.g. if you want to add ``__repr__`` methods to Django models) or don't want to. If *these* is not ``None``, ``attrs`` will *not* search the class body for attributes and will *not* remove any attributes from it. If *these* is an ordered dict (`dict` on Python 3.6+, `collections.OrderedDict` otherwise), the order is deduced from the order of the attributes inside *these*. Otherwise the order of the definition of the attributes is used. :type these: `dict` of `str` to `attr.ib` :param str repr_ns: When using nested classes, there's no way in Python 2 to automatically detect that. Therefore it's possible to set the namespace explicitly for a more meaningful ``repr`` output. :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, *order*, and *hash* arguments explicitly, assume they are set to ``True`` **unless any** of the involved methods for one of the arguments is implemented in the *current* class (i.e. it is *not* inherited from some base class). So for example by implementing ``__eq__`` on a class yourself, ``attrs`` will deduce ``eq=False`` and will create *neither* ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible ``__ne__`` by default, so it *should* be enough to only implement ``__eq__`` in most cases). .. warning:: If you prevent ``attrs`` from creating the ordering methods for you (``order=False``, e.g. 
by implementing ``__le__``), it becomes *your* responsibility to make sure its ordering is sound. The best way is to use the `functools.total_ordering` decorator. Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, *cmp*, or *hash* overrides whatever *auto_detect* would determine. *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises a `PythonTooOldError`. :param bool repr: Create a ``__repr__`` method with a human readable representation of ``attrs`` attributes.. :param bool str: Create a ``__str__`` method that is identical to ``__repr__``. This is usually not necessary except for `Exception`\ s. :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` and ``__ne__`` methods that check two instances for equality. They compare the instances as if they were tuples of their ``attrs`` attributes if and only if the types of both classes are *identical*! :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` methods that behave like *eq* above and allow instances to be ordered. If ``None`` (default) mirror value of *eq*. :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the same value. Must not be mixed with *eq* or *order*. :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method is generated according how *eq* and *frozen* are set. 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to None, marking it unhashable (which it is). 3. If *eq* is False, ``__hash__`` will be left untouched meaning the ``__hash__`` method of the base class will be used (if base class is ``object``, this means it will fall back to id-based hashing.). Although not recommended, you can decide for yourself and force ``attrs`` to create one (e.g. if the class is immutable even though you didn't freeze it programmatically) by passing ``True`` or not. Both of these cases are rather special and should be used carefully. See our documentation on `hashing`, Python's documentation on `object.__hash__`, and the `GitHub issue that led to the default \ behavior `_ for more details. :param bool init: Create a ``__init__`` method that initializes the ``attrs`` attributes. Leading underscores are stripped for the argument name. If a ``__attrs_pre_init__`` method exists on the class, it will be called before the class is initialized. If a ``__attrs_post_init__`` method exists on the class, it will be called after the class is fully initialized. If ``init`` is ``False``, an ``__attrs_init__`` method will be injected instead. This allows you to define a custom ``__init__`` method that can do pre-init work such as ``super().__init__()``, and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. :param bool slots: Create a `slotted class ` that's more memory-efficient. Slotted classes are generally superior to the default dict classes, but have some gotchas you should know about, so we encourage you to read the `glossary entry `. :param bool frozen: Make instances immutable after initialization. If someone attempts to modify a frozen instance, `attr.exceptions.FrozenInstanceError` is raised. .. note:: 1. This is achieved by installing a custom ``__setattr__`` method on your class, so you can't implement your own. 2. True immutability is impossible in Python. 3. This *does* have a minor a runtime performance `impact ` when initializing new instances. 
In other words: ``__init__`` is slightly slower with ``frozen=True``. 4. If a class is frozen, you cannot modify ``self`` in ``__attrs_post_init__`` or a self-written ``__init__``. You can circumvent that limitation by using ``object.__setattr__(self, ""attribute_name"", value)``. 5. Subclasses of a frozen class are frozen too. :param bool weakref_slot: Make instances weak-referenceable. This has no effect unless ``slots`` is also enabled. :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated attributes (Python 3.6 and later only) from the class body. In this case, you **must** annotate every field. If ``attrs`` encounters a field that is set to an `attr.ib` but lacks a type annotation, an `attr.exceptions.UnannotatedAttributeError` is raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't want to set a type. If you assign a value to those attributes (e.g. ``x: int = 42``), that value becomes the default value like if it were passed using ``attr.ib(default=42)``. Passing an instance of `Factory` also works as expected in most cases (see warning below). Attributes annotated as `typing.ClassVar`, and attributes that are neither annotated nor set to an `attr.ib` are **ignored**. .. warning:: For features that use the attribute name to create decorators (e.g. `validators `), you still *must* assign `attr.ib` to them. Otherwise Python will either not find the name or try to use the default value to call e.g. ``validator`` on it. These errors can be quite confusing and probably the most common bug report on our bug tracker. .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ :param bool kw_only: Make all attributes keyword-only (Python 3+) in the generated ``__init__`` (if ``init`` is ``False``, this parameter is ignored). :param bool cache_hash: Ensure that the object's hash code is computed only once and stored on the object. If this is set to ``True``, hashing must be either explicitly or implicitly enabled for this class. If the hash code is cached, avoid any reassignments of fields involved in hash code computation or mutations of the objects those fields point to after object creation. If such changes occur, the behavior of the object's hash code is undefined. :param bool auto_exc: If the class subclasses `BaseException` (which implicitly includes any subclass of any exception), the following happens to behave like a well-behaved Python exceptions class: - the values for *eq*, *order*, and *hash* are ignored and the instances compare and hash by the instance's ids (N.B. ``attrs`` will *not* remove existing implementations of ``__hash__`` or the equality methods. It just won't add own ones.), - all attributes that are either passed into ``__init__`` or have a default value are additionally available as a tuple in the ``args`` attribute, - the value of *str* is ignored leaving ``__str__`` to base classes. :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` collects attributes from base classes. The default behavior is incorrect in certain cases of multiple inheritance. It should be on by default but is kept off for backward-compatibility. See issue `#428 `_ for more details. :param Optional[bool] getstate_setstate: .. note:: This is usually only interesting for slotted classes and you should probably just set *auto_detect* to `True`. If `True`, ``__getstate__`` and ``__setstate__`` are generated and attached to the class. This is necessary for slotted classes to be pickleable. 
If left `None`, it's `True` by default for slotted classes and ``False`` for dict classes. If *auto_detect* is `True`, and *getstate_setstate* is left `None`, and **either** ``__getstate__`` or ``__setstate__`` is detected directly on the class (i.e. not inherited), it is set to `False` (this is usually what you want). :param on_setattr: A callable that is run whenever the user attempts to set an attribute (either by assignment like ``i.x = 42`` or by using `setattr` like ``setattr(i, ""x"", 42)``). It receives the same arguments as validators: the instance, the attribute that is being modified, and the new value. If no exception is raised, the attribute is set to the return value of the callable. If a list of callables is passed, they're automatically wrapped in an `attr.setters.pipe`. :param Optional[callable] field_transformer: A function that is called with the original class object and all fields right before ``attrs`` finalizes the class. You can use this, e.g., to automatically add converters or validators to fields based on their types. See `transform-fields` for more details. :param bool match_args: If `True` it sets __match_args__ in the class to support PEP 634. It is a tuple of __init__ parameter names that are only positional arguments. .. versionadded:: 16.0.0 *slots* .. versionadded:: 16.1.0 *frozen* .. versionadded:: 16.3.0 *str* .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. .. versionchanged:: 17.1.0 *hash* supports ``None`` as value which is also the default now. .. versionadded:: 17.3.0 *auto_attribs* .. versionchanged:: 18.1.0 If *these* is passed, no attributes are deleted from the class body. .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. .. versionadded:: 18.2.0 *weakref_slot* .. deprecated:: 18.2.0 ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a `DeprecationWarning` if the classes compared are subclasses of each other. ``__eq`` and ``__ne__`` never tried to compared subclasses to each other. .. versionchanged:: 19.2.0 ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider subclasses comparable anymore. .. versionadded:: 18.2.0 *kw_only* .. versionadded:: 18.2.0 *cache_hash* .. versionadded:: 19.1.0 *auto_exc* .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. .. versionadded:: 19.2.0 *eq* and *order* .. versionadded:: 20.1.0 *auto_detect* .. versionadded:: 20.1.0 *collect_by_mro* .. versionadded:: 20.1.0 *getstate_setstate* .. versionadded:: 20.1.0 *on_setattr* .. versionadded:: 20.3.0 *field_transformer* .. versionchanged:: 21.1.0 ``init=False`` injects ``__attrs_init__`` .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` .. versionchanged:: 21.1.0 *cmp* undeprecated .. 
versionadded:: 21.3.0 *match_args* """""" if auto_detect and PY2: raise PythonTooOldError( ""auto_detect only works on Python 3 and later."" ) eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) hash_ = hash # work around the lack of nonlocal if isinstance(on_setattr, (list, tuple)): on_setattr = setters.pipe(*on_setattr) def wrap(cls): if getattr(cls, ""__class__"", None) is None: raise TypeError(""attrs only works with new-style classes."") is_frozen = frozen or _has_frozen_base_class(cls) is_exc = auto_exc is True and issubclass(cls, BaseException) has_own_setattr = auto_detect and _has_own_attribute( cls, ""__setattr__"" ) if has_own_setattr and is_frozen: raise ValueError(""Can't freeze a class with a custom __setattr__."") builder = _ClassBuilder( cls, these, slots, is_frozen, weakref_slot, _determine_whether_to_implement( cls, getstate_setstate, auto_detect, (""__getstate__"", ""__setstate__""), default=slots, ), auto_attribs, kw_only, cache_hash, is_exc, collect_by_mro, on_setattr, has_own_setattr, field_transformer, ) if _determine_whether_to_implement( cls, repr, auto_detect, (""__repr__"",) ): builder.add_repr(repr_ns) if str is True: builder.add_str() eq = _determine_whether_to_implement( cls, eq_, auto_detect, (""__eq__"", ""__ne__"") ) if not is_exc and eq is True: builder.add_eq() if not is_exc and _determine_whether_to_implement( cls, order_, auto_detect, (""__lt__"", ""__le__"", ""__gt__"", ""__ge__"") ): builder.add_order() builder.add_setattr() if ( hash_ is None and auto_detect is True and _has_own_attribute(cls, ""__hash__"") ): hash = False else: hash = hash_ if hash is not True and hash is not False and hash is not None: # Can't use `hash in` because 1 == True for example. raise TypeError( ""Invalid value for hash. Must be True, False, or None."" ) elif hash is False or (hash is None and eq is False) or is_exc: # Don't do anything. Should fall back to __object__'s __hash__ # which is by id. if cache_hash: raise TypeError( ""Invalid value for cache_hash. To use hash caching,"" "" hashing must be either explicitly or implicitly "" ""enabled."" ) elif hash is True or ( hash is None and eq is True and is_frozen is True ): # Build a __hash__ if told so, or if it's safe. builder.add_hash() else: # Raise TypeError on attempts to hash. if cache_hash: raise TypeError( ""Invalid value for cache_hash. To use hash caching,"" "" hashing must be either explicitly or implicitly "" ""enabled."" ) builder.make_unhashable() if _determine_whether_to_implement( cls, init, auto_detect, (""__init__"",) ): builder.add_init() else: builder.add_attrs_init() if cache_hash: raise TypeError( ""Invalid value for cache_hash. To use hash caching,"" "" init must be True."" ) if match_args and ""__match_args__"" not in cls.__dict__: builder.add_match_args() return builder.build_class() # maybe_cls's type depends on the usage of the decorator. It's a class # if it's used as `@attrs` but ``None`` if used as `@attrs()`. if maybe_cls is None: return wrap else: return wrap(maybe_cls) ","def attrs( maybe_cls=None, these=None, repr_ns=None, repr=None, cmp=None, hash=None, init=None, slots=False, frozen=False, weakref_slot=True, str=False, auto_attribs=False, kw_only=False, cache_hash=False, auto_exc=False, eq=None, order=None, auto_detect=False, collect_by_mro=False, getstate_setstate=None, on_setattr=None, field_transformer=None, match_args=True, ): r"""""" A class decorator that adds `dunder `_\ -methods according to the specified attributes using `attr.ib` or the *these* argument. 
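A minimal, illustrative sketch of the decorator in use; the class, its fields, and the chosen options are made-up examples (``attr.s`` and ``attr.ib`` are the short aliases of ``attrs`` and ``attrib``)::

    import attr

    @attr.s(auto_attribs=True, frozen=True, slots=True)
    class Point:
        x: int
        y: int = 0

    p = Point(1)    # the generated __init__ fills the annotated fields
    # p.x = 5       # would raise attr.exceptions.FrozenInstanceError
    # match_args=True (the default) also sets __match_args__ == ('x', 'y') for PEP 634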
:param these: A dictionary of name to `attr.ib` mappings. This is useful to avoid the definition of your attributes within the class body because you can't (e.g. if you want to add ``__repr__`` methods to Django models) or don't want to. If *these* is not ``None``, ``attrs`` will *not* search the class body for attributes and will *not* remove any attributes from it. If *these* is an ordered dict (`dict` on Python 3.6+, `collections.OrderedDict` otherwise), the order is deduced from the order of the attributes inside *these*. Otherwise the order of the definition of the attributes is used. :type these: `dict` of `str` to `attr.ib` :param str repr_ns: When using nested classes, there's no way in Python 2 to automatically detect that. Therefore it's possible to set the namespace explicitly for a more meaningful ``repr`` output. :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, *order*, and *hash* arguments explicitly, assume they are set to ``True`` **unless any** of the involved methods for one of the arguments is implemented in the *current* class (i.e. it is *not* inherited from some base class). So for example by implementing ``__eq__`` on a class yourself, ``attrs`` will deduce ``eq=False`` and will create *neither* ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible ``__ne__`` by default, so it *should* be enough to only implement ``__eq__`` in most cases). .. warning:: If you prevent ``attrs`` from creating the ordering methods for you (``order=False``, e.g. by implementing ``__le__``), it becomes *your* responsibility to make sure its ordering is sound. The best way is to use the `functools.total_ordering` decorator. Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, *cmp*, or *hash* overrides whatever *auto_detect* would determine. *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises a `PythonTooOldError`. :param bool repr: Create a ``__repr__`` method with a human readable representation of ``attrs`` attributes.. :param bool str: Create a ``__str__`` method that is identical to ``__repr__``. This is usually not necessary except for `Exception`\ s. :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` and ``__ne__`` methods that check two instances for equality. They compare the instances as if they were tuples of their ``attrs`` attributes if and only if the types of both classes are *identical*! :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` methods that behave like *eq* above and allow instances to be ordered. If ``None`` (default) mirror value of *eq*. :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the same value. Must not be mixed with *eq* or *order*. :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method is generated according how *eq* and *frozen* are set. 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to None, marking it unhashable (which it is). 3. If *eq* is False, ``__hash__`` will be left untouched meaning the ``__hash__`` method of the base class will be used (if base class is ``object``, this means it will fall back to id-based hashing.). Although not recommended, you can decide for yourself and force ``attrs`` to create one (e.g. if the class is immutable even though you didn't freeze it programmatically) by passing ``True`` or not. 
Both of these cases are rather special and should be used carefully. See our documentation on `hashing`, Python's documentation on `object.__hash__`, and the `GitHub issue that led to the default \ behavior `_ for more details. :param bool init: Create a ``__init__`` method that initializes the ``attrs`` attributes. Leading underscores are stripped for the argument name. If a ``__attrs_pre_init__`` method exists on the class, it will be called before the class is initialized. If a ``__attrs_post_init__`` method exists on the class, it will be called after the class is fully initialized. If ``init`` is ``False``, an ``__attrs_init__`` method will be injected instead. This allows you to define a custom ``__init__`` method that can do pre-init work such as ``super().__init__()``, and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. :param bool slots: Create a `slotted class ` that's more memory-efficient. Slotted classes are generally superior to the default dict classes, but have some gotchas you should know about, so we encourage you to read the `glossary entry `. :param bool frozen: Make instances immutable after initialization. If someone attempts to modify a frozen instance, `attr.exceptions.FrozenInstanceError` is raised. .. note:: 1. This is achieved by installing a custom ``__setattr__`` method on your class, so you can't implement your own. 2. True immutability is impossible in Python. 3. This *does* have a minor a runtime performance `impact ` when initializing new instances. In other words: ``__init__`` is slightly slower with ``frozen=True``. 4. If a class is frozen, you cannot modify ``self`` in ``__attrs_post_init__`` or a self-written ``__init__``. You can circumvent that limitation by using ``object.__setattr__(self, ""attribute_name"", value)``. 5. Subclasses of a frozen class are frozen too. :param bool weakref_slot: Make instances weak-referenceable. This has no effect unless ``slots`` is also enabled. :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated attributes (Python 3.6 and later only) from the class body. In this case, you **must** annotate every field. If ``attrs`` encounters a field that is set to an `attr.ib` but lacks a type annotation, an `attr.exceptions.UnannotatedAttributeError` is raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't want to set a type. If you assign a value to those attributes (e.g. ``x: int = 42``), that value becomes the default value like if it were passed using ``attr.ib(default=42)``. Passing an instance of `Factory` also works as expected in most cases (see warning below). Attributes annotated as `typing.ClassVar`, and attributes that are neither annotated nor set to an `attr.ib` are **ignored**. .. warning:: For features that use the attribute name to create decorators (e.g. `validators `), you still *must* assign `attr.ib` to them. Otherwise Python will either not find the name or try to use the default value to call e.g. ``validator`` on it. These errors can be quite confusing and probably the most common bug report on our bug tracker. .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ :param bool kw_only: Make all attributes keyword-only (Python 3+) in the generated ``__init__`` (if ``init`` is ``False``, this parameter is ignored). :param bool cache_hash: Ensure that the object's hash code is computed only once and stored on the object. If this is set to ``True``, hashing must be either explicitly or implicitly enabled for this class. 
If the hash code is cached, avoid any reassignments of fields involved in hash code computation or mutations of the objects those fields point to after object creation. If such changes occur, the behavior of the object's hash code is undefined. :param bool auto_exc: If the class subclasses `BaseException` (which implicitly includes any subclass of any exception), the following happens to behave like a well-behaved Python exceptions class: - the values for *eq*, *order*, and *hash* are ignored and the instances compare and hash by the instance's ids (N.B. ``attrs`` will *not* remove existing implementations of ``__hash__`` or the equality methods. It just won't add own ones.), - all attributes that are either passed into ``__init__`` or have a default value are additionally available as a tuple in the ``args`` attribute, - the value of *str* is ignored leaving ``__str__`` to base classes. :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` collects attributes from base classes. The default behavior is incorrect in certain cases of multiple inheritance. It should be on by default but is kept off for backward-compatibility. See issue `#428 `_ for more details. :param Optional[bool] getstate_setstate: .. note:: This is usually only interesting for slotted classes and you should probably just set *auto_detect* to `True`. If `True`, ``__getstate__`` and ``__setstate__`` are generated and attached to the class. This is necessary for slotted classes to be pickleable. If left `None`, it's `True` by default for slotted classes and ``False`` for dict classes. If *auto_detect* is `True`, and *getstate_setstate* is left `None`, and **either** ``__getstate__`` or ``__setstate__`` is detected directly on the class (i.e. not inherited), it is set to `False` (this is usually what you want). :param on_setattr: A callable that is run whenever the user attempts to set an attribute (either by assignment like ``i.x = 42`` or by using `setattr` like ``setattr(i, ""x"", 42)``). It receives the same arguments as validators: the instance, the attribute that is being modified, and the new value. If no exception is raised, the attribute is set to the return value of the callable. If a list of callables is passed, they're automatically wrapped in an `attr.setters.pipe`. :param Optional[callable] field_transformer: A function that is called with the original class object and all fields right before ``attrs`` finalizes the class. You can use this, e.g., to automatically add converters or validators to fields based on their types. See `transform-fields` for more details. :param bool match_args: If `True`, set ``__match_args__`` on the class to support `PEP 634 `_ (Structural Pattern Matching). It is a tuple of all positional-only ``__init__`` parameter names. .. versionadded:: 16.0.0 *slots* .. versionadded:: 16.1.0 *frozen* .. versionadded:: 16.3.0 *str* .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. .. versionchanged:: 17.1.0 *hash* supports ``None`` as value which is also the default now. .. versionadded:: 17.3.0 *auto_attribs* .. versionchanged:: 18.1.0 If *these* is passed, no attributes are deleted from the class body. .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. .. versionadded:: 18.2.0 *weakref_slot* .. deprecated:: 18.2.0 ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a `DeprecationWarning` if the classes compared are subclasses of each other. ``__eq`` and ``__ne__`` never tried to compared subclasses to each other. .. 
versionchanged:: 19.2.0 ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider subclasses comparable anymore. .. versionadded:: 18.2.0 *kw_only* .. versionadded:: 18.2.0 *cache_hash* .. versionadded:: 19.1.0 *auto_exc* .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. .. versionadded:: 19.2.0 *eq* and *order* .. versionadded:: 20.1.0 *auto_detect* .. versionadded:: 20.1.0 *collect_by_mro* .. versionadded:: 20.1.0 *getstate_setstate* .. versionadded:: 20.1.0 *on_setattr* .. versionadded:: 20.3.0 *field_transformer* .. versionchanged:: 21.1.0 ``init=False`` injects ``__attrs_init__`` .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` .. versionchanged:: 21.1.0 *cmp* undeprecated .. versionadded:: 21.3.0 *match_args* """""" if auto_detect and PY2: raise PythonTooOldError( ""auto_detect only works on Python 3 and later."" ) eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) hash_ = hash # work around the lack of nonlocal if isinstance(on_setattr, (list, tuple)): on_setattr = setters.pipe(*on_setattr) def wrap(cls): if getattr(cls, ""__class__"", None) is None: raise TypeError(""attrs only works with new-style classes."") is_frozen = frozen or _has_frozen_base_class(cls) is_exc = auto_exc is True and issubclass(cls, BaseException) has_own_setattr = auto_detect and _has_own_attribute( cls, ""__setattr__"" ) if has_own_setattr and is_frozen: raise ValueError(""Can't freeze a class with a custom __setattr__."") builder = _ClassBuilder( cls, these, slots, is_frozen, weakref_slot, _determine_whether_to_implement( cls, getstate_setstate, auto_detect, (""__getstate__"", ""__setstate__""), default=slots, ), auto_attribs, kw_only, cache_hash, is_exc, collect_by_mro, on_setattr, has_own_setattr, field_transformer, ) if _determine_whether_to_implement( cls, repr, auto_detect, (""__repr__"",) ): builder.add_repr(repr_ns) if str is True: builder.add_str() eq = _determine_whether_to_implement( cls, eq_, auto_detect, (""__eq__"", ""__ne__"") ) if not is_exc and eq is True: builder.add_eq() if not is_exc and _determine_whether_to_implement( cls, order_, auto_detect, (""__lt__"", ""__le__"", ""__gt__"", ""__ge__"") ): builder.add_order() builder.add_setattr() if ( hash_ is None and auto_detect is True and _has_own_attribute(cls, ""__hash__"") ): hash = False else: hash = hash_ if hash is not True and hash is not False and hash is not None: # Can't use `hash in` because 1 == True for example. raise TypeError( ""Invalid value for hash. Must be True, False, or None."" ) elif hash is False or (hash is None and eq is False) or is_exc: # Don't do anything. Should fall back to __object__'s __hash__ # which is by id. if cache_hash: raise TypeError( ""Invalid value for cache_hash. To use hash caching,"" "" hashing must be either explicitly or implicitly "" ""enabled."" ) elif hash is True or ( hash is None and eq is True and is_frozen is True ): # Build a __hash__ if told so, or if it's safe. builder.add_hash() else: # Raise TypeError on attempts to hash. if cache_hash: raise TypeError( ""Invalid value for cache_hash. To use hash caching,"" "" hashing must be either explicitly or implicitly "" ""enabled."" ) builder.make_unhashable() if _determine_whether_to_implement( cls, init, auto_detect, (""__init__"",) ): builder.add_init() else: builder.add_attrs_init() if cache_hash: raise TypeError( ""Invalid value for cache_hash. 
To use hash caching,"" "" init must be True."" ) if match_args and ""__match_args__"" not in cls.__dict__: builder.add_match_args() return builder.build_class() # maybe_cls's type depends on the usage of the decorator. It's a class # if it's used as `@attrs` but ``None`` if used as `@attrs()`. if maybe_cls is None: return wrap else: return wrap(maybe_cls) " 442,"def generate_from_case_export_instance(export_instance, output_file): assert isinstance(export_instance, CaseExportInstance) if not export_instance.selected_tables: raise DETConfigError(_(f'No Tables found in Export {export_instance.name}')) main_input_table = export_instance.selected_tables[0] main_output_table = DETTable( name=main_input_table.label, source='case', filter_name='type', filter_value=export_instance.case_type, rows=[], ) output = DETConfig(name=export_instance.name, tables=[main_output_table]) _add_rows_for_table(main_input_table, main_output_table, path_transform_fn=_transform_path_for_case_properties) # todo: add rows for other tables output.export_to_file(output_file) ","def generate_from_case_export_instance(export_instance, output_file): assert isinstance(export_instance, CaseExportInstance) if not export_instance.selected_tables: raise DETConfigError(_('No Tables found in Export {name}').format(name=export_instance.name)) main_input_table = export_instance.selected_tables[0] main_output_table = DETTable( name=main_input_table.label, source='case', filter_name='type', filter_value=export_instance.case_type, rows=[], ) output = DETConfig(name=export_instance.name, tables=[main_output_table]) _add_rows_for_table(main_input_table, main_output_table, path_transform_fn=_transform_path_for_case_properties) # todo: add rows for other tables output.export_to_file(output_file) " 1638,"def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. 
See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression() >>> clf.fit(X,y) LogisticRegression() >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=42) >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) ","def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. 
random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression().fit(X, y) >>> clf.fit(X,y) LogisticRegression() >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=42) >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) " 34979,"def is_let(var, value, body): """""" Syntatic sugar for creating an IfPattern. Parameters ---------- var: tvm.relay.dataflow_pattern.DFPattern The pattern describing the variable of Let. value: tvm.relay.dataflow_pattern.DFPattern The pattern describing the value of Let. body: tvm.relay.dataflow_pattern.DFPattern The pattern describing the body where the binding is in effect. Returns ------- result: tvm.relay.dataflow_pattern.DFPattern The resulting pattern. """""" return LetPattern(var, value, body) ","def is_let(var, value, body): """""" Syntatic sugar for creating a LetPattern. Parameters ---------- var: tvm.relay.dataflow_pattern.DFPattern The pattern describing the variable of Let. value: tvm.relay.dataflow_pattern.DFPattern The pattern describing the value of Let. body: tvm.relay.dataflow_pattern.DFPattern The pattern describing the body where the binding is in effect. Returns ------- result: tvm.relay.dataflow_pattern.DFPattern The resulting pattern. """""" return LetPattern(var, value, body) " 7315,"def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=0.5, overlap=.5, *, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_, [2]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. 
Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional The minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. exclude_border : tuple of ints, int, or False, optional If tuple of ints, the length of the tuple must match the input array's dimensionality. Each element of the tuple will exclude peaks from within `exclude_border`-pixels of the border of the image along that dimension. If nonzero int, `exclude_border` excludes peaks from within `exclude_border`-pixels of the border of the image. If zero or False, peaks are identified regardless of their distance from the border. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. See also -------- skimage.filters.difference_of_gaussians References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach .. [2] Lowe, D. G. ""Distinctive Image Features from Scale-Invariant Keypoints."" International Journal of Computer Vision 60, 91–110 (2004). https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf :DOI:`10.1023/B:VISI.0000029664.99615.94` Examples -------- >>> from skimage import data, feature >>> coins = data.coins() >>> feature.blob_dog(coins, threshold=.05, min_sigma=10, max_sigma=40) array([[128., 155., 10.], [198., 155., 10.], [124., 338., 10.], [127., 102., 10.], [193., 281., 10.], [126., 208., 10.], [267., 115., 10.], [197., 102., 10.], [198., 215., 10.], [123., 279., 10.], [126., 46., 10.], [259., 247., 10.], [196., 43., 10.], [ 54., 276., 10.], [267., 358., 10.], [ 58., 100., 10.], [259., 305., 10.], [185., 347., 16.], [261., 174., 16.], [ 46., 336., 16.], [ 54., 217., 10.], [ 55., 157., 10.], [ 57., 41., 10.], [260., 47., 16.]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. 
"""""" image = img_as_float(image) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) # if both min and max sigma are scalar, function returns only one sigma scalar_sigma = np.isscalar(max_sigma) and np.isscalar(min_sigma) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. This broadcasts scalar kernels if np.isscalar(max_sigma): max_sigma = np.full(image.ndim, max_sigma, dtype=float_dtype) if np.isscalar(min_sigma): min_sigma = np.full(image.ndim, min_sigma, dtype=float_dtype) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=float_dtype) max_sigma = np.asarray(max_sigma, dtype=float_dtype) if sigma_ratio <= 1.0: raise ValueError('sigma_ratio must be > 1.0') # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # normalization factor for consistency DoG magnitude sf = 1 / (sigma_ratio - 1) # computing difference between two successive Gaussian blurred images # to obtain an approximation of the scale invariant Laplacian of the # Gaussian operator dog_images = [ (gaussian_images[i] - gaussian_images[i + 1]) * sf for i in range(k) ] image_cube = np.stack(dog_images, axis=-1) exclude_border = _format_exclude_border(image.ndim, exclude_border) local_maxima = peak_local_max( image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border, ) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(float_dtype) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] if scalar_sigma: # select one sigma column, keeping dimension sigmas_of_peaks = sigmas_of_peaks[:, 0:1] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) sigma_dim = sigmas_of_peaks.shape[1] return _prune_blobs(lm, overlap, sigma_dim=sigma_dim) ","def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=0.5, overlap=.5, *, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_, [2]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional The minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. 
sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. exclude_border : tuple of ints, int, or False, optional If tuple of ints, the length of the tuple must match the input array's dimensionality. Each element of the tuple will exclude peaks from within `exclude_border`-pixels of the border of the image along that dimension. If nonzero int, `exclude_border` excludes peaks from within `exclude_border`-pixels of the border of the image. If zero or False, peaks are identified regardless of their distance from the border. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. See also -------- skimage.filters.difference_of_gaussians References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach .. [2] Lowe, D. G. ""Distinctive Image Features from Scale-Invariant Keypoints."" International Journal of Computer Vision 60, 91–110 (2004). https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf :DOI:`10.1023/B:VISI.0000029664.99615.94` Examples -------- >>> from skimage import data, feature >>> coins = data.coins() >>> feature.blob_dog(coins, threshold=.05, min_sigma=10, max_sigma=40) array([[128., 155., 10.], [198., 155., 10.], [124., 338., 10.], [127., 102., 10.], [193., 281., 10.], [126., 208., 10.], [267., 115., 10.], [197., 102., 10.], [198., 215., 10.], [123., 279., 10.], [126., 46., 10.], [259., 247., 10.], [196., 43., 10.], [ 54., 276., 10.], [267., 358., 10.], [ 58., 100., 10.], [259., 305., 10.], [185., 347., 16.], [261., 174., 16.], [ 46., 336., 16.], [ 54., 217., 10.], [ 55., 157., 10.], [ 57., 41., 10.], [260., 47., 16.]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """""" image = img_as_float(image) float_dtype = _supported_float_type(image.dtype) image = image.astype(float_dtype, copy=False) # if both min and max sigma are scalar, function returns only one sigma scalar_sigma = np.isscalar(max_sigma) and np.isscalar(min_sigma) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. 
This broadcasts scalar kernels if np.isscalar(max_sigma): max_sigma = np.full(image.ndim, max_sigma, dtype=float_dtype) if np.isscalar(min_sigma): min_sigma = np.full(image.ndim, min_sigma, dtype=float_dtype) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=float_dtype) max_sigma = np.asarray(max_sigma, dtype=float_dtype) if sigma_ratio <= 1.0: raise ValueError('sigma_ratio must be > 1.0') # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # normalization factor for consistency in DoG magnitude sf = 1 / (sigma_ratio - 1) # computing difference between two successive Gaussian blurred images # to obtain an approximation of the scale invariant Laplacian of the # Gaussian operator dog_images = [ (gaussian_images[i] - gaussian_images[i + 1]) * sf for i in range(k) ] image_cube = np.stack(dog_images, axis=-1) exclude_border = _format_exclude_border(image.ndim, exclude_border) local_maxima = peak_local_max( image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border, ) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(float_dtype) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] if scalar_sigma: # select one sigma column, keeping dimension sigmas_of_peaks = sigmas_of_peaks[:, 0:1] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) sigma_dim = sigmas_of_peaks.shape[1] return _prune_blobs(lm, overlap, sigma_dim=sigma_dim) " 8743,"def test_configparser_multi_lines(multi_fakeconfig): # spam assert multi_fakeconfig.spam.eggs == [ 'one', 'two', 'three', 'four', 'and a half', # no-breakline + comma ], 'Comma separated line: ""four"" and ""and a half"" must be separated' assert multi_fakeconfig.spam.bacons == [ 'grilled', 'burn out', 'greasy, fat, and tasty', ] assert multi_fakeconfig.spam.cheese == [ 'cheddar', 'reblochon', 'camembert', ] ","def test_configparser_multi_lines(multi_fakeconfig): # spam assert multi_fakeconfig.spam.eggs == [ 'one', 'two', 'three', 'four', 'and a half', # no-breakline + comma ], 'Comma separated line: ""four"" and ""and a half"" must be separated' assert multi_fakeconfig.spam.bacons == [ 'grilled', 'burn out', 'greasy, fat, and tasty', ] assert multi_fakeconfig.spam.cheeses == [ 'cheddar', 'reblochon', 'camembert', ] " 702,"def log_scrapy_info(settings): logger.info(""Scrapy %(version)s started (bot: %(bot)s)"", {'version': scrapy.__version__, 'bot': settings['BOT_NAME']}) logger.info(""Versions: %(versions)s"", {'versions': "", "".join(""%s %s"" % (name, version) for name, version in scrapy_components_versions() if name != ""Scrapy"")}) from twisted.internet import reactor logger.debug(""Using reactor: %s.%s"" % (reactor.__module__, reactor.__class__.__name__)) ","def log_scrapy_info(settings): logger.info(""Scrapy %(version)s started (bot: %(bot)s)"", {'version': scrapy.__version__, 'bot': settings['BOT_NAME']}) logger.info(""Versions: %(versions)s"", {'versions': "", "".join(""%s %s"" % (name, version) for name, version in 
scrapy_components_versions() if name != ""Scrapy"")}) from twisted.internet import reactor logger.debug(""Using reactor: %s.%s"", reactor.__module__, reactor.__class__.__name__) " 8059,"def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None): """""" Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> >>> # set up a new hdf5 table to demonstrate with >>> class FooBar(tables.IsDescription): # doctest: +SKIP ... foo = tables.Int32Col(pos=0) # doctest: +SKIP ... bar = tables.StringCol(6, pos=2) # doctest: +SKIP >>> >>> def setup_hdfs5_index(): ... import tables ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') ... h5file.create_group('/', 'testgroup', 'Test Group') ... h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') ... # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) ... for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... h5table.cols.foo.create_csindex() # CS index is required ... h5file.flush() ... h5file.close() >>> >>> setup_hdfs5_index() # doctest: +SKIP >>> ... # access the data, sorted by the indexed column ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', sortby='foo') # doctest: +SKIP >>> table2 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+ """""" assert sortby is not None, 'no column specified to sort by' return HDF5SortedView(source, where=where, name=name, sortby=sortby, checkCSI=checkCSI, start=start, stop=stop, step=step) ","def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None): """""" Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> >>> # set up a new hdf5 table to demonstrate with >>> class FooBar(tables.IsDescription): # doctest: +SKIP ... foo = tables.Int32Col(pos=0) # doctest: +SKIP ... bar = tables.StringCol(6, pos=2) # doctest: +SKIP >>> >>> def setup_hdf5_index(): ... import tables ... h5file = tables.open_file('example.h5', mode='w', ... title='Example file') ... h5file.create_group('/', 'testgroup', 'Test Group') ... h5table = h5file.create_table('/testgroup', 'testtable', FooBar, ... 'Test Table') ... # load some data into the table ... table1 = (('foo', 'bar'), ... (1, b'asdfgh'), ... (2, b'qwerty'), ... (3, b'zxcvbn')) ... for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... h5table.cols.foo.create_csindex() # CS index is required ... h5file.flush() ... h5file.close() >>> >>> setup_hdfs5_index() # doctest: +SKIP >>> ... # access the data, sorted by the indexed column ... 
table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', sortby='foo') # doctest: +SKIP >>> table2 # doctest: +SKIP +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+ """""" assert sortby is not None, 'no column specified to sort by' return HDF5SortedView(source, where=where, name=name, sortby=sortby, checkCSI=checkCSI, start=start, stop=stop, step=step) " 17355,"def convert_units(obj, to): if isinstance(obj, xr.Dataset): data_vars = { name: convert_units(array, to) for name, array in obj.data_vars.items() } coords = {name: convert_units(array, to) for name, array in obj.coords.items()} attrs = obj.attrs new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=attrs) elif isinstance(obj, xr.DataArray): name = obj.name new_units = ( to.get(name, None) or to.get(""data"", None) or to.get(None, None) or 1 ) data = convert_units(obj.data, {None: new_units}) coords = { name: (array.dims, convert_units(array.data, to)) for name, array in obj.coords.items() if name != obj.name } dims = obj.dims attrs = obj.attrs new_obj = xr.DataArray(name=name, data=data, coords=coords, dims=dims) elif isinstance(obj, unit_registry.Quantity): units = to.get(None) new_obj = obj.to(units) if units is not None else obj else: new_obj = obj return new_obj ","def convert_units(obj, to): if isinstance(obj, xr.Dataset): data_vars = { name: convert_units(array, to) for name, array in obj.data_vars.items() } coords = {name: convert_units(array, to) for name, array in obj.coords.items()} attrs = obj.attrs new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=attrs) elif isinstance(obj, xr.DataArray): name = obj.name new_units = ( to.get(name, None) or to.get(""data"", None) or to.get(None, None) or 1 ) data = convert_units(obj.data, {None: new_units}) coords = { name: (array.dims, convert_units(array.data, to)) for name, array in obj.coords.items() if name != obj.name } dims = obj.dims attrs = obj.attrs new_obj = xr.DataArray(name=name, data=data, coords=coords, dims=dims) elif isinstance(obj, unit_registry.Quantity): units = to.get() new_obj = obj.to(units) if units is not None else obj else: new_obj = obj return new_obj " 22329,"def seconds_to_str(value): """"""Convert seconds to a simple simple string describing the amount of time."""""" if value < 60: return ""%s seconds"" % round(value, 2) elif value < 3600: return ""%s minutes"" % round(value / 60, 2) else: return ""{} hours and {} minutes"".format(int(value / 3600), round((value % 3600) / 60, 2)) ","def seconds_to_str(value): """"""Convert seconds to a simple simple string describing the amount of time."""""" if value < 60: return ""%s seconds"" % round(value, 2) elif value < 3600: return ""%s minutes"" % round(value / 60, 2) else: return ""{} hours and {} minutes"".format(value // 3600, round((value % 3600) / 60, 2)) " 4837,"def add_tools_to_container(container, tools=default_toolbar_tools): """""" Add multiple tools to the container. Parameters ---------- container : Container `backend_bases.ToolContainerBase` object that will get the tools added. tools : list, optional List in the form ``[[group1, [tool1, tool2 ...]], [group2, [...]]]`` where the tools ``[tool1, and tool2, ...]`` will display in group1. See `add_tool` for details. 
"""""" for group, grouptools in tools: for position, tool in enumerate(grouptools): container.add_tool(tool, group, position) ","def add_tools_to_container(container, tools=default_toolbar_tools): """""" Add multiple tools to the container. Parameters ---------- container : Container `backend_bases.ToolContainerBase` object that will get the tools added. tools : list, optional List in the form ``[[group1, [tool1, tool2 ...]], [group2, [...]]]`` where the tools ``[tool1, tool2, ...]`` will display in group1. See `add_tool` for details. """""" for group, grouptools in tools: for position, tool in enumerate(grouptools): container.add_tool(tool, group, position) " 14625,"def compute_evaluation_metrics(metrics, labels, predictions, model_type, label_dict=None, grid_objective=None, probability=False, logger=None): """""" Compute given metrics to evaluate the given predictions generated by the given type of estimator against the given true labels. Parameters ---------- metrics : list of str List of metrics to compute. labels : array-like True labels to be used for computing the metrics. predictions : array-like The predictions to be used for computing the metrics. model_type : str One of ""classifier"" or ""regressor"". label_dict : dict, optional Dictionary mapping classes labels to indices for classification. Defaults to ``None``. grid_objective : str, optional The objective used for tuning the hyper-parameters of the model that generated the predictions. If ``None``, it means that no grid search was done. Defaults to ``None``. probability : bool, optional Does the model output class probabilities? Defaults to ``False``. logger : logging.Logger, optional A logger instance to use for logging messages and warnings. If ``None``, a new one is created. Defaults to ``None``. Returns ------- res : 5-tuple The confusion matrix, the overall accuracy, the per-label PRFs, the grid search objective function score, and the additional evaluation metrics, if any. For regressors, the first two elements are ``None``. """""" # set up the logger logger = logger if logger else logging.getLogger(__name__) # warn if grid objective was also specified in metrics if len(metrics) > 0 and grid_objective in metrics: logger.warning(f""The grid objective '{grid_objective}' is also "" f""specified as an evaluation metric. 
Since its "" f""value is already included in the results as the "" f""objective score, it will not be printed "" f""again in the list of metrics."") metrics = [metric for metric in metrics if metric != grid_objective] # initialize a dictionary that will hold all of the metric scores metric_scores = {metric: None for metric in metrics} # if we are a classifier and in probability mode, then # `yhat` are probabilities so we need to compute the # class indices separately and save them too if probability and model_type == 'classifier': class_probs = predictions predictions = np.argmax(class_probs, axis=1) # if we are a regressor or classifier not in probability # mode, then we have the class indices already and there # are no probabilities else: class_probs = None # make a single list of metrics including the grid objective # since it's easier to compute everything together metrics_to_compute = [grid_objective] + metrics for metric in metrics_to_compute: # skip the None if we are not doing grid search if not metric: continue # CASE 1: in probability mode for classification which means we # need to either use the probabilities directly or infer the labels # from them depending on the metric if probability: # there are three possible cases here: # (a) if we are using a correlation metric or # `average_precision` or `roc_auc` in a binary # classification scenario, then we need to explicitly # pass in the probabilities of the positive class. # (b) if we are using `neg_log_loss`, then we # just pass in the full probability array # (c) we compute the most likely labels from the # probabilities via argmax and use those # for all other metrics if (len(label_dict) == 2 and (metric in CORRELATION_METRICS or metric in ['average_precision', 'roc_auc']) and metric != grid_objective): logger.info(f""using probabilities for the positive class to "" f""compute '{metric}' for evaluation."") preds_for_metric = class_probs[:, 1] elif metric == 'neg_log_loss': preds_for_metric = class_probs else: preds_for_metric = predictions # CASE 2: no probability mode for classifier or regressor # in which case we just use the predictions as they are else: preds_for_metric = predictions try: metric_scores[metric] = use_score_func(metric, labels, preds_for_metric) except ValueError: metric_scores[metric] = float('NaN') # now separate out the grid objective score from the additional metric scores # if a grid objective was actually passed in. If no objective was passed in # then that score should just be none. 
objective_score = None additional_scores = metric_scores.copy() if grid_objective: objective_score = metric_scores[grid_objective] del additional_scores[grid_objective] # compute some basic statistics for regressors if model_type == 'regressor': result_dict = {'descriptive': defaultdict(dict)} for table_label, y in zip(['actual', 'predicted'], [labels, predictions]): result_dict['descriptive'][table_label]['min'] = min(y) result_dict['descriptive'][table_label]['max'] = max(y) result_dict['descriptive'][table_label]['avg'] = np.mean(y) result_dict['descriptive'][table_label]['std'] = np.std(y) result_dict['pearson'] = use_score_func('pearson', labels, predictions) res = (None, None, result_dict, objective_score, additional_scores) else: # compute the confusion matrix and precision/recall/f1 # note that we are using the class indices here # and not the actual class labels themselves num_labels = len(label_dict) conf_mat = confusion_matrix(labels, predictions, labels=list(range(num_labels))) # Calculate metrics overall_accuracy = accuracy_score(labels, predictions) result_matrix = precision_recall_fscore_support( labels, predictions, labels=list(range(num_labels)), average=None) # Store results result_dict = defaultdict(dict) for actual_label in sorted(label_dict): col = label_dict[actual_label] result_dict[actual_label][""Precision""] = result_matrix[0][col] result_dict[actual_label][""Recall""] = result_matrix[1][col] result_dict[actual_label][""F-measure""] = result_matrix[2][col] res = (conf_mat.tolist(), overall_accuracy, result_dict, objective_score, additional_scores) return res ","def compute_evaluation_metrics(metrics, labels, predictions, model_type, label_dict=None, grid_objective=None, probability=False, logger=None): """""" Compute given metrics to evaluate the given predictions generated by the given type of estimator against the given true labels. Parameters ---------- metrics : list of str List of metrics to compute. labels : array-like True labels to be used for computing the metrics. predictions : array-like The predictions to be used for computing the metrics. model_type : str One of ""classifier"" or ""regressor"". label_dict : dict, optional Dictionary mapping class labels to indices for classification. Defaults to ``None``. grid_objective : str, optional The objective used for tuning the hyper-parameters of the model that generated the predictions. If ``None``, it means that no grid search was done. Defaults to ``None``. probability : bool, optional Does the model output class probabilities? Defaults to ``False``. logger : logging.Logger, optional A logger instance to use for logging messages and warnings. If ``None``, a new one is created. Defaults to ``None``. Returns ------- res : 5-tuple The confusion matrix, the overall accuracy, the per-label PRFs, the grid search objective function score, and the additional evaluation metrics, if any. For regressors, the first two elements are ``None``. """""" # set up the logger logger = logger if logger else logging.getLogger(__name__) # warn if grid objective was also specified in metrics if len(metrics) > 0 and grid_objective in metrics: logger.warning(f""The grid objective '{grid_objective}' is also "" f""specified as an evaluation metric. 
Since its "" f""value is already included in the results as the "" f""objective score, it will not be printed "" f""again in the list of metrics."") metrics = [metric for metric in metrics if metric != grid_objective] # initialize a dictionary that will hold all of the metric scores metric_scores = {metric: None for metric in metrics} # if we are a classifier and in probability mode, then # `yhat` are probabilities so we need to compute the # class indices separately and save them too if probability and model_type == 'classifier': class_probs = predictions predictions = np.argmax(class_probs, axis=1) # if we are a regressor or classifier not in probability # mode, then we have the class indices already and there # are no probabilities else: class_probs = None # make a single list of metrics including the grid objective # since it's easier to compute everything together metrics_to_compute = [grid_objective] + metrics for metric in metrics_to_compute: # skip the None if we are not doing grid search if not metric: continue # CASE 1: in probability mode for classification which means we # need to either use the probabilities directly or infer the labels # from them depending on the metric if probability: # there are three possible cases here: # (a) if we are using a correlation metric or # `average_precision` or `roc_auc` in a binary # classification scenario, then we need to explicitly # pass in the probabilities of the positive class. # (b) if we are using `neg_log_loss`, then we # just pass in the full probability array # (c) we compute the most likely labels from the # probabilities via argmax and use those # for all other metrics if (len(label_dict) == 2 and (metric in CORRELATION_METRICS or metric in ['average_precision', 'roc_auc']) and metric != grid_objective): logger.info(f""using probabilities for the positive class to "" f""compute '{metric}' for evaluation."") preds_for_metric = class_probs[:, 1] elif metric == 'neg_log_loss': preds_for_metric = class_probs else: preds_for_metric = predictions # CASE 2: no probability mode for classifier or regressor # in which case we just use the predictions as they are else: preds_for_metric = predictions try: metric_scores[metric] = use_score_func(metric, labels, preds_for_metric) except ValueError: metric_scores[metric] = float('NaN') # now separate out the grid objective score from the additional metric scores # if a grid objective was actually passed in. If no objective was passed in # then that score should just be none. 
objective_score = None additional_scores = metric_scores.copy() if grid_objective: objective_score = metric_scores[grid_objective] del additional_scores[grid_objective] # compute some basic statistics for regressors if model_type == 'regressor': result_dict = {'descriptive': defaultdict(dict)} for table_label, y in zip(['actual', 'predicted'], [labels, predictions]): result_dict['descriptive'][table_label]['min'] = min(y) result_dict['descriptive'][table_label]['max'] = max(y) result_dict['descriptive'][table_label]['avg'] = np.mean(y) result_dict['descriptive'][table_label]['std'] = np.std(y) result_dict['pearson'] = use_score_func('pearson', labels, predictions) res = (None, None, result_dict, objective_score, additional_scores) else: # compute the confusion matrix and precision/recall/f1 # note that we are using the class indices here # and not the actual class labels themselves num_labels = len(label_dict) conf_mat = confusion_matrix(labels, predictions, labels=list(range(num_labels))) # Calculate metrics overall_accuracy = accuracy_score(labels, predictions) result_matrix = precision_recall_fscore_support( labels, predictions, labels=list(range(num_labels)), average=None) # Store results result_dict = defaultdict(dict) for actual_label in sorted(label_dict): col = label_dict[actual_label] result_dict[actual_label][""Precision""] = result_matrix[0][col] result_dict[actual_label][""Recall""] = result_matrix[1][col] result_dict[actual_label][""F-measure""] = result_matrix[2][col] res = (conf_mat.tolist(), overall_accuracy, result_dict, objective_score, additional_scores) return res " 32110,"def create_intel_command(client: Client, args: Dict[str, Any]) -> Dict: """""" create_intel command: Creates Intel in CTIX """""" data = { ""ips"": args.get(""ips"", []), ""urls"": args.get(""urls"", []), ""domains"": args.get(""domains"", []), ""files"": args.get(""files"", []), ""emails"": args.get(""emails"", []), ""malwares"": args.get(""malwares"", []), ""threat_actors"": args.get(""threat_actors"", []), ""attack_patterns"": args.get(""attack_patterns"", []), ""title"": args.get(""title"", None), ""description"": args.get(""description"", None), ""confidence"": args.get(""confidence"", None), ""tlp"": args.get(""tlp"", None), } create_intel_response = client.create_intel(data) return create_intel_response ","def create_intel_command(client: Client, args: Dict[str, Any]) -> Dict: """""" create_intel command: Creates Intel in CTIX """""" data = { ""ips"": args.get(""ips"", []), ""urls"": args.get(""urls"", []), ""domains"": args.get(""domains"", []), ""files"": args.get(""files"", []), ""emails"": args.get(""emails"", []), ""malwares"": args.get(""malwares"", []), ""threat_actors"": args.get(""threat_actors"", []), ""attack_patterns"": args.get(""attack_patterns"", []), ""title"": args.get(""title"", None), ""description"": args.get(""description""), ""confidence"": args.get(""confidence"", None), ""tlp"": args.get(""tlp"", None), } create_intel_response = client.create_intel(data) return create_intel_response " 12125,"def pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len, p_dur, return_cf=False): """""" Wrapper for P-picker routine by M. Baer, Schweizer Erdbebendienst. 
:param reltrc: time series as numpy.ndarray float32 data, possibly filtered :param samp_int: number of samples per second :param tdownmax: if dtime exceeds tdownmax, the trigger is examined for validity :param tupevent: min nr of samples for itrm to be accepted as a pick :param thr1: threshold to trigger for pick (c.f. paper) :param thr2: threshold for updating sigma (c.f. paper) :param preset_len: no of points taken for the estimation of variance of SF(t) on preset() :param p_dur: p_dur defines the time interval for which the maximum amplitude is evaluated Originally set to 6 secs :type return_cf: bool :param return_cf: If ``True``, also return the charachteristic function calculated by the C-routine. :return: (pptime, pfm [,cf]) pptime sample number of parrival; pfm direction of first motion (U or D), optionally also the numpy.ndarray float32 containing the values of the characteristic function. .. note:: currently the first sample is not taken into account .. seealso:: [Baer1987]_ """""" pptime = C.c_int() # c_chcar_p strings are immutable, use string_buffer for pointers pfm = C.create_string_buffer(b"" "", 5) # be nice and adapt type if necessary reltrc = np.ascontiguousarray(reltrc, np.float32) # Initiliaze CF array (MB) c_float_p = C.POINTER(C.c_float) cf_arr = np.ascontiguousarray(np.zeros(len(reltrc) - 1), np.float32) cf_p = cf_arr.ctypes.data_as(c_float_p) # index in pk_mbaer.c starts with 1, 0 index is lost, length must be # one shorter args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int, tdownmax, tupevent, thr1, thr2, preset_len, p_dur, cf_p) errcode = clibsignal.ppick(reltrc, *args) if errcode != 0: raise MemoryError(""Error in function ppick of mk_mbaer.c"") # Switch cf_arr param (MB) # add the sample to the time which is not taken into account # pfm has to be decoded from byte to string if return_cf: return pptime.value + 1, pfm.value.decode('utf-8'), cf_arr else: return pptime.value + 1, pfm.value.decode('utf-8') ","def pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len, p_dur, return_cf=False): """""" Wrapper for P-picker routine by M. Baer, Schweizer Erdbebendienst. :param reltrc: time series as numpy.ndarray float32 data, possibly filtered :param samp_int: number of samples per second :param tdownmax: if dtime exceeds tdownmax, the trigger is examined for validity :param tupevent: min nr of samples for itrm to be accepted as a pick :param thr1: threshold to trigger for pick (c.f. paper) :param thr2: threshold for updating sigma (c.f. paper) :param preset_len: no of points taken for the estimation of variance of SF(t) on preset() :param p_dur: p_dur defines the time interval for which the maximum amplitude is evaluated Originally set to 6 secs :type return_cf: bool :param return_cf: If ``True``, also return the charachteristic function calculated by the C-routine. :return: (pptime, pfm [,cf]) pptime sample number of parrival; pfm direction of first motion (U or D), optionally also the characteristic function. function. .. note:: currently the first sample is not taken into account .. 
seealso:: [Baer1987]_ """""" pptime = C.c_int() # c_chcar_p strings are immutable, use string_buffer for pointers pfm = C.create_string_buffer(b"" "", 5) # be nice and adapt type if necessary reltrc = np.ascontiguousarray(reltrc, np.float32) # Initiliaze CF array (MB) c_float_p = C.POINTER(C.c_float) cf_arr = np.ascontiguousarray(np.zeros(len(reltrc) - 1), np.float32) cf_p = cf_arr.ctypes.data_as(c_float_p) # index in pk_mbaer.c starts with 1, 0 index is lost, length must be # one shorter args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int, tdownmax, tupevent, thr1, thr2, preset_len, p_dur, cf_p) errcode = clibsignal.ppick(reltrc, *args) if errcode != 0: raise MemoryError(""Error in function ppick of mk_mbaer.c"") # Switch cf_arr param (MB) # add the sample to the time which is not taken into account # pfm has to be decoded from byte to string if return_cf: return pptime.value + 1, pfm.value.decode('utf-8'), cf_arr else: return pptime.value + 1, pfm.value.decode('utf-8') " 43953,"def _diff2(i, j, ri, rj, alpha, beta): r""""""Compute the second order differentiated integral needed for evaluating a kinetic integral. The second order integral :math:`D_{ij}^2`, where :math:`i` and :math:`j` denote angular momentum components of Gaussian functions, is computed from overlap integrals :math:`S` and the Gaussian exponent :math:`\beta` as [`Helgaker (1995) p804 `_]: .. math:: D_{ij}^2 = j(j-1)S_{i,j-2}^0 - 2\beta(2j+1)S_{i,j}^0 + 4\beta^2 S_{i,j+2}^0. Args: i (integer): angular momentum component for the first Gaussian function j (integer): angular momentum component for the second Gaussian function ri (float): position component of the the first Gaussian function ri (float): position component of the the second Gaussian function alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function Returns: array[float]: second order differentiated integral between two Gaussian functions """""" p = alpha + beta d1 = j * (j - 1) * anp.sqrt(anp.pi / p) * expansion(i, j - 2, ri, rj, alpha, beta, 0) d2 = -2 * beta * (2 * j + 1) * anp.sqrt(anp.pi / p) * expansion(i, j, ri, rj, alpha, beta, 0) d3 = 4 * beta ** 2 * anp.sqrt(anp.pi / p) * expansion(i, j + 2, ri, rj, alpha, beta, 0) return d1 + d2 + d3 ","def _diff2(i, j, ri, rj, alpha, beta): r""""""Compute the second order differentiated integral needed for evaluating a kinetic integral. The second-order integral :math:`D_{ij}^2`, where :math:`i` and :math:`j` denote angular momentum components of Gaussian functions, is computed from overlap integrals :math:`S` and the Gaussian exponent :math:`\beta` as [`Helgaker (1995) p804 `_]: .. math:: D_{ij}^2 = j(j-1)S_{i,j-2}^0 - 2\beta(2j+1)S_{i,j}^0 + 4\beta^2 S_{i,j+2}^0. 
Args: i (integer): angular momentum component for the first Gaussian function j (integer): angular momentum component for the second Gaussian function ri (float): position component of the the first Gaussian function ri (float): position component of the the second Gaussian function alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function Returns: array[float]: second order differentiated integral between two Gaussian functions """""" p = alpha + beta d1 = j * (j - 1) * anp.sqrt(anp.pi / p) * expansion(i, j - 2, ri, rj, alpha, beta, 0) d2 = -2 * beta * (2 * j + 1) * anp.sqrt(anp.pi / p) * expansion(i, j, ri, rj, alpha, beta, 0) d3 = 4 * beta ** 2 * anp.sqrt(anp.pi / p) * expansion(i, j + 2, ri, rj, alpha, beta, 0) return d1 + d2 + d3 " 31349,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave 
optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'picus-attack-single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() 
demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 31717,"def main(): args = demisto.args() list_name = args['list_name'] parse_all = args['parse_all'] header = args.get('header', '') value = args.get('value', '') list_separator = args.get('list_separator', ',') return_results(parse_list(parse_all, header, value, list_name, list_separator)) ","def main(): args = demisto.args() list_name = args['list_name'] parse_all = args['parse_all'] header = args.get('header', '') value = args.get('value', '') list_separator = args.get('list_separator', ',') or ',' return_results(parse_list(parse_all, header, value, list_name, list_separator)) " 14074,"def geom_almost_equals_mask(this, that): """""" Test for 'almost' geometric equality. Empty or missing geometries considered equal. This method allows small difference in the coordinates, but this requires coordinates be in the same order for all components of a geometry. Parameters ---------- this, that : arrays of Geo objects (or anything that has an `is_empty` property) Returns ------- Series boolean Series, True if geometries in left almost equal geometries in right """""" return ( this.geom_almost_equals(that) | (this.is_empty & that.is_empty) | (_isna(this) & _isna(that)) ) ","def _geom_almost_equals_mask(this, that): """""" Test for 'almost' geometric equality. Empty or missing geometries considered equal. This method allows small difference in the coordinates, but this requires coordinates be in the same order for all components of a geometry. 
Parameters ---------- this, that : arrays of Geo objects (or anything that has an `is_empty` property) Returns ------- Series boolean Series, True if geometries in left almost equal geometries in right """""" return ( this.geom_almost_equals(that) | (this.is_empty & that.is_empty) | (_isna(this) & _isna(that)) ) " 32368,"def test_get_contributing_event_command(requests_mock): from CortexXDRIR import get_contributing_event_command, Client contributing_events = load_test_data('./test_data/contributing_events.json') requests_mock.post(f'{XDR_URL}/public_api/v1/alerts/get_correlation_alert_data/', json=contributing_events) client = Client( base_url=f'{XDR_URL}/public_api/v1', headers={} ) args = { ""alert_ids"": ""[1111]"", } response = get_contributing_event_command(client, args) assert response.outputs[0].get('alertID') == args.get('alert_ids').replace('[', '').replace(']', '') assert len(response.outputs[0].get('events')) == 1 ","def test_get_contributing_event_command(requests_mock): from CortexXDRIR import get_contributing_event_command, Client contributing_events = load_test_data('./test_data/contributing_events.json') requests_mock.post(f'{XDR_URL}/public_api/v1/alerts/get_correlation_alert_data/', json=contributing_events) client = Client( base_url=f'{XDR_URL}/public_api/v1', headers={} ) args = { ""alert_ids"": ""[1111]"", } response = get_contributing_event_command(client, args) assert response.outputs[0].get('alertID') == args.get('alert_ids').strip(""[]"") assert len(response.outputs[0].get('events')) == 1 " 8393,"def template_redshift(observed_spectrum, template_spectrum, min_redshift, max_redshift, delta_redshift): """""" Find the most accurate redshift for template_spectrum to match observed_spectrum using chi2 Parameters ---------- observed_spectrum : :class:`~specutils.Spectrum1D` The observed spectrum. 
template_spectrum : :class:`~specutils.Spectrum1D` The template spectrum, which will have it's redshift calculated min_redshift : `float` The minimum redshift allowed max_redshift : `float` The maximum redshift allowed delta_redshift : `float` The amount the redshift will change between loops Returns ------- final_redshift : `float` The most accurate redshift for template_spectrum to match the observed_spectrum redshifted_spectrum: :class:`~specutils.Spectrum1D` A new Spectrum1D object which incorporates the template_spectrum with a spectral_axis that has been redshifted using the final_redshift """""" if not (min_redshift and max_redshift and delta_redshift) or (min_redshift>max_redshift): return redshift = min_redshift chi2_min = None final_redshift = None # Loop which goes through available redshift options and finds the smallest chi2 while redshift <= max_redshift: # Create new redshifted spectrum and run it through the chi2 method redshifted_spectrum = Spectrum1D(spectral_axis=template_spectrum.spectral_axis*(1+redshift), flux=template_spectrum.flux) normalized_spectral_template, chi2 = _chi_sqaure_for_templates( observed_spectrum, redshifted_spectrum, ""flux_conserving"") # Set new chi2_min if suitable replacement is found if not np.isnan(chi2) and (chi2_min is None or chi2 < chi2_min): chi2_min = chi2 final_redshift = redshift redshift += delta_redshift return final_redshift, redshifted_spectrum ","def template_redshift(observed_spectrum, template_spectrum, min_redshift, max_redshift, delta_redshift): """""" Find the most accurate redshift for template_spectrum to match observed_spectrum using chi2 Parameters ---------- observed_spectrum : :class:`~specutils.Spectrum1D` The observed spectrum. template_spectrum : :class:`~specutils.Spectrum1D` The template spectrum, which will have it's redshift calculated min_redshift : `float` The minimum redshift allowed max_redshift : `float` The maximum redshift allowed delta_redshift : `float` The amount the redshift will change between loops Returns ------- final_redshift : `float` The most accurate redshift for template_spectrum to match the observed_spectrum redshifted_spectrum: :class:`~specutils.Spectrum1D` A new Spectrum1D object which incorporates the template_spectrum with a spectral_axis that has been redshifted using the final_redshift """""" if not (min_redshift and max_redshift and delta_redshift) or (min_redshift>max_redshift): return redshift = min_redshift chi2_min = None final_redshift = None # Loop which goes through available redshift options and finds the smallest chi2 while redshift <= max_redshift: # Create new redshifted spectrum and run it through the chi2 method redshifted_spectrum = Spectrum1D(spectral_axis=template_spectrum.spectral_axis*(1+redshift), flux=template_spectrum.flux) normalized_spectral_template, chi2 = _chi_square_for_templates( observed_spectrum, redshifted_spectrum, ""flux_conserving"") # Set new chi2_min if suitable replacement is found if not np.isnan(chi2) and (chi2_min is None or chi2 < chi2_min): chi2_min = chi2 final_redshift = redshift redshift += delta_redshift return final_redshift, redshifted_spectrum " 45782,"def spatial_soft_argmax2d( input: torch.Tensor, temperature: torch.Tensor = torch.tensor(1.0), normalized_coordinates: bool = True, eps: float = 1e-8) -> torch.Tensor: r""""""Function that computes the Spatial Soft-Argmax 2D of a given input heatmap. Returns the index of the maximum 2d coordinates of the give map. The output order is x-coord and y-coord. 
Arguments: temperature (torch.Tensor): factor to apply to input. Default is 1. normalized_coordinates (bool): whether to return the coordinates normalized in the range of [-1, 1]. Otherwise, it will return the coordinates in the range of the input shape. Default is True. eps (float): small value to avoid zero division. Default is 1e-8. Shape: - Input: :math:`(B, N, H, W)` - Output: :math:`(B, N, 2)` Examples: >>> input = torch.tensor([[[ [0., 0., 0.], [0., 10., 0.], [0., 0., 0.]]]]) >>> coords = kornia.spatial_soft_argmax2d(input, False) tensor([[[1.0000, 1.0000]]]) """""" input_soft: torch.Tensor = dsnt.spatial_softmax_2d(input, temperature) output: torch.Tensor = dsnt.spatial_softargmax_2d(input, normalized_coordinates) return output ","def spatial_soft_argmax2d( input: torch.Tensor, temperature: torch.Tensor = torch.tensor(1.0), normalized_coordinates: bool = True, eps: float = 1e-8) -> torch.Tensor: r""""""Function that computes the Spatial Soft-Argmax 2D of a given input heatmap. Returns the index of the maximum 2d coordinates of the give map. The output order is x-coord and y-coord. Arguments: temperature (torch.Tensor): factor to apply to input. Default is 1. normalized_coordinates (bool): whether to return the coordinates normalized in the range of [-1, 1]. Otherwise, it will return the coordinates in the range of the input shape. Default is True. eps (float): small value to avoid zero division. Default is 1e-8. Shape: - Input: :math:`(B, N, H, W)` - Output: :math:`(B, N, 2)` Examples: >>> input = torch.tensor([[[ [0., 0., 0.], [0., 10., 0.], [0., 0., 0.]]]]) >>> coords = kornia.spatial_soft_argmax2d(input, False) tensor([[[1.0000, 1.0000]]]) """""" input_soft: torch.Tensor = dsnt.spatial_softmax_2d(input, temperature) output: torch.Tensor = dsnt.spatial_softargmax_2d(input_soft, normalized_coordinates) return output " 14394,"def porkchop(body_dpt, body_arr, dpt_start, dpt_end, arr_start, arr_end, N=50): """"""Plots porkchop between two bodies. Parameters ---------- body_dpt: poliastro.bodies.Body Body for launch body_arr: poliastro.bodies.Body Body for arrival dpt_start: str Porkchop launch date starts in this value dpt_end: str Porkchop launch date ends in this value arr_start: str Porkchop arrival date starts in this value arr_end: str Porkchop arrival date ends in this value Returns ------- dpt: np.array Departure time span arr: np.array Arrival time span deltav_dpt: np.ndarray Departure velocity needed for each time of flight deltav_arr: np.ndarray Arrival velocity needed for each time of flight c3_dpt: np.ndarray Characteristic launch energy c3_arr: np.ndarray Characteristic arrival energy Example ------- # Time requirements YYYY-MM-DD # Data is from porkchop pag. 
180 >>> from poliastro.plotting.porkchop import porkchop >>> from poliastro.bodies import Earth, Mars >>> import matplotlib.pyplot as plt >>> departure_start = ""2005-04-30"" >>> departure_end = ""2005-10-07"" >>> arrival_start = ""2005-11-16"" >>> arrival_end = ""2006-12-21"" >>> dpt, arr, dv_dpt, dv_arr, c3dpt, c3arr = porkchop(Earth, Mars, departure_start, departure_end, arrival_start, arrival_end) >>> plt.show() """""" # Computing time spans fot departure and arrival dpt = [ Time(d, format=""jd"") for d in np.linspace(Time(dpt_start).jd, Time(dpt_end).jd, N + 1) ] arr = [ Time(d, format=""jd"") for d in np.linspace(Time(arr_start).jd, Time(arr_end).jd, N + 1) ] # Prellocate in memory the arrays deltav_dpt = np.zeros((len(dpt), len(arr))) deltav_arr = np.zeros((len(dpt), len(arr))) c3_dpt = np.zeros((len(dpt), len(arr))) c3_arr = np.zeros((len(dpt), len(arr))) iso_tof = np.zeros((len(dpt), len(arr))) idx = 0 for d in dpt: dv_dpt, dv_arr, c3_d, c3_a, t_flight = lambert_porkchop_vectorized( body_dpt, body_arr, d, arr ) deltav_dpt[idx] = dv_dpt deltav_arr[idx] = dv_arr c3_dpt[idx] = c3_d c3_arr[idx] = c3_a iso_tof[idx] = t_flight idx += 1 """""" Algorithm works: 'for each launch get all arrivals'. Contourf works: 'for each Y -> all X'. We need to transpose the arrays. """""" fig, ax = plt.subplots(figsize=(15, 15)) c3_levels = np.linspace(0, 45, 30) t_levels = np.linspace(100, 500, 5) c = plt.contourf( [D.to_datetime() for D in dpt], [A.to_datetime() for A in arr], np.transpose(c3_dpt), c3_levels, ) l = plt.contour( [D.to_datetime() for D in dpt], [A.to_datetime() for A in arr], np.transpose(c3_dpt), c3_levels, colors=""black"", linestyles=""solid"", ) t = plt.contour( [D.to_datetime() for D in dpt], [A.to_datetime() for A in arr], np.transpose(iso_tof), t_levels, colors=""red"", linestyles=""dashed"", linewidths=3.5, ) cbar = plt.colorbar(c) cbar.set_label(""$km^2/s^2$"") plt.clabel(l, inline=1, fmt=""%1.1f"", colors=""k"", fontsize=10) plt.clabel(t, inline=1, fmt=""%1.1f"", colors=""r"", fontsize=14) plt.grid() fig.autofmt_xdate() plt.title( ""{} - {} for year {}, C3 Launch, TFL"".format( body_dpt.name, body_arr.name, dpt[0].datetime.year ), fontsize=14, fontweight=""bold"", ) plt.xlabel(""Launch date"", fontsize=10, fontweight=""bold"") plt.ylabel(""Arrival date"", fontsize=10, fontweight=""bold"") plt.show() return dpt, arr, deltav_dpt, deltav_arr, c3_dpt, c3_arr ","def porkchop(body_dpt, body_arr, dpt_start, dpt_end, arr_start, arr_end, N=50): """"""Plots porkchop between two bodies. Parameters ---------- body_dpt: poliastro.bodies.Body Body for launch body_arr: poliastro.bodies.Body Body for arrival dpt_start: str Porkchop launch date starts in this value dpt_end: str Porkchop launch date ends in this value arr_start: str Porkchop arrival date starts in this value arr_end: str Porkchop arrival date ends in this value Returns ------- dpt: np.array Departure time span arr: np.array Arrival time span deltav_dpt: np.ndarray Departure velocity needed for each time of flight deltav_arr: np.ndarray Arrival velocity needed for each time of flight c3_dpt: np.ndarray Characteristic launch energy c3_arr: np.ndarray Characteristic arrival energy Example ------- # Time requirements YYYY-MM-DD # Data is from porkchop pag. 
180 >>> from poliastro.plotting.porkchop import porkchop >>> from poliastro.bodies import Earth, Mars >>> import matplotlib.pyplot as plt >>> departure_start = ""2005-04-30"" >>> departure_end = ""2005-10-07"" >>> arrival_start = ""2005-11-16"" >>> arrival_end = ""2006-12-21"" >>> dpt, arr, dv_dpt, dv_arr, c3dpt, c3arr = porkchop(Earth, Mars, departure_start, departure_end, arrival_start, arrival_end) >>> plt.show() """""" # Computing time spans fot departure and arrival dpt = [ Time(d, format=""jd"") for d in np.linspace(Time(dpt_start).jd, Time(dpt_end).jd, N + 1) ] arr = [ Time(d, format=""jd"") for d in np.linspace(Time(arr_start).jd, Time(arr_end).jd, N + 1) ] # Prellocate in memory the arrays deltav_dpt = np.zeros((len(dpt), len(arr))) deltav_arr = np.zeros((len(dpt), len(arr))) c3_dpt = np.zeros((len(dpt), len(arr))) c3_arr = np.zeros((len(dpt), len(arr))) iso_tof = np.zeros((len(dpt), len(arr))) idx = 0 for d in dpt: dv_dpt, dv_arr, c3_d, c3_a, t_flight = lambert_porkchop_vectorized( body_dpt, body_arr, d, arr ) deltav_dpt[idx] = dv_dpt deltav_arr[idx] = dv_arr c3_dpt[idx] = c3_d c3_arr[idx] = c3_a iso_tof[idx] = t_flight idx += 1 """""" Algorithm works: 'for each launch get all arrivals'. Contourf works: 'for each Y -> all X'. We need to transpose the arrays. """""" fig, ax = plt.subplots(figsize=(15, 15)) c3_levels = np.linspace(0, 45, 30) t_levels = np.linspace(100, 500, 5) c = plt.contourf( [D.to_datetime() for D in dpt], [A.to_datetime() for A in arr], np.transpose(c3_dpt), c3_levels, ) l = plt.contour( [D.to_datetime() for D in dpt], [A.to_datetime() for A in arr], np.transpose(c3_dpt), c3_levels, colors=""black"", linestyles=""solid"", ) t = plt.contour( [D.to_datetime() for D in dpt], [A.to_datetime() for A in arr], iso_tof.T, t_levels, colors=""red"", linestyles=""dashed"", linewidths=3.5, ) cbar = plt.colorbar(c) cbar.set_label(""$km^2/s^2$"") plt.clabel(l, inline=1, fmt=""%1.1f"", colors=""k"", fontsize=10) plt.clabel(t, inline=1, fmt=""%1.1f"", colors=""r"", fontsize=14) plt.grid() fig.autofmt_xdate() plt.title( ""{} - {} for year {}, C3 Launch, TFL"".format( body_dpt.name, body_arr.name, dpt[0].datetime.year ), fontsize=14, fontweight=""bold"", ) plt.xlabel(""Launch date"", fontsize=10, fontweight=""bold"") plt.ylabel(""Arrival date"", fontsize=10, fontweight=""bold"") plt.show() return dpt, arr, deltav_dpt, deltav_arr, c3_dpt, c3_arr " 8368,"def _isophote_list_to_table(isophote_list, key_properties=['main']): """""" Convert an `~photutils.isophote.IsophoteList` instance to a `~astropy.table.QTable`. Parameters ---------- isophote_list : list of `~photutils.isophote.Isophote` or \ `~photutils.isophote.IsophoteList` instance A list of isophotes. key_properties : A list of properties to export from the isophote_list If key_properties = ['all'] or ['main'], it will pick all or few of the main properties. Returns ------- result : `~astropy.table.QTable` An astropy QTable with the selected or all isophote parameters. """""" properties = OrderedDict() isotable = QTable() # main_properties: `List` # A list of main parameters matching the original names of # the isophote_list parameters def __rename_properties(properties, orig_names = ['int_err', 'eps', 'ellip_err', 'grad_r_error', 'nflag'], new_names = ['intens_err', 'ellipticity', 'ellipticity_err', 'grad_rerror', 'nflag'] ): ''' Simple renaming for some of the isophote_list parameters. 
Parameters ---------- properties: `OrderedDict` An OrderedDict with the list of the isophote_list parameters orig_names: `List` A list of original names in the isophote_list parameters to be renamed new_names: `List` A list of new names matching in length of the orig_names Returns ------- properties: `OrderedDict` An OrderedDict with the list of the renamed isophote_list parameters ''' main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err', 'pa', 'pa_err', 'grad', 'grad_error', 'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err', 'ndata', 'nflag', 'niter', 'stop_code'] for an_item in main_properties: if an_item in orig_names: properties[an_item] = new_names[orig_names.index(an_item)] else: properties[an_item] = an_item return properties if 'all' in key_properties: properties = _get_properties(isophote_list) properties = __rename_properties(properties) elif 'main' in key_properties: properties = __rename_properties(properties) else: for an_item in key_properties: properties[an_item] = an_item for k, v in properties.items(): isotable[v] = np.array([getattr(iso, k) for iso in isophote_list]) if k in ('pa', 'pa_err'): isotable[v] = isotable[v] * 180. / np.pi * u.deg return isotable ","def _isophote_list_to_table(isophote_list, key_properties=['main']): """""" Convert an `~photutils.isophote.IsophoteList` instance to a `~astropy.table.QTable`. Parameters ---------- isophote_list : list of `~photutils.isophote.Isophote` or \ `~photutils.isophote.IsophoteList` instance A list of isophotes. key_properties : A list of properties to export from the isophote_list If key_properties = ['all'] or ['main'], it will pick all or few of the main properties. Returns ------- result : `~astropy.table.QTable` An astropy QTable with the selected or all isophote parameters. """""" properties = OrderedDict() isotable = QTable() # main_properties: `List` # A list of main parameters matching the original names of # the isophote_list parameters def __rename_properties(properties, orig_names = ['int_err', 'eps', 'ellip_err', 'grad_r_error', 'nflag'], new_names = ['intens_err', 'ellipticity', 'ellipticity_err', 'grad_rerror', 'nflag'] ): ''' Simple renaming for some of the isophote_list parameters. Parameters ---------- properties: `OrderedDict` An OrderedDict with the list of the isophote_list parameters orig_names: `List` A list of original names in the isophote_list parameters to be renamed new_names: `List` A list of new names matching in length of the orig_names Returns ------- properties: `OrderedDict` An OrderedDict with the list of the renamed isophote_list parameters ''' main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err', 'pa', 'pa_err', 'grad', 'grad_error', 'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err', 'ndata', 'nflag', 'niter', 'stop_code'] for an_item in main_properties: if an_item in orig_names: properties[an_item] = new_names[orig_names.index(an_item)] else: properties[an_item] = an_item return properties if 'all' in key_properties: properties = _get_properties(isophote_list) properties = __rename_properties(properties) elif 'main' in key_properties: properties = __rename_properties(properties) else: for an_item in columns: properties[an_item] = an_item for k, v in properties.items(): isotable[v] = np.array([getattr(iso, k) for iso in isophote_list]) if k in ('pa', 'pa_err'): isotable[v] = isotable[v] * 180. 
/ np.pi * u.deg return isotable " 6335,"def test_matcher(wmsClient: WMSClient): # insert a proper DN to run the test resourceDescription = { ""OwnerGroup"": ""prod"", ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""DIRACVersion"": ""pippo"", ""GridCE"": ""some.grid.ce.org"", ""ReleaseVersion"": ""blabla"", ""VirtualOrganization"": ""LHCb"", ""PilotInfoReportedFlag"": ""True"", ""PilotBenchmark"": ""anotherPilot"", ""Site"": ""DIRAC.Jenkins.ch"", ""CPUTime"": 86400, } job = helloWorldJob() job.setDestination(""DIRAC.Jenkins.ch"") job.setInputData(""/a/bbb"") job.setType(""User"") jobDescription = createFile(job) res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription)) assert res[""OK""] is True, res[""Message""] jobID = res[""Value""] # forcing the update res = JobStateUpdateClient().setJobStatus(jobID, JobStatus.WAITING, ""matching"", ""source"", None, True) assert res[""OK""] is True, res[""Message""] tqDB = TaskQueueDB() tqDefDict = { ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""OwnerGroup"": ""prod"", ""Setup"": ""dirac-JenkinsSetup"", ""CPUTime"": 86400, } res = tqDB.insertJob(jobID, tqDefDict, 10) assert res[""OK""] is True, res[""Message""] res = MatcherClient().requestJob(resourceDescription) print(res) assert res[""OK""] is True, res[""Message""] wmsClient.deleteJob(jobID) ","def test_matcher(wmsClient: WMSClient): # insert a proper DN to run the test resourceDescription = { ""OwnerGroup"": ""prod"", ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""DIRACVersion"": ""pippo"", ""GridCE"": ""some.grid.ce.org"", ""ReleaseVersion"": ""blabla"", ""VirtualOrganization"": ""LHCb"", ""PilotInfoReportedFlag"": ""True"", ""PilotBenchmark"": ""anotherPilot"", ""Site"": ""DIRAC.Jenkins.ch"", ""CPUTime"": 86400, } job = helloWorldJob() job.setDestination(""DIRAC.Jenkins.ch"") job.setInputData(""/a/bbb"") job.setType(""User"") jobDescription = createFile(job) res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription)) assert res[""OK""] is True, res[""Message""] jobID = res[""Value""] # forcing the update res = JobStateUpdateClient().setJobStatus(jobID, JobStatus.WAITING, ""matching"", ""source"", None, True) assert res[""OK""] is True, res[""Message""] tqDB = TaskQueueDB() tqDefDict = { ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""OwnerGroup"": ""prod"", ""Setup"": ""dirac-JenkinsSetup"", ""CPUTime"": 86400, } res = tqDB.insertJob(jobID, tqDefDict, 10) assert res[""OK""], res[""Message""] res = MatcherClient().requestJob(resourceDescription) print(res) assert res[""OK""] is True, res[""Message""] wmsClient.deleteJob(jobID) " 41892,"def test_generate_contour_plot_for_few_observations() -> None: study = prepare_study_with_trials(less_than_two=True) trials = study.trials # `x_axis` has one observation. params = [""param_a"", ""param_b""] contour, scatter = _generate_contour_subplot( trials, params[0], params[1], StudyDirection.MINIMIZE, {} ) assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None # `y_axis` has one observation. params = [""param_b"", ""param_a""] contour, scatter = _generate_contour_subplot( trials, params[0], params[1], StudyDirection.MINIMIZE, {} ) assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None ","def test_generate_contour_plot_for_few_observations() -> None: study = prepare_study_with_trials(less_than_two=True) trials = study.trials # `x_axis` has one observation. 
params = [""param_a"", ""param_b""] contour, scatter = _generate_contour_subplot( trials, params[0], params[1], StudyDirection.MINIMIZE, {} ) assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None # `y_axis` has one observation. params = [""param_b"", ""param_a""] contour, scatter = _generate_contour_subplot( trials, params[0], params[1], StudyDirection.MINIMIZE ) assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None " 49143,"def _stdin_thread_start(stdin_port, display): """"""Standard input reader thread entry point."""""" try: # Note: read length should be as large as possible for performance # yet not too large to not introduce artificial latency. # 64k seems to be perfect with an openssh backend (they issue 64k # reads) ; could consider making it an option for e.g. gsissh. bufsize = 64 * 1024 # thread loop: read stdin + send messages to specified port object # use select to work around https://bugs.python.org/issue42717 if select([sys.stdin], [], [], 0) == ([sys.stdin], [], []): buf = sys_stdin().read(bufsize) # use buffer in Python 3 while buf: # send message to specified port object (with ack) stdin_port.msg(buf) if select([sys.stdin], [], [], 0) == ([sys.stdin], [], []): buf = sys_stdin().read(bufsize) except IOError as ex: display.vprint(VERB_VERB, ""stdin: %s"" % ex) # send a None message to indicate EOF stdin_port.msg(None) ","def _stdin_thread_start(stdin_port, display): """"""Standard input reader thread entry point."""""" try: # Note: read length should be as large as possible for performance # yet not too large to not introduce artificial latency. # 64k seems to be perfect with an openssh backend (they issue 64k # reads) ; could consider making it an option for e.g. gsissh. bufsize = 64 * 1024 # thread loop: read stdin + send messages to specified port object # use select to work around https://bugs.python.org/issue42717 while True: if not select([sys_stdin()], [], [], None) == ([sys_stdin()], [], []): break buf = os.read(sys_stdin().fileno(), bufsize) # use os.read to allow partial read if not buf: break # send message to specified port object (with ack) stdin_port.msg(buf) except IOError as ex: display.vprint(VERB_VERB, ""stdin: %s"" % ex) # send a None message to indicate EOF stdin_port.msg(None) " 30009,"def build_categorical(param): if param.weights is not None: raise ValueError('The pcs format does not support categorical hyperparameters with ' 'assigend weights (for hyperparameter %s)' % param.name) cat_template = ""%s categorical {%s} [%s]"" return cat_template % (param.name, "", "".join([str(value) for value in param.choices]), str(param.default_value)) ","def build_categorical(param): if param.weights is not None: raise ValueError('The pcs format does not support categorical hyperparameters with ' 'assigned weights (for hyperparameter %s)' % param.name) cat_template = ""%s categorical {%s} [%s]"" return cat_template % (param.name, "", "".join([str(value) for value in param.choices]), str(param.default_value)) " 58252,"def _transform_headers(data): # type: (Dict[str, str]) -> Dict[str, str] return {key.lower(): value for key, value in data.items() if key.lower() not in (""cookie"", ""set-cookie"")} ","def _transform_headers(data): # type: (Dict[str, str]) -> Dict[str, str] normalized = {} for header, value in data.items(): header = header.lower() if header in (""cookie"", ""set-cookie""): # TODO: Move this tuple to a frozenset ? 
continue normalized[header] = value return normalized " 8956,"def find_internal_plugins(): """"""List internal plugins. :return: yield instance of :class:`~.handlers.PyModulePlugin` configured for ``sopel.modules.*`` Internal plugins can be found under ``sopel.modules``. This list does not include the ``coretasks`` plugin. """""" plugin_dir = imp.find_module( 'modules', [imp.find_module('sopel')[1]] )[1] for name, _ in _list_plugin_filenames(plugin_dir): yield handlers.PyModulePlugin(name, 'sopel.modules') ","def find_internal_plugins(): """"""List internal plugins. :return: yield instances of :class:`~.handlers.PyModulePlugin` configured for ``sopel.modules.*`` Internal plugins can be found under ``sopel.modules``. This list does not include the ``coretasks`` plugin. """""" plugin_dir = imp.find_module( 'modules', [imp.find_module('sopel')[1]] )[1] for name, _ in _list_plugin_filenames(plugin_dir): yield handlers.PyModulePlugin(name, 'sopel.modules') " 2292,"def _make_array_out(X_out, index, get_feature_names_out, *, array_out=""default""): """"""Construct array container based on global configuration. Parameters ---------- X_out: {ndarray, sparse matrix} of shape (n_samples, n_features_out) Output data to be wrapped. index: array-like of shape (n_samples,) Index of output data. get_features_names_out: callable Returns the feature names out. If the callable returns None, then the feature names will be [""X0"", ""X1"", ...]. array_out : {""default"", ""pandas""}, default=""default"" Specify the output array type. If ""pandas"", a pandas DataFrame is returned. If ""default"", an array-like without feature names is returned. Return ------ array_out: {ndarray, sparse matrix, dataframe} of shape \ (n_samples, n_features_out) Wrapped array with feature names. """""" if array_out not in {'default', 'pandas'}: raise ValueError(""array_out must be 'default' or 'pandas'"") if array_out == ""default"": return X_out feature_names_out = get_feature_names_out() if feature_names_out is None: feature_names_out = [f'X{i}' for i in range(X_out.shape[1])] # array_out == ""pandas"" import pandas as pd if sp_sparse.issparse(X_out): make_dataframe = pd.DataFrame.sparse.from_spmatrix else: make_dataframe = pd.DataFrame return make_dataframe(X_out, columns=feature_names_out, index=index) ","def _make_array_out(X_out, index, get_feature_names_out, *, array_out=""default""): """"""Construct array container based on global configuration. Parameters ---------- X_out: {ndarray, sparse matrix} of shape (n_samples, n_features_out) Output data to be wrapped. index: array-like of shape (n_samples,) Index of output data. get_features_names_out: callable Returns the feature names out. If the callable returns None, then the feature names will be [""X0"", ""X1"", ...]. array_out : {""default"", ""pandas""}, default=""default"" Specify the output array type. If ""pandas"", a pandas DataFrame is returned. If ""default"", `X_out` is returned unmodified. Return ------ array_out: {ndarray, sparse matrix, dataframe} of shape \ (n_samples, n_features_out) Wrapped array with feature names. 
"""""" if array_out not in {'default', 'pandas'}: raise ValueError(""array_out must be 'default' or 'pandas'"") if array_out == ""default"": return X_out feature_names_out = get_feature_names_out() if feature_names_out is None: feature_names_out = [f'X{i}' for i in range(X_out.shape[1])] # array_out == ""pandas"" import pandas as pd if sp_sparse.issparse(X_out): make_dataframe = pd.DataFrame.sparse.from_spmatrix else: make_dataframe = pd.DataFrame return make_dataframe(X_out, columns=feature_names_out, index=index) " 53936,"def merge_freshness_time_thresholds( base: Optional[Time], update: Optional[Time] ) -> Optional[Time]: if base is not None and update is not None: return base.merged(update) elif base is None and update is not None: return update elif base is not None and update is None: return base else: # base and update are none return None ","def merge_freshness_time_thresholds( base: Optional[Time], update: Optional[Time] ) -> Optional[Time]: try: return base.merged(update) except AttributeError: return update or base " 45656,"def layout(): return html.Div(id='oncoprint-body', children=[ dash_bio.OncoPrint( id='oncoprint-chart', height=550, data=[] ), html.Div(id='oncoprint-control-tabs', children=[ dcc.Tabs( id='oncoprint-tabs', children=[ dcc.Tab( label='About', value='what-is', children=html.Div(className='oncoprint-tab', children=[ html.H4( ""What is OncoPrint?"" ), html.P( """""" The OncoPrint component is used to view multiple genetic alteration events through an interactive and zoomable heatmap. It is a React/Dash port of the popular oncoPrint() function from the BioConductor R package. Under the hood, the rendering is done using Plotly.js built upon D3. Plotly's interactivity allows the user to bind clicks and hovers to genetic events, allowing the user to create complex bioinformatic apps or workflows that rely on crossfiltering. """""" ), html.P( """""" Read more about the component here: https://github.com/plotly/react-oncoprint """""" ) ]) ), dcc.Tab( label='Data', value='data', children=html.Div(className='oncoprint-tab', children=[ html.Div([ html.Div( className='oncoprint-option-name', children='Select dataset' ), dcc.Dropdown( id='oncoprint-dropdown', className='oncoprint-select', options=[ { 'label': '{}.json'.format(ds), 'value': ds } for ds in DATASETS ], value='cBioPortalData', ), ]), html.Hr( className='oncoprint-separator' ), html.Div([ html.H4('Hover, click, or event data'), html.Div( id='oncoprint-events' ), ]) ]) ), dcc.Tab( label='View', value='view', children=html.Div(className='oncoprint-tab', children=[ html.H4('Layout'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Overview' ), daq.ToggleSwitch( id='oncoprint-show-overview', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Legend' ), daq.ToggleSwitch( id='oncoprint-show-legend', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Padding' ), dcc.Slider( className='oncoprint-slider', id='oncoprint-padding-input', value=0.05, min=0, max=0.1, step=0.01, marks={ '0': '0', '0.02': '0.02', '0.04': '0.04', '0.06': '0.06', '0.08': '0.08', '0.1': '0.1', }, ), html.Br(), html.Div( 'Adjust the padding (as percentage) ' 'between two tracks.' 
), ], ), html.Hr(className='oncoprint-separator'), html.Div([ html.H4('Colors'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Track color' ), html.P( 'Change the default background ' 'color for the tracks.' ), daq.ColorPicker( id='oncoprint-tracks-color', value={'hex': '#AAAAAA'} ), ], ), html.Hr(className='oncoprint-separator'), html.H6(""Mutation colors""), html.P( ""Select a mutation type and a color "" ""to customize its look."" ), html.Div(children=[ html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation type' ), dcc.Dropdown( id='oncoprint-colorscale-mutation-dropdown', options=[ {'label': mut_type, 'value': mut_type} for mut_type in COLORSCALE_MUTATIONS_OPT ], value=COLORSCALE_MUTATIONS_OPT[0], ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation color' ), daq.ColorPicker( id='oncoprint-mutation-color', value={'hex': COLORSCALE_COLORS_OPT[0]} ) ], ), ]) ]) ]) ) ] ) ]), dcc.Store(id='oncoprint-store'), ]), ","def layout(): return html.Div(id='oncoprint-body', children=[ dash_bio.OncoPrint( id='oncoprint-chart', height=550, data=[] ), html.Div(id='oncoprint-control-tabs', children=[ dcc.Tabs( id='oncoprint-tabs', children=[ dcc.Tab( label='About', value='what-is', children=html.Div(className='oncoprint-tab', children=[ html.H4( ""What is OncoPrint?"" ), html.P( """""" The OncoPrint component is used to view multiple genomic alteration events through an interactive and zoomable heatmap. It is a React/Dash port of the popular oncoPrint() function from the BioConductor R package. Under the hood, the rendering is done using Plotly.js built upon D3. Plotly's interactivity allows the user to bind clicks and hovers to genetic events, allowing the user to create complex bioinformatic apps or workflows that rely on crossfiltering. """""" ), html.P( """""" Read more about the component here: https://github.com/plotly/react-oncoprint """""" ) ]) ), dcc.Tab( label='Data', value='data', children=html.Div(className='oncoprint-tab', children=[ html.Div([ html.Div( className='oncoprint-option-name', children='Select dataset' ), dcc.Dropdown( id='oncoprint-dropdown', className='oncoprint-select', options=[ { 'label': '{}.json'.format(ds), 'value': ds } for ds in DATASETS ], value='cBioPortalData', ), ]), html.Hr( className='oncoprint-separator' ), html.Div([ html.H4('Hover, click, or event data'), html.Div( id='oncoprint-events' ), ]) ]) ), dcc.Tab( label='View', value='view', children=html.Div(className='oncoprint-tab', children=[ html.H4('Layout'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Overview' ), daq.ToggleSwitch( id='oncoprint-show-overview', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Legend' ), daq.ToggleSwitch( id='oncoprint-show-legend', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Padding' ), dcc.Slider( className='oncoprint-slider', id='oncoprint-padding-input', value=0.05, min=0, max=0.1, step=0.01, marks={ '0': '0', '0.02': '0.02', '0.04': '0.04', '0.06': '0.06', '0.08': '0.08', '0.1': '0.1', }, ), html.Br(), html.Div( 'Adjust the padding (as percentage) ' 'between two tracks.' 
), ], ), html.Hr(className='oncoprint-separator'), html.Div([ html.H4('Colors'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Track color' ), html.P( 'Change the default background ' 'color for the tracks.' ), daq.ColorPicker( id='oncoprint-tracks-color', value={'hex': '#AAAAAA'} ), ], ), html.Hr(className='oncoprint-separator'), html.H6(""Mutation colors""), html.P( ""Select a mutation type and a color "" ""to customize its look."" ), html.Div(children=[ html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation type' ), dcc.Dropdown( id='oncoprint-colorscale-mutation-dropdown', options=[ {'label': mut_type, 'value': mut_type} for mut_type in COLORSCALE_MUTATIONS_OPT ], value=COLORSCALE_MUTATIONS_OPT[0], ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation color' ), daq.ColorPicker( id='oncoprint-mutation-color', value={'hex': COLORSCALE_COLORS_OPT[0]} ) ], ), ]) ]) ]) ) ] ) ]), dcc.Store(id='oncoprint-store'), ]), " 1891,"def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100, random_state=None): """""" Find the null space of a matrix M. Parameters ---------- M : {array, matrix, sparse matrix, LinearOperator} Input covariance matrix: should be symmetric positive semi-definite k : integer Number of eigenvalues/vectors to return k_skip : integer, default=1 Number of low eigenvalues to skip. eigen_solver : string, {'auto', 'arpack', 'dense'}, default='arpack' auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. tol : float, default=1e-6 Tolerance for 'arpack' method. Not used if eigen_solver=='dense'. max_iter : int, default=100 Maximum number of iterations for 'arpack' method. Not used if eigen_solver=='dense' random_state : int, RandomState instance, default=None Determines the random number generator when ``solver`` == 'arpack'. Pass an int for reproducible results across multiple function calls. See :term: `Glossary `. """""" if eigen_solver == 'auto': if M.shape[0] > 200 and k + k_skip < 10: eigen_solver = 'arpack' else: eigen_solver = 'dense' if eigen_solver == 'arpack': random_state = check_random_state(random_state) # initialize with [-1,1] as in ARPACK v0 = random_state.uniform(-1, 1, M.shape[0]) try: eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0) except RuntimeError as msg: raise ValueError(""Error in determining null-space with ARPACK. "" ""Error message: '%s'. "" ""Note that method='arpack' can fail when the "" ""weight matrix is singular or otherwise "" ""ill-behaved. method='dense' is recommended. 
"" ""See online documentation for more information."" % msg) return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) elif eigen_solver == 'dense': if hasattr(M, 'toarray'): M = M.toarray() eigen_values, eigen_vectors = eigh( M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True) index = np.argsort(np.abs(eigen_values)) return eigen_vectors[:, index], np.sum(eigen_values) else: raise ValueError(""Unrecognized eigen_solver '%s'"" % eigen_solver) ","def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100, random_state=None): """""" Find the null space of a matrix M. Parameters ---------- M : {array, matrix, sparse matrix, LinearOperator} Input covariance matrix: should be symmetric positive semi-definite k : integer Number of eigenvalues/vectors to return k_skip : int, default=1 Number of low eigenvalues to skip. eigen_solver : string, {'auto', 'arpack', 'dense'}, default='arpack' auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. tol : float, default=1e-6 Tolerance for 'arpack' method. Not used if eigen_solver=='dense'. max_iter : int, default=100 Maximum number of iterations for 'arpack' method. Not used if eigen_solver=='dense' random_state : int, RandomState instance, default=None Determines the random number generator when ``solver`` == 'arpack'. Pass an int for reproducible results across multiple function calls. See :term: `Glossary `. """""" if eigen_solver == 'auto': if M.shape[0] > 200 and k + k_skip < 10: eigen_solver = 'arpack' else: eigen_solver = 'dense' if eigen_solver == 'arpack': random_state = check_random_state(random_state) # initialize with [-1,1] as in ARPACK v0 = random_state.uniform(-1, 1, M.shape[0]) try: eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0) except RuntimeError as msg: raise ValueError(""Error in determining null-space with ARPACK. "" ""Error message: '%s'. "" ""Note that method='arpack' can fail when the "" ""weight matrix is singular or otherwise "" ""ill-behaved. method='dense' is recommended. "" ""See online documentation for more information."" % msg) return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) elif eigen_solver == 'dense': if hasattr(M, 'toarray'): M = M.toarray() eigen_values, eigen_vectors = eigh( M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True) index = np.argsort(np.abs(eigen_values)) return eigen_vectors[:, index], np.sum(eigen_values) else: raise ValueError(""Unrecognized eigen_solver '%s'"" % eigen_solver) " 31107,"def execute_get_devices_data_command(client: MobileIronCoreClient, query: str) -> CommandResults: """"""get-devices command: Returns a list of all devices in the mobileiron system based on the query provided. 
This command might execute multiple API calls if there are a large amount of device to fetch :type client: ``MobileIronCoreClient`` :param client: MobileIron UEM API client to use :type query: ``str`` :param query: query to execute :return: A ``CommandResults`` object that is then passed to ``return_results``, that contains the device data :rtype: ``CommandResults`` """""" params = demisto.params() args = demisto.args() max_fetch = args.get('max_fetch') max_fetch = int(args.get('max_fetch')) if max_fetch else None additional_fields = args.get('additional_fields') if additional_fields: fields = ','.join([STANDARD_DEVICE_FIELDS, additional_fields]) else: fields = STANDARD_DEVICE_FIELDS admin_space_id = params.get('admin_space_id') devices_data_response = client.get_devices_data(admin_space_id=admin_space_id, query=query, fields=fields, max_fetch=max_fetch) return CommandResults( # readable_output=readable_output, outputs_prefix='MobileIronCore.Device', outputs_key_field='common.id', outputs=devices_data_response ) ","def execute_get_devices_data_command(client: MobileIronCoreClient, query: str) -> CommandResults: """"""get-devices command: Returns a list of all devices in the mobileiron system based on the query provided. This command might execute multiple API calls if there are a large amount of device to fetch :type client: ``MobileIronCoreClient`` :param client: MobileIron UEM API client to use :type query: ``str`` :param query: query to execute :return: A ``CommandResults`` object that is then passed to ``return_results``, that contains the device data :rtype: ``CommandResults`` """""" params = demisto.params() args = demisto.args() max_fetch = args.get('max_fetch') max_fetch = min(int(args.get('max_fetch', 50)), 200) additional_fields = args.get('additional_fields') if additional_fields: fields = ','.join([STANDARD_DEVICE_FIELDS, additional_fields]) else: fields = STANDARD_DEVICE_FIELDS admin_space_id = params.get('admin_space_id') devices_data_response = client.get_devices_data(admin_space_id=admin_space_id, query=query, fields=fields, max_fetch=max_fetch) return CommandResults( # readable_output=readable_output, outputs_prefix='MobileIronCore.Device', outputs_key_field='common.id', outputs=devices_data_response ) " 43967,"def generate_scf(mol, n_steps=50, tol=1e-8): r""""""Return a function that performs the self-consistent-field iterations. Args: mol (Molecule): the molecule object n_steps (int): the number of iterations tol (float): convergence tolerance Returns: function: function that performs the self-consistent-field iterations **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True), >>> mol = Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> v_fock, coeffs, fock_matrix, h_core, repulsion_tensor = generate_hartree_fock(mol)(*args) >>> v_fock array([-0.67578019, 0.94181155]) """""" def scf(*args): r""""""Perform the self-consistent-field iterations. 
Args: args (array[array[float]]): initial values of the differentiable parameters Returns: tuple(array[float]): eigenvalues of the Fock matrix, molecular orbital coefficients, Fock matrix, core matrix """""" basis_functions = mol.basis_set charges = mol.nuclear_charges r = mol.coordinates n_electron = mol.n_electrons if r.requires_grad: repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args[1:]) s = generate_overlap_matrix(basis_functions)(*args[1:]) else: repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args) s = generate_overlap_matrix(basis_functions)(*args) h_core = generate_core_matrix(basis_functions, charges, r)(*args) w, v = anp.linalg.eigh(s) x = v @ anp.diag(anp.array([1 / anp.sqrt(i) for i in w])) @ v.T v_fock, w_fock = anp.linalg.eigh(x.T @ h_core @ x) coeffs = x @ w_fock p = molecular_density_matrix(n_electron, coeffs) for _ in range(n_steps): j = anp.einsum(""pqrs,rs->pq"", repulsion_tensor, p) k = anp.einsum(""psqr,rs->pq"", repulsion_tensor, p) fock_matrix = h_core + 2 * j - k v_fock, w_fock = anp.linalg.eigh(x.T @ fock_matrix @ x) coeffs = x @ w_fock p_update = molecular_density_matrix(n_electron, coeffs) if anp.linalg.norm(p_update - p) <= tol: break p = p_update return v_fock, coeffs, fock_matrix, h_core, repulsion_tensor return scf ","def generate_scf(mol, n_steps=50, tol=1e-8): r""""""Return a function that performs the self-consistent-field calculations. Args: mol (Molecule): the molecule object n_steps (int): the number of iterations tol (float): convergence tolerance Returns: function: function that performs the self-consistent-field iterations **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True), >>> mol = Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> v_fock, coeffs, fock_matrix, h_core, repulsion_tensor = generate_hartree_fock(mol)(*args) >>> v_fock array([-0.67578019, 0.94181155]) """""" def scf(*args): r""""""Perform the self-consistent-field iterations. 
Args: args (array[array[float]]): initial values of the differentiable parameters Returns: tuple(array[float]): eigenvalues of the Fock matrix, molecular orbital coefficients, Fock matrix, core matrix """""" basis_functions = mol.basis_set charges = mol.nuclear_charges r = mol.coordinates n_electron = mol.n_electrons if r.requires_grad: repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args[1:]) s = generate_overlap_matrix(basis_functions)(*args[1:]) else: repulsion_tensor = generate_repulsion_tensor(basis_functions)(*args) s = generate_overlap_matrix(basis_functions)(*args) h_core = generate_core_matrix(basis_functions, charges, r)(*args) w, v = anp.linalg.eigh(s) x = v @ anp.diag(anp.array([1 / anp.sqrt(i) for i in w])) @ v.T v_fock, w_fock = anp.linalg.eigh(x.T @ h_core @ x) coeffs = x @ w_fock p = molecular_density_matrix(n_electron, coeffs) for _ in range(n_steps): j = anp.einsum(""pqrs,rs->pq"", repulsion_tensor, p) k = anp.einsum(""psqr,rs->pq"", repulsion_tensor, p) fock_matrix = h_core + 2 * j - k v_fock, w_fock = anp.linalg.eigh(x.T @ fock_matrix @ x) coeffs = x @ w_fock p_update = molecular_density_matrix(n_electron, coeffs) if anp.linalg.norm(p_update - p) <= tol: break p = p_update return v_fock, coeffs, fock_matrix, h_core, repulsion_tensor return scf " 35224,"def _complement_commit_status( repo: str, pull_req: int, token: Optional[str], projects: Set[str], context_prefix: str) -> None: gh_repo = github.Github(token).get_repo(repo) gh_commit = gh_repo.get_commit(gh_repo.get_pull(pull_req).head.sha) _log(f'Checking statuses: {repo}, PR #{pull_req}, commit {gh_commit.sha}') contexts = [s.context for s in gh_commit.get_statuses()] for prj in projects: context = f'{context_prefix}/{prj}' if context in contexts: # Preserve status set via previous (real) CI run. continue _log(f'Setting status as skipped: {context}') gh_commit.create_status( state='success', description='Skipped', context=context) ","def _complement_commit_status( repo: str, pull_req: int, token: str, projects: Set[str], context_prefix: str) -> None: gh_repo = github.Github(token).get_repo(repo) gh_commit = gh_repo.get_commit(gh_repo.get_pull(pull_req).head.sha) _log(f'Checking statuses: {repo}, PR #{pull_req}, commit {gh_commit.sha}') contexts = [s.context for s in gh_commit.get_statuses()] for prj in projects: context = f'{context_prefix}/{prj}' if context in contexts: # Preserve status set via previous (real) CI run. continue _log(f'Setting status as skipped: {context}') gh_commit.create_status( state='success', description='Skipped', context=context) " 14019,"def get_srid_from_crs(gdf): """""" Get EPSG code from CRS if available. If not, return -1. """""" from pyproj import CRS if gdf.crs is not None: try: if isinstance(gdf.crs, dict): # If CRS is in dictionary format use only the value # to avoid pyproj Future warning if ""init"" in gdf.crs.keys(): srid = CRS(gdf.crs[""init""]).to_epsg(min_confidence=25) else: srid = CRS(gdf.crs).to_epsg(min_confidence=25) else: srid = CRS(gdf).to_epsg(min_confidence=25) if srid is None: srid = -1 except Exception: srid = -1 print( ""Warning: Could not parse CRS from the GeoDataFrame."", ""Inserting data without defined CRS."", ) return srid ","def _get_srid_from_crs(gdf): """""" Get EPSG code from CRS if available. If not, return -1. 
"""""" from pyproj import CRS if gdf.crs is not None: try: if isinstance(gdf.crs, dict): # If CRS is in dictionary format use only the value # to avoid pyproj Future warning if ""init"" in gdf.crs.keys(): srid = CRS(gdf.crs[""init""]).to_epsg(min_confidence=25) else: srid = CRS(gdf.crs).to_epsg(min_confidence=25) else: srid = CRS(gdf).to_epsg(min_confidence=25) if srid is None: srid = -1 except Exception: srid = -1 print( ""Warning: Could not parse CRS from the GeoDataFrame."", ""Inserting data without defined CRS."", ) return srid " 39493,"def df_index_expr(self, length_expr=None, as_range=False): """"""Generate expression to get or create index of DF"""""" if isinstance(self.index, types.NoneType): if length_expr is None: length_expr = df_length_expr(self) if as_range: return f'range(0, {length_expr})' else: return f'numpy.arange({length_expr})' return 'self._index' ","def df_index_expr(self, length_expr=None, as_range=False): """"""Generate expression to get or create index of DF"""""" if isinstance(self.index, types.NoneType): if length_expr is None: length_expr = df_length_expr(self) if as_range: return f'range({length_expr})' else: return f'numpy.arange({length_expr})' return 'self._index' " 25724,"def test_check_shapes__disable() -> None: with disable_check_shapes(): @check_shapes( ""a: [d...]"", ""b: [d...]"", ""return: [d...]"", ) def f(a: TensorType, b: TensorType) -> TensorType: return a f(t(2, 3), t(2, 4)) # Wrong shape, but checks disabled. f(t(2, 3), t(2, 4)) # Wrong shape, but checks were disable when function was created. @check_shapes( ""a: [d...]"", ""b: [d...]"", ""return: [d...]"", ) def g(a: TensorType, b: TensorType) -> TensorType: return a with pytest.raises(ShapeMismatchError): g(t(2, 3), t(2, 4)) with disable_check_shapes(): f(t(2, 3), t(2, 4)) # Wrong shape, but checks disabled. with pytest.raises(ShapeMismatchError): g(t(2, 3), t(2, 4)) ","def test_check_shapes__disable() -> None: with disable_check_shapes(): @check_shapes( ""a: [d...]"", ""b: [d...]"", ""return: [d...]"", ) def f(a: TensorType, b: TensorType) -> TensorType: return a f(t(2, 3), t(2, 4)) # Wrong shape, but checks disabled. f(t(2, 3), t(2, 4)) # Wrong shape, but checks were disable when function was created. @check_shapes( ""a: [d...]"", ""b: [d...]"", ""return: [d...]"", ) def g(a: TensorType, b: TensorType) -> TensorType: return a with pytest.raises(ShapeMismatchError): g(t(2, 3), t(2, 4)) with disable_check_shapes(): g(t(2, 3), t(2, 4)) # Wrong shape, but checks disabled. with pytest.raises(ShapeMismatchError): g(t(2, 3), t(2, 4)) " 13499,"def register_sizes(regs, in_sizes): """"""Create dictionaries over register sizes and relations Given a list of lists of overlapping register names (e.g. ['eax','ax','al','ah']) and a list of input sizes, it returns the following: * all_regs : list of all valid registers * sizes[reg] : the size of reg in bits * bigger[reg] : list of overlapping registers bigger than reg * smaller[reg]: list of overlapping registers smaller than reg Used in i386/AMD64 shellcode, e.g. the mov-shellcode. Example: >>> regs = [['eax', 'ax', 'al', 'ah'],['ebx', 'bx', 'bl', 'bh'], ... ['ecx', 'cx', 'cl', 'ch'], ... ['edx', 'dx', 'dl', 'dh'], ... ['edi', 'di'], ... ['esi', 'si'], ... ['ebp', 'bp'], ... ['esp', 'sp'], ... 
] >>> all_regs, sizes, bigger, smaller = register_sizes(regs, [32, 16, 8, 8]) >>> all_regs ['eax', 'ax', 'al', 'ah', 'ebx', 'bx', 'bl', 'bh', 'ecx', 'cx', 'cl', 'ch', 'edx', 'dx', 'dl', 'dh', 'edi', 'di', 'esi', 'si', 'ebp', 'bp', 'esp', 'sp'] >>> pprint(sizes) {'ah': 8, 'al': 8, 'ax': 16, 'bh': 8, 'bl': 8, 'bp': 16, 'bx': 16, 'ch': 8, 'cl': 8, 'cx': 16, 'dh': 8, 'di': 16, 'dl': 8, 'dx': 16, 'eax': 32, 'ebp': 32, 'ebx': 32, 'ecx': 32, 'edi': 32, 'edx': 32, 'esi': 32, 'esp': 32, 'si': 16, 'sp': 16} >>> pprint(bigger) {'ah': ['eax', 'ax', 'ah'], 'al': ['eax', 'ax', 'al'], 'ax': ['eax', 'ax'], 'bh': ['ebx', 'bx', 'bh'], 'bl': ['ebx', 'bx', 'bl'], 'bp': ['ebp', 'bp'], 'bx': ['ebx', 'bx'], 'ch': ['ecx', 'cx', 'ch'], 'cl': ['ecx', 'cx', 'cl'], 'cx': ['ecx', 'cx'], 'dh': ['edx', 'dx', 'dh'], 'di': ['edi', 'di'], 'dl': ['edx', 'dx', 'dl'], 'dx': ['edx', 'dx'], 'eax': ['eax'], 'ebp': ['ebp'], 'ebx': ['ebx'], 'ecx': ['ecx'], 'edi': ['edi'], 'edx': ['edx'], 'esi': ['esi'], 'esp': ['esp'], 'si': ['esi', 'si'], 'sp': ['esp', 'sp']} >>> pprint(smaller) {'ah': [], 'al': [], 'ax': ['al', 'ah'], 'bh': [], 'bl': [], 'bp': [], 'bx': ['bl', 'bh'], 'ch': [], 'cl': [], 'cx': ['cl', 'ch'], 'dh': [], 'di': [], 'dl': [], 'dx': ['dl', 'dh'], 'eax': ['ax', 'al', 'ah'], 'ebp': ['bp'], 'ebx': ['bx', 'bl', 'bh'], 'ecx': ['cx', 'cl', 'ch'], 'edi': ['di'], 'edx': ['dx', 'dl', 'dh'], 'esi': ['si'], 'esp': ['sp'], 'si': [], 'sp': []} """""" sizes = {} bigger = {} smaller = {} for l in regs: for r, s in zip(l, in_sizes): sizes[r] = s for r in l: bigger[r] = [r_ for r_ in l if sizes[r_] > sizes[r] or r == r_] smaller[r] = [r_ for r_ in l if sizes[r_] < sizes[r]] return lists.concat(regs), sizes, bigger, smaller ","def register_sizes(regs, in_sizes): """"""Create dictionaries over register sizes and relations Given a list of lists of overlapping register names (e.g. ['eax','ax','al','ah']) and a list of input sizes, it returns the following: * all_regs : list of all valid registers * sizes[reg] : the size of reg in bits * bigger[reg] : list of overlapping registers bigger than reg * smaller[reg]: list of overlapping registers smaller than reg Used in i386/AMD64 shellcode, e.g. the mov-shellcode. Example: >>> regs = [['eax', 'ax', 'al', 'ah'],['ebx', 'bx', 'bl', 'bh'], ... ['ecx', 'cx', 'cl', 'ch'], ... ['edx', 'dx', 'dl', 'dh'], ... ['edi', 'di'], ... ['esi', 'si'], ... ['ebp', 'bp'], ... ['esp', 'sp'], ... 
] >>> all_regs, sizes, bigger, smaller = register_sizes(regs, [32, 16, 8, 8]) >>> all_regs ['eax', 'ax', 'al', 'ah', 'ebx', 'bx', 'bl', 'bh', 'ecx', 'cx', 'cl', 'ch', 'edx', 'dx', 'dl', 'dh', 'edi', 'di', 'esi', 'si', 'ebp', 'bp', 'esp', 'sp'] >>> pprint(sizes) {'ah': 8, 'al': 8, 'ax': 16, 'bh': 8, 'bl': 8, 'bp': 16, 'bx': 16, 'ch': 8, 'cl': 8, 'cx': 16, 'dh': 8, 'di': 16, 'dl': 8, 'dx': 16, 'eax': 32, 'ebp': 32, 'ebx': 32, 'ecx': 32, 'edi': 32, 'edx': 32, 'esi': 32, 'esp': 32, 'si': 16, 'sp': 16} >>> pprint(bigger) {'ah': ['eax', 'ax', 'ah'], 'al': ['eax', 'ax', 'al'], 'ax': ['eax', 'ax'], 'bh': ['ebx', 'bx', 'bh'], 'bl': ['ebx', 'bx', 'bl'], 'bp': ['ebp', 'bp'], 'bx': ['ebx', 'bx'], 'ch': ['ecx', 'cx', 'ch'], 'cl': ['ecx', 'cx', 'cl'], 'cx': ['ecx', 'cx'], 'dh': ['edx', 'dx', 'dh'], 'di': ['edi', 'di'], 'dl': ['edx', 'dx', 'dl'], 'dx': ['edx', 'dx'], 'eax': ['eax'], 'ebp': ['ebp'], 'ebx': ['ebx'], 'ecx': ['ecx'], 'edi': ['edi'], 'edx': ['edx'], 'esi': ['esi'], 'esp': ['esp'], 'si': ['esi', 'si'], 'sp': ['esp', 'sp']} >>> pprint(smaller) {'ah': [], 'al': [], 'ax': ['al', 'ah'], 'bh': [], 'bl': [], 'bp': [], 'bx': ['bl', 'bh'], 'ch': [], 'cl': [], 'cx': ['cl', 'ch'], 'dh': [], 'di': [], 'dl': [], 'dx': ['dl', 'dh'], 'eax': ['ax', 'al', 'ah'], 'ebp': ['bp'], 'ebx': ['bx', 'bl', 'bh'], 'ecx': ['cx', 'cl', 'ch'], 'edi': ['di'], 'edx': ['dx', 'dl', 'dh'], 'esi': ['si'], 'esp': ['sp'], 'si': [], 'sp': []} """""" sizes = {} bigger = {} smaller = {} for l in regs: for r, s in zip(l, in_sizes): sizes[r] = s for r in l: bigger[r] = [r_ for r_ in l if sizes[r_] > sizes[r] or r == r_] smaller[r] = [r_ for r_ in l if sizes[r_] < sizes[r]] return lists.concat(regs), sizes, bigger, smaller " 34416,"def platform_independent_paths(coll: List[Text]): return [i.replace(""\\"", ""/"") for i in coll] ","def platform_independent_paths(coll: List[Text]) -> List[Text]: return [i.replace(""\\"", ""/"") for i in coll] " 30844,"def delete_group_command(client, args): group_id = args.get('groupId') group_name = args.get('groupName') if not (group_id or group_name): return_error(""You must supply either 'groupId' or 'groupName"") if not group_id: group_id = client.get_group_id(group_name) res = client.delete_group(group_id) if res.status_code != 204: error_json = res.json() return_error(f""Error in API call. Status Code: [{res.status_code}]. Error Response: {error_json}"") readable_output = f'Slack Group: ""{group_id}"" was deleted successfully' return ( readable_output, {}, None) ","def delete_group_command(client, args): group_id = args.get('groupId') group_name = args.get('groupName') if not (group_id or group_name): return_error(""You must supply either 'groupId' or 'groupName'"") if not group_id: group_id = client.get_group_id(group_name) res = client.delete_group(group_id) if res.status_code != 204: error_json = res.json() return_error(f""Error in API call. Status Code: [{res.status_code}]. 
Error Response: {error_json}"") readable_output = f'Slack Group: ""{group_id}"" was deleted successfully' return ( readable_output, {}, None) " 32746,"def _w_makeRecord(func, instance, args, kwargs): record = func(*args, **kwargs) # add correlation identifiers to LogRecord trace_id, span_id = correlation.get_correlation_ids() if trace_id: record.trace_id = trace_id record.span_id = span_id else: record.trace_id = '' record.span_id = '' return record ","def _w_makeRecord(func, instance, args, kwargs): record = func(*args, **kwargs) # add correlation identifiers to LogRecord trace_id, span_id = correlation.get_correlation_ids() if trace_id: record.trace_id = trace_id record.span_id = span_id else: record.trace_id = 0 record.span_id = '' return record " 45517,"def test_is_enabled_is_true_if_dynamo_table_name_is_set(settings, mocker): # Given table_name = ""random_table_name"" settings.IDENTITIES_TABLE_NAME_DYNAMO = table_name mocked_botot3 = mocker.patch(""environments.dynamodb.dynamodb_wrapper.boto3"") # When dynamo_identity_wrapper = DynamoIdentityWrapper() # Then assert dynamo_identity_wrapper.is_enabled is True mocked_botot3.resource.assert_called_with(""dynamodb"") mocked_botot3.resource.return_value.Table.assert_called_with(table_name) ","def test_is_enabled_is_true_if_dynamo_table_name_is_set(settings, mocker): # Given table_name = ""random_table_name"" settings.IDENTITIES_TABLE_NAME_DYNAMO = table_name mocked_boto3 = mocker.patch(""environments.dynamodb.dynamodb_wrapper.boto3"") # When dynamo_identity_wrapper = DynamoIdentityWrapper() # Then assert dynamo_identity_wrapper.is_enabled is True mocked_botot3.resource.assert_called_with(""dynamodb"") mocked_botot3.resource.return_value.Table.assert_called_with(table_name) " 41829,"def _fast_non_dominated_sort( population: List[""multi_objective.trial.FrozenMultiObjectiveTrial""], directions: List[optuna.study.StudyDirection], ) -> List[List[""multi_objective.trial.FrozenMultiObjectiveTrial""]]: dominated_count = defaultdict(int) # type: DefaultDict[int, int] dominates_list = defaultdict(list) for p, q in itertools.combinations(population, 2): if p._dominates(q, directions): dominates_list[p.number].append(q.number) dominated_count[q.number] += 1 elif q._dominates(p, directions): dominates_list[q.number].append(p.number) dominated_count[p.number] += 1 population_per_rank = [] while len(population) > 0: non_dominated_population = [] i = 0 while i < len(population): if dominated_count[population[i].number] == 0: individual = population[i] if i == len(population) - 1: population.pop() else: population[i] = population.pop() non_dominated_population.append(individual) else: i += 1 for x in non_dominated_population: for y in dominates_list[x.number]: dominated_count[y] -= 1 assert non_dominated_population != [] population_per_rank.append(non_dominated_population) return population_per_rank ","def _fast_non_dominated_sort( population: List[""multi_objective.trial.FrozenMultiObjectiveTrial""], directions: List[optuna.study.StudyDirection], ) -> List[List[""multi_objective.trial.FrozenMultiObjectiveTrial""]]: dominated_count = defaultdict(int) # type: DefaultDict[int, int] dominates_list = defaultdict(list) for p, q in itertools.combinations(population, 2): if p._dominates(q, directions): dominates_list[p.number].append(q.number) dominated_count[q.number] += 1 elif q._dominates(p, directions): dominates_list[q.number].append(p.number) dominated_count[p.number] += 1 population_per_rank = [] while population: non_dominated_population = [] i = 0 while 
i < len(population): if dominated_count[population[i].number] == 0: individual = population[i] if i == len(population) - 1: population.pop() else: population[i] = population.pop() non_dominated_population.append(individual) else: i += 1 for x in non_dominated_population: for y in dominates_list[x.number]: dominated_count[y] -= 1 assert non_dominated_population != [] population_per_rank.append(non_dominated_population) return population_per_rank " 23069,"def histogram(a, bins=None, range=None, normed=False, weights=None, density=None): """""" Blocked variant of :func:`numpy.histogram`. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional Either an iterable specifying the ``bins`` or the number of ``bins`` and a ``range`` argument is required as computing ``min`` and ``max`` over blocked arrays is an expensive operation that must be performed explicitly. If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional .. deprecated:: 1.6.0 This is equivalent to the `density` argument, but produces incorrect results for unequal bin widths. It should not be used. .. versionchanged:: 1.15.0 DeprecationWarnings are actually emitted. weights : array_like, optional A dask.array.Array of weights, of the same block structure as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. If ``density`` is True, ``bins`` cannot be a single-number delayed value. It must be a concrete number, or a (possibly-delayed) array/sequence of the bin edges. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. 
Examples -------- Using number of bins and range: >>> import dask.array as da >>> import numpy as np >>> x = da.from_array(np.arange(10000), chunks=10) >>> h, bins = da.histogram(x, bins=10, range=[0, 10000]) >>> bins array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000., 8000., 9000., 10000.]) >>> h.compute() array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]) Explicitly specifying the bins: >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000])) >>> bins array([ 0, 5000, 10000]) >>> h.compute() array([5000, 5000]) """""" if isinstance(bins, Array): scalar_bins = bins.ndim == 0 # ^ `np.ndim` is not implemented by Dask array. elif isinstance(bins, Delayed): scalar_bins = bins._length is None or bins._length == 1 else: scalar_bins = np.ndim(bins) == 0 if bins is None or (scalar_bins and range is None): raise ValueError( ""dask.array.histogram requires either specifying "" ""bins as an iterable or specifying both a range and "" ""the number of bins"" ) if weights is not None and weights.chunks != a.chunks: raise ValueError(""Input array and weights must have the same chunked structure"") if normed is not False: raise ValueError( ""The normed= keyword argument has been deprecated. "" ""Please use density instead. "" ""See the numpy.histogram docstring for more information."" ) if density and scalar_bins and isinstance(bins, (Array, Delayed)): raise NotImplementedError( ""When `density` is True, `bins` cannot be a scalar Dask object. "" ""It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."" ) for argname, val in [(""bins"", bins), (""range"", range), (""weights"", weights)]: if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins): raise TypeError( ""Dask types besides Array and Delayed are not supported "" ""for `histogram`. 
For argument `{}`, got: {!r}"".format(argname, val) ) if range is not None: try: if len(range) != 2: raise ValueError( f""range must be a sequence or array of length 2, but got {len(range)} items"" ) if isinstance(range, (Array, np.ndarray)) and range.shape != (2,): raise ValueError( f""range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"" ) except TypeError: raise TypeError( f""Expected a sequence or array for range, not {range}"" ) from None token = tokenize(a, bins, range, weights, density) name = ""histogram-sum-"" + token if scalar_bins: bins = _linspace_from_delayed(range[0], range[1], bins + 1) # ^ NOTE `range[1]` is safe because of the above check, and the initial check # that range must not be None if `scalar_bins` else: if not isinstance(bins, (Array, np.ndarray)): bins = asarray(bins) if bins.ndim != 1: raise ValueError( f""bins must be a 1-dimensional array or sequence, got shape {bins.shape}"" ) (bins_ref, range_ref), deps = unpack_collections([bins, range]) # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk if weights is None: dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref) for i, k in enumerate(flatten(a.__dask_keys__())) } dtype = np.histogram([])[0].dtype else: a_keys = flatten(a.__dask_keys__()) w_keys = flatten(weights.__dask_keys__()) dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref, w) for i, (k, w) in enumerate(zip(a_keys, w_keys)) } dtype = weights.dtype deps = (a,) + deps if weights is not None: deps += (weights,) graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps) # Turn graph into a 2D Array of shape (nchunks, nbins) nchunks = len(list(flatten(a.__dask_keys__()))) nbins = bins.size - 1 # since `bins` is 1D chunks = ((1,) * nchunks, (nbins,)) mapped = Array(graph, name, chunks, dtype=dtype) # Sum over chunks to get the final histogram n = mapped.sum(axis=0) # We need to replicate normed and density options from numpy if density is not None: if density: db = asarray(np.diff(bins).astype(float), chunks=n.chunks) return n / db / n.sum(), bins else: return n, bins else: return n, bins ","def histogram(a, bins=None, range=None, normed=False, weights=None, density=None): """""" Blocked variant of :func:`numpy.histogram`. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional Either an iterable specifying the ``bins`` or the number of ``bins`` and a ``range`` argument is required as computing ``min`` and ``max`` over blocked arrays is an expensive operation that must be performed explicitly. If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional .. 
deprecated:: 1.6.0 This is equivalent to the `density` argument, but produces incorrect results for unequal bin widths. It should not be used. .. versionchanged:: 1.15.0 DeprecationWarnings are actually emitted. weights : array_like, optional A dask.array.Array of weights, of the same block structure as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. If ``density`` is True, ``bins`` cannot be a single-number delayed value. It must be a concrete number, or a (possibly-delayed) array/sequence of the bin edges. Returns ------- hist : dask Array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Examples -------- Using number of bins and range: >>> import dask.array as da >>> import numpy as np >>> x = da.from_array(np.arange(10000), chunks=10) >>> h, bins = da.histogram(x, bins=10, range=[0, 10000]) >>> bins array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000., 8000., 9000., 10000.]) >>> h.compute() array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]) Explicitly specifying the bins: >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000])) >>> bins array([ 0, 5000, 10000]) >>> h.compute() array([5000, 5000]) """""" if isinstance(bins, Array): scalar_bins = bins.ndim == 0 # ^ `np.ndim` is not implemented by Dask array. elif isinstance(bins, Delayed): scalar_bins = bins._length is None or bins._length == 1 else: scalar_bins = np.ndim(bins) == 0 if bins is None or (scalar_bins and range is None): raise ValueError( ""dask.array.histogram requires either specifying "" ""bins as an iterable or specifying both a range and "" ""the number of bins"" ) if weights is not None and weights.chunks != a.chunks: raise ValueError(""Input array and weights must have the same chunked structure"") if normed is not False: raise ValueError( ""The normed= keyword argument has been deprecated. "" ""Please use density instead. "" ""See the numpy.histogram docstring for more information."" ) if density and scalar_bins and isinstance(bins, (Array, Delayed)): raise NotImplementedError( ""When `density` is True, `bins` cannot be a scalar Dask object. "" ""It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."" ) for argname, val in [(""bins"", bins), (""range"", range), (""weights"", weights)]: if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins): raise TypeError( ""Dask types besides Array and Delayed are not supported "" ""for `histogram`. 
For argument `{}`, got: {!r}"".format(argname, val) ) if range is not None: try: if len(range) != 2: raise ValueError( f""range must be a sequence or array of length 2, but got {len(range)} items"" ) if isinstance(range, (Array, np.ndarray)) and range.shape != (2,): raise ValueError( f""range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"" ) except TypeError: raise TypeError( f""Expected a sequence or array for range, not {range}"" ) from None token = tokenize(a, bins, range, weights, density) name = ""histogram-sum-"" + token if scalar_bins: bins = _linspace_from_delayed(range[0], range[1], bins + 1) # ^ NOTE `range[1]` is safe because of the above check, and the initial check # that range must not be None if `scalar_bins` else: if not isinstance(bins, (Array, np.ndarray)): bins = asarray(bins) if bins.ndim != 1: raise ValueError( f""bins must be a 1-dimensional array or sequence, got shape {bins.shape}"" ) (bins_ref, range_ref), deps = unpack_collections([bins, range]) # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk if weights is None: dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref) for i, k in enumerate(flatten(a.__dask_keys__())) } dtype = np.histogram([])[0].dtype else: a_keys = flatten(a.__dask_keys__()) w_keys = flatten(weights.__dask_keys__()) dsk = { (name, i, 0): (_block_hist, k, bins_ref, range_ref, w) for i, (k, w) in enumerate(zip(a_keys, w_keys)) } dtype = weights.dtype deps = (a,) + deps if weights is not None: deps += (weights,) graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps) # Turn graph into a 2D Array of shape (nchunks, nbins) nchunks = len(list(flatten(a.__dask_keys__()))) nbins = bins.size - 1 # since `bins` is 1D chunks = ((1,) * nchunks, (nbins,)) mapped = Array(graph, name, chunks, dtype=dtype) # Sum over chunks to get the final histogram n = mapped.sum(axis=0) # We need to replicate normed and density options from numpy if density is not None: if density: db = asarray(np.diff(bins).astype(float), chunks=n.chunks) return n / db / n.sum(), bins else: return n, bins else: return n, bins " 24352,"def validate_config_http(file, check): config_valid = True if os.path.isfile(file): with open(file, ""r"") as f: if 'instances/http' not in f.read(): echo_warning(f'Detected {check}\'s spec.yaml file does not contain `instances/http` ' f'but {check} uses http wrapper') return config_valid ","def validate_config_http(file, check): config_valid = True if os.path.isfile(file): with open(file, ""r"") as f: if 'instances/http' not in f.read(): echo_failure(f'Detected {check}\'s spec.yaml file does not contain `instances/http` ' f'but {check} uses http wrapper') return config_valid " 8515,"def chained_action(func): '''Decorator function allowing action function to be chained. This allows a plugin to modify the behaviour of an existing action function. Chain action function must be defined as ``action_function(original_action, context, data_dict)`` where the first parameter will be set to the action function in the next plugin or in core ckan. The chained action may call the original_action function, optionally passing different values, handling exceptions, returning different values and/or raising different exceptions to the caller. 
Usage:: from ckan.plugins.toolkit import chained_action @chained_action @side_effect_free def package_search(original_action, context, data_dict): return original_action(context, data_dict) :param func: chained action function :type func: callable :returns: chained action function :rtype: callable ''' func.chained_action = True return func ","def chained_action(func): '''Decorator function allowing action function to be chained. This allows a plugin to modify the behaviour of an existing action function. A chained action function must be defined as ``action_function(original_action, context, data_dict)`` where the first parameter will be set to the action function in the next plugin or in core ckan. The chained action may call the original_action function, optionally passing different values, handling exceptions, returning different values and/or raising different exceptions to the caller. Usage:: from ckan.plugins.toolkit import chained_action @chained_action @side_effect_free def package_search(original_action, context, data_dict): return original_action(context, data_dict) :param func: chained action function :type func: callable :returns: chained action function :rtype: callable ''' func.chained_action = True return func " 57814,"def main(): LOG('command is %s' % (demisto.command(),)) try: if demisto.command() == 'test-module': test_module() elif demisto.command() == 'iLert-submit-event': demisto.results(submit_new_event_command(**demisto.args())) elif demisto.command() == 'iLert-acknowledge-event': demisto.results(submit_acknowledge_event_command(**demisto.args())) elif demisto.command() == 'iLert-resolve-event': demisto.results(submit_resolve_event_command(**demisto.args())) except Exception as err: return_error(err) ","def main(): LOG('command is %s' % (demisto.command(),)) try: if demisto.command() == 'test-module': test_module() elif demisto.command() == 'ilert-submit-event': demisto.results(submit_new_event_command(**demisto.args())) elif demisto.command() == 'ilert-acknowledge-event': demisto.results(submit_acknowledge_event_command(**demisto.args())) elif demisto.command() == 'ilert-resolve-event': demisto.results(submit_resolve_event_command(**demisto.args())) except Exception as err: return_error(err) " 30481,"def process_incidet_fields(file_path): """""" Process a incident_fields JSON file Args: file_path: The file path from incident field folder Returns: a list of incident field data. """""" res = [] if checked_type(file_path, (INCIDENT_FIELD_REGEX, PACKS_INCIDENT_FIELDS_REGEX)): print(""adding {} to id_set"".format(file_path)) res.append(get_general_data(file_path)) return res ","def process_incident_fields(file_path): """""" Process a incident_fields JSON file Args: file_path: The file path from incident field folder Returns: a list of incident field data. 
"""""" res = [] if checked_type(file_path, (INCIDENT_FIELD_REGEX, PACKS_INCIDENT_FIELDS_REGEX)): print(""adding {} to id_set"".format(file_path)) res.append(get_general_data(file_path)) return res " 56685,"def get_availability(key, ids): """""" :param str key: the type of identifier :param list of str ids: :rtype: dict """""" ids = [id_ for id_ in ids if id_] # remove infogami.infobase.client.Nothing if not ids: return {} def update_availability_schema_to_v2(v1_resp, ocaid): collections = v1_resp.get('collection', []) v1_resp['identifier'] = ocaid v1_resp['is_restricted'] = v1_resp['status'] != 'open' v1_resp['is_printdisabled'] = 'printdisabled' in collections v1_resp['is_lendable'] = 'inlibrary' in collections v1_resp['is_readable'] = v1_resp['status'] == 'open' # TODO: Make less brittle; maybe add simplelists/copy counts to IA availability # endpoint v1_resp['is_browseable'] = (v1_resp['is_lendable'] and v1_resp['status'] == 'error') # For debugging v1_resp['__src__'] = 'core.models.lending.get_availability' return v1_resp url = '%s?%s=%s' % (config_ia_availability_api_v2_url, key, ','.join(ids)) try: response = requests.get(url, timeout=config_http_request_timeout) items = response.json().get('responses', {}) for pkey in items: ocaid = items[pkey].get('identifier', key == 'identifier' and pkey) items[pkey] = update_availability_schema_to_v2(items[pkey], ocaid) except Exception as e: # TODO: Narrow exception scope logger.exception(""get_availability(%s)"" % url) items = { 'error': 'request_timeout', 'details': str(e) } for pkey in ids: # key could be isbn, ocaid, or openlibrary_[work|edition] ocaid = pkey if key == 'identifier' else None items[pkey] = update_availability_schema_to_v2( {'status': 'error'}, ocaid) return items ","def get_availability(key, ids): """""" :param str key: the type of identifier :param list of str ids: :rtype: dict """""" ids = [id_ for id_ in ids if id_] # remove infogami.infobase.client.Nothing if not ids: return {} def update_availability_schema_to_v2(v1_resp, ocaid): collections = v1_resp.get('collection', []) v1_resp['identifier'] = ocaid v1_resp['is_restricted'] = v1_resp['status'] != 'open' v1_resp['is_printdisabled'] = 'printdisabled' in collections v1_resp['is_lendable'] = 'inlibrary' in collections v1_resp['is_readable'] = v1_resp['status'] == 'open' # TODO: Make less brittle; maybe add simplelists/copy counts to IA availability # endpoint v1_resp['is_browseable'] = (v1_resp['is_lendable'] and v1_resp['status'] == 'error') # For debugging v1_resp['__src__'] = 'core.models.lending.get_availability' return v1_resp url = '%s?%s=%s' % (config_ia_availability_api_v2_url, key, ','.join(ids)) try: response = requests.get(url, timeout=config_http_request_timeout) items = response.json().get('responses', {}) for pkey in items: ocaid = items[pkey].get('identifier', key == 'identifier' and pkey) items[pkey] = update_availability_schema_to_v2(items[pkey], ocaid) return items except Exception as e: # TODO: Narrow exception scope logger.exception(""get_availability(%s)"" % url) items = { 'error': 'request_timeout', 'details': str(e) } for pkey in ids: # key could be isbn, ocaid, or openlibrary_[work|edition] ocaid = pkey if key == 'identifier' else None items[pkey] = update_availability_schema_to_v2( {'status': 'error'}, ocaid) return items " 23669,"def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None): """""" Smooth out short-term model transience using the Prilliman model [1]_. The Prilliman et al. 
model applies an exponential moving average to the output of a steady-state cell temperature model to account for a module's thermal inertia and smooth out the cell temperature's response to changing weather conditions. .. warning:: This implementation requires the time series inputs to be regularly sampled in time. Data with irregular time steps should be resampled prior to using this function. Parameters ---------- temp_cell : pandas Series Cell temperature modeled with steady-state assumptions [C] wind_speed : pandas Series Wind speed, adjusted to correspond to array height [m/s] unit_mass : float, default 11.1 Total mass of module divided by its one-sided surface area [kg/m^2] coefficients : 4-element list-like, optional Values for coefficients a_0–a_3 from [1]_ Returns ------- temp_cell : pandas Series Smoothed version of the input cell temperature [C] Notes ----- This smoothing model was developed and validated using the SAPM model for the steady-state input. References ---------- .. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani, ""Transient Weighted Moving-Average Model of Photovoltaic Module Back-Surface Temperature,"" IEEE Journal of Photovoltaics, 2020. :doi:`10.1109/JPHOTOV.2020.2992351` """""" # TODO: check inputs to ensure regular spacing? time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds() if time_step >= 1200: # too coarsely sampled for smoothing to be relevant return temp_cell window = min(int(1200 / time_step), # time series > 20 minutes len(temp_cell)) # time series < 20 minutes # prefix with NaNs so that the rolling window is ""full"", # even for the first actual value: prefix = np.full(window, np.nan) temp_cell_prefixed = np.append(prefix, temp_cell.values) # get one row per 20-minute window H = scipy.linalg.hankel(np.arange(window), np.arange(window - 1, len(temp_cell_prefixed))) subsets = temp_cell_prefixed[H].T # calculate weights for the values in each window if coefficients is not None: a = coefficients else: # values from [1], Table II a = [0.0046, 0.00046, -0.00023, -1.6e-5] wind_speed = wind_speed.values P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass timedeltas = np.arange(window, 0, -1) * time_step weights = np.exp(-P[:, np.newaxis] * timedeltas) # set weights corresponding to the prefix values to zero; otherwise the # denominator of the weighted average below would be wrong mask_idx = np.triu_indices(window) np.fliplr(weights)[mask_idx] = 0 # change the first row of weights from zero to nan -- this is a # trick to prevent div by zero warning when dividing by summed weights weights[0, :] = np.nan # finally, take the weighted average of each window numerator = np.nansum(subsets[:-1] * weights, axis=1) denominator = np.sum(weights, axis=1) smoothed = numerator / denominator smoothed[0] = temp_cell.values[0] smoothed = pd.Series(smoothed, index=temp_cell.index) return smoothed ","def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None): """""" Smooth out short-term model transience using the Prilliman model [1]_. The Prilliman et al. model applies an exponential moving average to the output of a steady-state cell temperature model to account for a module's thermal inertia and smooth out the cell temperature's response to changing weather conditions. .. warning:: This implementation requires the time series inputs to be regularly sampled in time. Data with irregular time steps should be resampled prior to using this function. 
Parameters ---------- temp_cell : pandas Series Cell temperature modeled with steady-state assumptions. [C] wind_speed : pandas Series Wind speed, adjusted to correspond to array height [m/s] unit_mass : float, default 11.1 Total mass of module divided by its one-sided surface area [kg/m^2] coefficients : 4-element list-like, optional Values for coefficients a_0–a_3 from [1]_ Returns ------- temp_cell : pandas Series Smoothed version of the input cell temperature [C] Notes ----- This smoothing model was developed and validated using the SAPM model for the steady-state input. References ---------- .. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani, ""Transient Weighted Moving-Average Model of Photovoltaic Module Back-Surface Temperature,"" IEEE Journal of Photovoltaics, 2020. :doi:`10.1109/JPHOTOV.2020.2992351` """""" # TODO: check inputs to ensure regular spacing? time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds() if time_step >= 1200: # too coarsely sampled for smoothing to be relevant return temp_cell window = min(int(1200 / time_step), # time series > 20 minutes len(temp_cell)) # time series < 20 minutes # prefix with NaNs so that the rolling window is ""full"", # even for the first actual value: prefix = np.full(window, np.nan) temp_cell_prefixed = np.append(prefix, temp_cell.values) # get one row per 20-minute window H = scipy.linalg.hankel(np.arange(window), np.arange(window - 1, len(temp_cell_prefixed))) subsets = temp_cell_prefixed[H].T # calculate weights for the values in each window if coefficients is not None: a = coefficients else: # values from [1], Table II a = [0.0046, 0.00046, -0.00023, -1.6e-5] wind_speed = wind_speed.values P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass timedeltas = np.arange(window, 0, -1) * time_step weights = np.exp(-P[:, np.newaxis] * timedeltas) # set weights corresponding to the prefix values to zero; otherwise the # denominator of the weighted average below would be wrong mask_idx = np.triu_indices(window) np.fliplr(weights)[mask_idx] = 0 # change the first row of weights from zero to nan -- this is a # trick to prevent div by zero warning when dividing by summed weights weights[0, :] = np.nan # finally, take the weighted average of each window numerator = np.nansum(subsets[:-1] * weights, axis=1) denominator = np.sum(weights, axis=1) smoothed = numerator / denominator smoothed[0] = temp_cell.values[0] smoothed = pd.Series(smoothed, index=temp_cell.index) return smoothed " 9475,"def create_or_update_bucket(s3_client, module, location): policy = module.params.get(""policy"") name = module.params.get(""name"") requester_pays = module.params.get(""requester_pays"") tags = module.params.get(""tags"") versioning = module.params.get(""versioning"") changed = False result = {} try: bucket_is_present = bucket_exists(s3_client, name) except EndpointConnectionError as e: module.fail_json_aws(e, msg=""Invalid endpoint provided: %s"" % to_text(e)) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to check bucket presence"") if not bucket_is_present: try: bucket_changed = create_bucket(s3_client, name, location) s3_client.get_waiter('bucket_exists').wait(Bucket=name) changed = changed or bucket_changed except WaiterError as e: module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed while creating bucket"") # Versioning try: versioning_status = 
get_bucket_versioning(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket versioning"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None: module.fail_json_aws(exp, msg=""Failed to get bucket versioning"") else: if versioning is not None: required_versioning = None if versioning and versioning_status.get('Status') != ""Enabled"": required_versioning = 'Enabled' elif not versioning and versioning_status.get('Status') == ""Enabled"": required_versioning = 'Suspended' if required_versioning: try: put_bucket_versioning(s3_client, name, required_versioning) changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to update bucket versioning"") versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning) # This output format is there to ensure compatibility with previous versions of the module result['versioning'] = { 'Versioning': versioning_status.get('Status', 'Disabled'), 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'), } # Requester pays try: requester_pays_status = get_bucket_request_payment(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket request payment"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or requester_pays is not None: module.fail_json_aws(exp, msg=""Failed to get bucket request payment"") else: if requester_pays is not False: payer = 'Requester' if requester_pays else 'BucketOwner' if requester_pays_status != payer: put_bucket_request_payment(s3_client, name, payer) requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False) if requester_pays_status is None: # We have seen that it happens quite a lot of times that the put request was not taken into # account, so we retry one more time put_bucket_request_payment(s3_client, name, payer) requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True) changed = True result['requester_pays'] = requester_pays # Policy try: current_policy = get_bucket_policy(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket policy"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None: module.fail_json_aws(exp, msg=""Failed to get bucket policy"") else: if policy is not None: if isinstance(policy, string_types): policy = json.loads(policy) if not policy and current_policy: try: delete_bucket_policy(s3_client, name) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to delete bucket policy"") current_policy = wait_policy_is_applied(module, s3_client, name, policy) changed = True elif compare_policies(current_policy, policy): try: put_bucket_policy(s3_client, name, policy) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to update bucket policy"") current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False) if current_policy is None: # As for request payement, it happens quite a lot of times that the put request was not taken into # account, so we retry one more time put_bucket_policy(s3_client, name, policy) current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True) changed = True result['policy'] = current_policy # Tags try: current_tags_dict = get_current_bucket_tags_dict(s3_client, name) except 
BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket tags"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or tags is not None: module.fail_json_aws(exp, msg=""Failed to get bucket tags"") else: if tags is not None: # Tags are always returned as text tags = dict((to_text(k), to_text(v)) for k, v in tags.items()) if current_tags_dict != tags: if tags: try: put_bucket_tagging(s3_client, name, tags) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to update bucket tags"") else: try: delete_bucket_tagging(s3_client, name) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to delete bucket tags"") current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags) changed = True result['tags'] = current_tags_dict module.exit_json(changed=changed, name=name, **result) ","def create_or_update_bucket(s3_client, module, location): policy = module.params.get(""policy"") name = module.params.get(""name"") requester_pays = module.params.get(""requester_pays"") tags = module.params.get(""tags"") versioning = module.params.get(""versioning"") changed = False result = {} try: bucket_is_present = bucket_exists(s3_client, name) except EndpointConnectionError as e: module.fail_json_aws(e, msg=""Invalid endpoint provided: %s"" % to_text(e)) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to check bucket presence"") if not bucket_is_present: try: bucket_changed = create_bucket(s3_client, name, location) s3_client.get_waiter('bucket_exists').wait(Bucket=name) changed = changed or bucket_changed except WaiterError as e: module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed while creating bucket"") # Versioning try: versioning_status = get_bucket_versioning(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket versioning"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None: module.fail_json_aws(exp, msg=""Failed to get bucket versioning"") else: if versioning is not None: required_versioning = None if versioning and versioning_status.get('Status') != ""Enabled"": required_versioning = 'Enabled' elif not versioning and versioning_status.get('Status') == ""Enabled"": required_versioning = 'Suspended' if required_versioning: try: put_bucket_versioning(s3_client, name, required_versioning) changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to update bucket versioning"") versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning) # This output format is there to ensure compatibility with previous versions of the module result['versioning'] = { 'Versioning': versioning_status.get('Status', 'Disabled'), 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'), } # Requester pays try: requester_pays_status = get_bucket_request_payment(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket request payment"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or requester_pays is not None: module.fail_json_aws(exp, msg=""Failed to get bucket request payment"") else: if requester_pays: payer = 'Requester' if requester_pays else 'BucketOwner' if requester_pays_status != payer: put_bucket_request_payment(s3_client, 
name, payer) requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False) if requester_pays_status is None: # We have seen that it happens quite a lot of times that the put request was not taken into # account, so we retry one more time put_bucket_request_payment(s3_client, name, payer) requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True) changed = True result['requester_pays'] = requester_pays # Policy try: current_policy = get_bucket_policy(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket policy"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None: module.fail_json_aws(exp, msg=""Failed to get bucket policy"") else: if policy is not None: if isinstance(policy, string_types): policy = json.loads(policy) if not policy and current_policy: try: delete_bucket_policy(s3_client, name) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to delete bucket policy"") current_policy = wait_policy_is_applied(module, s3_client, name, policy) changed = True elif compare_policies(current_policy, policy): try: put_bucket_policy(s3_client, name, policy) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to update bucket policy"") current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False) if current_policy is None: # As for request payement, it happens quite a lot of times that the put request was not taken into # account, so we retry one more time put_bucket_policy(s3_client, name, policy) current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True) changed = True result['policy'] = current_policy # Tags try: current_tags_dict = get_current_bucket_tags_dict(s3_client, name) except BotoCoreError as exp: module.fail_json_aws(exp, msg=""Failed to get bucket tags"") except ClientError as exp: if exp.response['Error']['Code'] != 'NotImplemented' or tags is not None: module.fail_json_aws(exp, msg=""Failed to get bucket tags"") else: if tags is not None: # Tags are always returned as text tags = dict((to_text(k), to_text(v)) for k, v in tags.items()) if current_tags_dict != tags: if tags: try: put_bucket_tagging(s3_client, name, tags) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to update bucket tags"") else: try: delete_bucket_tagging(s3_client, name) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=""Failed to delete bucket tags"") current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags) changed = True result['tags'] = current_tags_dict module.exit_json(changed=changed, name=name, **result) " 50456,"def email_exists_or_retired(email): """""" Check an email against the User model for existence. """""" return User.objects.filter(email=email).exists() or is_email_retired(email) or \ AccountRecovery.objects.filter(secondary_email=email) ","def email_exists_or_retired(email): """""" Check an email against the User model for existence. 
"""""" return ( User.objects.filter(email=email).exists() or is_email_retired(email) or AccountRecovery.objects.filter(secondary_email=email).exists() ) " 28369,"def _hypergeom_clusters( cluster_labels: np.ndarray, keywords: List[List[str]], fdr_threshold: float, n_words: int ) -> Dict[int, List[str]]: keywords = [[w for w, _ in doc_keywords] for doc_keywords in keywords] clusters_keywords = {} for label in sorted(set(cluster_labels) - {-1}): indices = set(np.flatnonzero(cluster_labels == label)) kwds = [k for i, k in enumerate(keywords) if i in indices] clusters_keywords[label] = kwds cv = CountVectorizer(tokenizer=lambda w: w, preprocessor=lambda w: w) X = cv.fit_transform(list(chain.from_iterable(clusters_keywords.values()))) all_keywords = np.array(cv.get_feature_names_out()) index = 0 selected_clusters_keywords = {} all_scores, all_p_values = [], [] for label, cls_kwds in clusters_keywords.items(): # find words that should be specific for a group with hypergeom test n_docs = len(cls_kwds) p_values = hypergeom_p_values(X, X[index:index + n_docs]) words = set(all_keywords[np.array(p_values) < fdr_threshold]) # select only words with p-values less than threshold sel_words = [w for w in chain.from_iterable(cls_kwds)] sel_words = [w for w in sel_words if w in words] sel_words = [(w, c / n_docs) for w, c in Counter(sel_words).most_common(n_words)] selected_clusters_keywords[label] = sel_words all_scores.append(X[index:index + n_docs].sum(axis=0) / n_docs) all_p_values.append(p_values) index += n_docs all_scores = np.vstack(all_scores) all_p_values = np.vstack(all_p_values) return selected_clusters_keywords, all_keywords, all_scores, all_p_values ","def _hypergeom_clusters( cluster_labels: np.ndarray, keywords: List[List[str]], fdr_threshold: float, n_words: int ) -> Tuple[Dict[int, List[str]], np.ndarray, np.ndarray, np.ndarray]: keywords = [[w for w, _ in doc_keywords] for doc_keywords in keywords] clusters_keywords = {} for label in sorted(set(cluster_labels) - {-1}): indices = set(np.flatnonzero(cluster_labels == label)) kwds = [k for i, k in enumerate(keywords) if i in indices] clusters_keywords[label] = kwds cv = CountVectorizer(tokenizer=lambda w: w, preprocessor=lambda w: w) X = cv.fit_transform(list(chain.from_iterable(clusters_keywords.values()))) all_keywords = np.array(cv.get_feature_names_out()) index = 0 selected_clusters_keywords = {} all_scores, all_p_values = [], [] for label, cls_kwds in clusters_keywords.items(): # find words that should be specific for a group with hypergeom test n_docs = len(cls_kwds) p_values = hypergeom_p_values(X, X[index:index + n_docs]) words = set(all_keywords[np.array(p_values) < fdr_threshold]) # select only words with p-values less than threshold sel_words = [w for w in chain.from_iterable(cls_kwds)] sel_words = [w for w in sel_words if w in words] sel_words = [(w, c / n_docs) for w, c in Counter(sel_words).most_common(n_words)] selected_clusters_keywords[label] = sel_words all_scores.append(X[index:index + n_docs].sum(axis=0) / n_docs) all_p_values.append(p_values) index += n_docs all_scores = np.vstack(all_scores) all_p_values = np.vstack(all_p_values) return selected_clusters_keywords, all_keywords, all_scores, all_p_values " 43896,"def mol_basis_data(name, symbols): r""""""Generates default basis set parameters for a molecule. This function generates the default basis set parameters for a list of atomic symbols and computes the total number of basis functions for each atom. 
Args: name (str): name of the basis set symbols (list[str]): symbols of the atomic species in the molecule Returns: tuple(list, tuple): the number of basis functions and the basis set parameters for each atom in the molecule **Example** >>> n_basis, params = mol_basis_data('sto-3g', ['H', 'H']) >>> print(n_basis) [1, 1] >>> print(params) (((0, 0, 0), [3.425250914, 0.6239137298, 0.168855404], [0.1543289673, 0.5353281423, 0.4446345422]), ((0, 0, 0), [3.425250914, 0.6239137298, 0.168855404], [0.1543289673, 0.5353281423, 0.4446345422])) """""" n_basis = [] basis_set = [] for s in symbols: basis = atom_basis_data(name, s) n_basis += [len(basis)] basis_set += basis return n_basis, tuple(basis_set) ","def mol_basis_data(name, symbols): r""""""Generates default basis set parameters for a molecule. This function generates the default basis set parameters for a list of atomic symbols and computes the total number of basis functions for each atom. Args: name (str): name of the basis set symbols (list[str]): symbols of the atomic species in the molecule Returns: tuple(list, tuple): the number of basis functions and the basis set parameters for each atom in the molecule **Example** >>> n_basis, params = mol_basis_data('sto-3g', ['H', 'H']) >>> print(n_basis) [1, 1] >>> print(params) (((0, 0, 0), [3.425250914, 0.6239137298, 0.168855404], [0.1543289673, 0.5353281423, 0.4446345422]), ((0, 0, 0), [3.425250914, 0.6239137298, 0.168855404], [0.1543289673, 0.5353281423, 0.4446345422])) """""" n_basis = [] basis_set = [] for s in symbols: basis = atom_basis_data(name, s) n_basis += [len(basis)] basis_set += basis return n_basis, tuple(basis_set) " 53444,"def test_base_checker_ordering(): fake_checker_1 = OtherBasicChecker() fake_checker_2 = LessBasicChecker() fake_checker_3 = DifferentBasicChecker() assert fake_checker_1 < fake_checker_3 assert fake_checker_2 < fake_checker_3 ","def test_base_checker_ordering() -> None: """"""Test ordering of checkers based on their __gt__ method."""""" fake_checker_1 = OtherBasicChecker() fake_checker_2 = LessBasicChecker() fake_checker_3 = DifferentBasicChecker() assert fake_checker_1 < fake_checker_3 assert fake_checker_2 < fake_checker_3 " 8995,"def handle_init(options): """"""Use config wizard to initialize a new configuration file for the bot. :param options: parsed arguments :type options: :class:`argparse.Namespace` :return: 0 if everything went fine; 1 if the file is invalid or if it already exists .. note:: Due to how the config wizard works, the configuration filename's extension **must be** ``.cfg``. """""" config_filename = utils.find_config(options.configdir, options.config) config_name, ext = os.path.splitext(config_filename) if ext and ext != '.cfg': tools.stderr('Configuration wizard accepts .cfg files only') return 1 elif not ext: config_filename = config_name + '.cfg' if os.path.isfile(config_filename): tools.stderr('Configuration file %s already exists' % config_filename) return 1 print('Starting Sopel config wizard for: %s' % config_filename) try: utils.wizard(config_name) except KeyboardInterrupt: tools.stderr('\nOperation cancelled, no file has been created.') return 1 # cancelled operation return 0 # successful operation ","def handle_init(options): """"""Use config wizard to initialize a new configuration file for the bot. :param options: parsed arguments :type options: :class:`argparse.Namespace` :return: 0 if everything went fine; 1 if the file is invalid or if it already exists .. 
note:: Due to how the config wizard works, the configuration filename's extension **must be** ``.cfg``. """""" config_filename = utils.find_config(options.configdir, options.config) config_name, ext = os.path.splitext(config_filename) if ext and ext != '.cfg': tools.stderr('Configuration wizard accepts .cfg files only') return 1 elif not ext: config_filename = config_name + '.cfg' if os.path.isfile(config_filename): tools.stderr('Configuration file %s already exists' % config_filename) return 1 print('Starting Sopel config wizard for: %s' % config_filename) try: utils.wizard(config_name) except KeyboardInterrupt: tools.stderr('\nOperation cancelled; no file has been created.') return 1 # cancelled operation return 0 # successful operation " 27940,"def to_device(model, communicator, use_gpu, use_chx): """"""Send Chainer model to devices Currently in Chainer, we have 3 officially-supported backends (numpy, cupy, chainerx) and 2 devices (CPU and NVIDIA GPUs). Also, ChainerX has its own backend system, so there are 4 combinations (numpy, cupy, chainerx+native, chainerx+cuda). This utility function is a boilerplate to send Chainer model to backend devices in tests in test/chainermn_tests. """""" if use_gpu: # We need to set GPU id every time we call to_device(), # because each test chainer.cuda.get_device_from_id(communicator.intra_rank).use() if use_chx: device = 'cuda:{}'.format(communicator.intra_rank) else: # cupy device = '@cupy:{}'.format(communicator.intra_rank) else: if use_chx: device = 'native:0' else: device = -1 device = chainer.get_device(device) model.to_device(device) ","def to_device(model, communicator, use_gpu, use_chx): """"""Send Chainer model to devices Currently in Chainer, we have 3 officially-supported backends (numpy, cupy, chainerx) and 2 devices (CPU and NVIDIA GPUs). Also, ChainerX has its own backend system, so there are 4 combinations (numpy, cupy, chainerx+native, chainerx+cuda). This utility function is a boilerplate to send Chainer model to backend devices in tests in test/chainermn_tests. 
"""""" if use_gpu: # We need to set GPU id every time we call to_device(), # because each test chainer.cuda.get_device_from_id(communicator.intra_rank).use() if use_chx: device = 'cuda:{}'.format(communicator.intra_rank) else: # cupy device = '@cupy:{}'.format(communicator.intra_rank) else: if use_chx: device = 'native:0' else: device = '@numpy' device = chainer.get_device(device) model.to_device(device) " 24468,"def create_datadog_conf_file(tmp_dir): container_ip = get_container_ip(SNMP_CONTAINER_NAME) prefix = ""."".join(container_ip.split('.')[:3]) datadog_conf = { 'snmp_listener': { 'workers': 4, 'discovery_interval': 10, 'configs': [ # { # 'network': '{}.0/29'.format(prefix), # 'port': PORT, # 'community': 'generic-router', # 'version': 2, # 'timeout': 1, # 'retries': 2, # 'tags': [ # ""tag1:val1"", # ""tag2:val2"", # ], # 'loader': 'core', # }, # { # 'network': '{}.0/28'.format(prefix), # 'port': PORT, # 'community': 'apc_ups', # 'version': 2, # 'timeout': 1, # 'retries': 2, # }, { 'network': '{}.0/27'.format(prefix), 'port': PORT, 'version': 3, 'timeout': 1, 'retries': 2, 'user': 'datadogSHADES', 'authentication_key': 'doggiepass', 'authentication_protocol': 'sha', 'privacy_key': 'doggiePRIVkey', 'privacy_protocol': 'des', 'context_name': 'public', 'ignored_ip_addresses': {'{}.2'.format(prefix): True}, 'loader': 'core', }, ], }, 'listeners': [{'name': 'snmp'}], 'check_runners': -1, } datadog_conf_file = os.path.join(tmp_dir, 'datadog.yaml') with open(datadog_conf_file, 'wb') as file: file.write(yaml.dump(datadog_conf)) return datadog_conf_file ","def create_datadog_conf_file(tmp_dir): container_ip = get_container_ip(SNMP_CONTAINER_NAME) prefix = ""."".join(container_ip.split('.')[:3]) datadog_conf = { 'snmp_listener': { 'workers': 4, 'discovery_interval': 10, 'configs': [ # { # 'network': '{}.0/29'.format(prefix), # 'port': PORT, # 'community': 'generic-router', # 'version': 2, # 'timeout': 1, # 'retries': 2, # 'tags': [ # ""tag1:val1"", # ""tag2:val2"", # ], # 'loader': 'core', # }, # { # 'network': '{}.0/28'.format(prefix), # 'port': PORT, # 'community': 'apc_ups', # 'version': 2, # 'timeout': 1, # 'retries': 2, # }, { 'network': '{}.0/27'.format(prefix), 'port': PORT, 'version': 3, 'timeout': 1, 'retries': 2, 'user': 'datadogSHADES', 'authentication_key': 'doggiepass', 'authentication_protocol': 'sha', 'privacy_key': 'doggiePRIVkey', 'privacy_protocol': 'des', 'context_name': 'public', 'ignored_ip_addresses': {'{}.2'.format(prefix): True}, 'loader': 'core', }, ], }, 'listeners': [{'name': 'snmp'}], 'check_runners': 4, } datadog_conf_file = os.path.join(tmp_dir, 'datadog.yaml') with open(datadog_conf_file, 'wb') as file: file.write(yaml.dump(datadog_conf)) return datadog_conf_file " 32103,"def fetch_context(force_refresh: bool = False) -> dict: """""" Fetches the integration instance context from the server if the CACHE_EXPIRY is smaller than the current epoch time In the event that the cache is not expired, we return a cached copy of the context which has been stored in memory. We can force the retrieval of the updated context by setting the force_refresh flag to True. :param force_refresh: bool: Indicates if the context should be refreshed regardless of the expiry time. :return: dict: Either a cached copy of the integration context, or the context itself. 
"""""" demisto.info(""Fetching context using fetch_context"") global CACHED_INTEGRATION_CONTEXT, CACHE_EXPIRY if (CACHE_EXPIRY <= int(datetime.now(timezone.utc).timestamp() * 1000)) or force_refresh: demisto.debug(""Cached context has expired. Fetching new context"") CACHE_EXPIRY = int(datetime.now(timezone.utc).timestamp() * 1000) + 300 CACHED_INTEGRATION_CONTEXT = get_integration_context(SYNC_CONTEXT) return CACHED_INTEGRATION_CONTEXT ","def fetch_context(force_refresh: bool = False) -> dict: """""" Fetches the integration instance context from the server if the CACHE_EXPIRY is smaller than the current epoch time In the event that the cache is not expired, we return a cached copy of the context which has been stored in memory. We can force the retrieval of the updated context by setting the force_refresh flag to True. :param force_refresh: bool: Indicates if the context should be refreshed regardless of the expiry time. :return: dict: Either a cached copy of the integration context, or the context itself. """""" demisto.info(""Fetching context using fetch_context"") global CACHED_INTEGRATION_CONTEXT, CACHE_EXPIRY if (CACHE_EXPIRY <= int(datetime.now(timezone.utc).timestamp() * 1000)) or force_refresh: demisto.debug(f'Cached context has expired or forced refresh. forced refresh value is {force_refresh}. Fetching new context') CACHE_EXPIRY = int(datetime.now(timezone.utc).timestamp() * 1000) + 300 CACHED_INTEGRATION_CONTEXT = get_integration_context(SYNC_CONTEXT) return CACHED_INTEGRATION_CONTEXT " 42940,"def rbfkernel(R, sigma): r""""""This function generates a radial basis function (RBF) kernel matrix. The elements of the RBF kernel are computed as: .. math:: K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)}, where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma` is a constant. **Example usage:** >>> R = array([[0, 1], [1, 0], [0, 0], [1, 1]]) >>> sigma = 1.0 >>> rbfkernel (R, sigma) array([[1. , 0.36787944, 0.60653066, 0.60653066], [0.36787944, 1. , 0.60653066, 0.60653066], [0.60653066, 0.60653066, 1. , 0.36787944], [0.60653066, 0.60653066, 0.36787944, 1. ]]) Args: R (array): coordinates of the points. sigma (float): a constant. Returns: K (array): the kernel matrix. """""" K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2) return K ","def rbfkernel(R, sigma): r""""""This function generates a radial basis function (RBF) kernel matrix. The elements of the RBF kernel are computed as: .. math:: K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)}, where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma` is a constant. **Example usage:** >>> R = array([[0, 1], [1, 0], [0, 0], [1, 1]]) >>> sigma = 1.0 >>> rbfkernel (R, sigma) array([[1. , 0.36787944, 0.60653066, 0.60653066], [0.36787944, 1. , 0.60653066, 0.60653066], [0.60653066, 0.60653066, 1. , 0.36787944], [0.60653066, 0.60653066, 0.36787944, 1. ]]) Args: R (array): coordinates of the points. sigma (float): kernel parameter Returns: K (array): the kernel matrix. 
"""""" K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2) return K " 12426,"def get_required_packages(cfg: dict): """"""identify required packages for install"""""" packages = [] if not subp.which(""lxd""): packages.append(""lxd"") # binary for pool creation must be available for the requested backend: # zfs, lvcreate, mkfs.btrfs storage: str = cfg.get(""storage_backend"", """") if storage: if storage == ""zfs"" and not subp.which(""zfs""): packages.append(""zfsutils-linux"") if storage == ""lvm"" and not subp.which(""lvcreate""): packages.append(""lvm2"") if storage == ""btrfs"" and not subp.which(""mkfs.btrfs""): packages.append(""btrfs-progs"") return packages ","def get_required_packages(cfg: dict) -> List[str]: """"""identify required packages for install"""""" packages = [] if not subp.which(""lxd""): packages.append(""lxd"") # binary for pool creation must be available for the requested backend: # zfs, lvcreate, mkfs.btrfs storage: str = cfg.get(""storage_backend"", """") if storage: if storage == ""zfs"" and not subp.which(""zfs""): packages.append(""zfsutils-linux"") if storage == ""lvm"" and not subp.which(""lvcreate""): packages.append(""lvm2"") if storage == ""btrfs"" and not subp.which(""mkfs.btrfs""): packages.append(""btrfs-progs"") return packages " 57581,"def np_random(seed=None): if seed is not None and not (isinstance(seed, int) and 0 <= seed): raise error.Error( ""Seed must be a non-negative integer or omitted, not {}"".format(seed) ) rng = np.random.default_rng(seed) seed = rng.bit_generator._seed_seq.entropy return rng, seed ","def np_random(seed=None): if seed is not None and not (isinstance(seed, int) and 0 <= seed): raise error.Error( ""Seed must be a non-negative integer or omitted, not {}"".format(seed) ) sq = np.random.SeedSequence(seed) seed = sq.entropy rng = np.random.Generator(np.random.PCG64(sq)) return rng, seed " 46142,"def bundled_files(model, file_type='javascript'): bdir = os.path.join(PANEL_DIR, 'dist', 'bundled', model.__name__.lower()) name = model.__name__.lower() files = [] for url in getattr(model, f""__{file_type}_raw__"", []): filepath = url_path(url) test_filepath = filepath.split('?')[0] if RESOURCE_MODE0 == 'server' and os.path.isfile(os.path.join(bdir, test_filepath)): files.append(f'static/extensions/panel/bundled/{name}/{filepath}') else: files.append(url) return files ","def bundled_files(model, file_type='javascript'): bdir = os.path.join(PANEL_DIR, 'dist', 'bundled', model.__name__.lower()) name = model.__name__.lower() files = [] for url in getattr(model, f""__{file_type}_raw__"", []): filepath = url_path(url) test_filepath = filepath.split('?')[0] if RESOURCE_MODE == 'server' and os.path.isfile(os.path.join(bdir, test_filepath)): files.append(f'static/extensions/panel/bundled/{name}/{filepath}') else: files.append(url) return files " 33432,"def run_parallel(config, venv_dict): """"""here we'll just start parallel sub-processes"""""" live_out = config.option.parallel_live nospinner = config.option.parallel_nospinner args = [sys.executable, MAIN_FILE] + config.args try: position = args.index(""--"") except ValueError: position = len(args) max_parallel = config.option.parallel if max_parallel is None: max_parallel = len(venv_dict) semaphore = Semaphore(max_parallel) finished = Event() show_progress = (not live_out or not nospinner) and reporter.verbosity() > reporter.Verbosity.QUIET with Spinner(enabled=show_progress) as spinner: def run_in_thread(tox_env, os_env, processes): env_name = tox_env.envconfig.envname status = ""skipped tests"" 
if config.option.notest else None try: os_env[str(PARALLEL_ENV_VAR_KEY)] = str(env_name) args_sub = list(args) if hasattr(tox_env, ""package""): args_sub.insert(position, str(tox_env.package)) args_sub.insert(position, ""--installpkg"") with tox_env.new_action(""parallel {}"".format(tox_env.name)) as action: def collect_process(process): processes[tox_env] = (action, process) action.popen( args=args_sub, env=os_env, redirect=not live_out, capture_err=live_out, callback=collect_process, ) except InvocationError as err: status = ""parallel child exit code {}"".format(err.exit_code) finally: semaphore.release() finished.set() tox_env.status = status done.add(env_name) outcome = spinner.succeed if config.option.notest: outcome = spinner.skip elif status is not None: outcome = spinner.fail outcome(env_name) threads = deque() processes = {} todo_keys = set(venv_dict.keys()) todo = OrderedDict((n, todo_keys & set(v.envconfig.depends)) for n, v in venv_dict.items()) done = set() try: while todo: for name, depends in list(todo.items()): if depends - done: # skip if has unfinished dependencies continue del todo[name] venv = venv_dict[name] semaphore.acquire(blocking=True) spinner.add(name) thread = Thread( target=run_in_thread, args=(venv, os.environ.copy(), processes) ) thread.daemon = True thread.start() threads.append(thread) if todo: # wait until someone finishes and retry queuing jobs finished.wait() finished.clear() while threads: threads = [ thread for thread in threads if not thread.join(0.1) and thread.is_alive() ] except KeyboardInterrupt: reporter.verbosity0( ""[{}] KeyboardInterrupt parallel - stopping children"".format(os.getpid()) ) while True: # do not allow to interrupt until children interrupt try: # putting it inside a thread so it's not interrupted stopper = Thread(target=_stop_child_processes, args=(processes, threads)) stopper.start() stopper.join() except KeyboardInterrupt: continue raise KeyboardInterrupt ","def run_parallel(config, venv_dict): """"""here we'll just start parallel sub-processes"""""" live_out = config.option.parallel_live nospinner = config.option.parallel_nospinner args = [sys.executable, MAIN_FILE] + config.args try: position = args.index(""--"") except ValueError: position = len(args) max_parallel = config.option.parallel if max_parallel is None: max_parallel = len(venv_dict) semaphore = Semaphore(max_parallel) finished = Event() show_progress = allow_spinner and not live_out and reporter.verbosity() > reporter.Verbosity.QUIET with Spinner(enabled=show_progress) as spinner: def run_in_thread(tox_env, os_env, processes): env_name = tox_env.envconfig.envname status = ""skipped tests"" if config.option.notest else None try: os_env[str(PARALLEL_ENV_VAR_KEY)] = str(env_name) args_sub = list(args) if hasattr(tox_env, ""package""): args_sub.insert(position, str(tox_env.package)) args_sub.insert(position, ""--installpkg"") with tox_env.new_action(""parallel {}"".format(tox_env.name)) as action: def collect_process(process): processes[tox_env] = (action, process) action.popen( args=args_sub, env=os_env, redirect=not live_out, capture_err=live_out, callback=collect_process, ) except InvocationError as err: status = ""parallel child exit code {}"".format(err.exit_code) finally: semaphore.release() finished.set() tox_env.status = status done.add(env_name) outcome = spinner.succeed if config.option.notest: outcome = spinner.skip elif status is not None: outcome = spinner.fail outcome(env_name) threads = deque() processes = {} todo_keys = set(venv_dict.keys()) todo = 
OrderedDict((n, todo_keys & set(v.envconfig.depends)) for n, v in venv_dict.items()) done = set() try: while todo: for name, depends in list(todo.items()): if depends - done: # skip if has unfinished dependencies continue del todo[name] venv = venv_dict[name] semaphore.acquire(blocking=True) spinner.add(name) thread = Thread( target=run_in_thread, args=(venv, os.environ.copy(), processes) ) thread.daemon = True thread.start() threads.append(thread) if todo: # wait until someone finishes and retry queuing jobs finished.wait() finished.clear() while threads: threads = [ thread for thread in threads if not thread.join(0.1) and thread.is_alive() ] except KeyboardInterrupt: reporter.verbosity0( ""[{}] KeyboardInterrupt parallel - stopping children"".format(os.getpid()) ) while True: # do not allow to interrupt until children interrupt try: # putting it inside a thread so it's not interrupted stopper = Thread(target=_stop_child_processes, args=(processes, threads)) stopper.start() stopper.join() except KeyboardInterrupt: continue raise KeyboardInterrupt " 14009,"def to_file(df, filename, driver=""ESRI Shapefile"", schema=None, index=None, **kwargs): """""" Write this GeoDataFrame to an OGR data source A dictionary of supported OGR providers is available via: >>> import fiona >>> fiona.supported_drivers Parameters ---------- df : GeoDataFrame to be written filename : string File path or file handle to write to. driver : string, default 'ESRI Shapefile' The OGR format driver used to write the vector file. schema : dict, default None If specified, the schema dictionary is passed to Fiona to better control how the file is written. If None, GeoPandas will determine the schema based on each column's dtype index : bool, default None If True, write index into one or more columns (for MultiIndex). Default None automatically determines if index is written if it is either named or is a MultiIndex. .. versionadded:: 0.7 Previously the index was not written. The *kwargs* are passed to fiona.open and can be used to write to multi-layer data, store data within archives (zip files), etc. The path may specify a fiona VSI scheme. """""" if index is None: index = list(df.index.names) != [None] if index is True: df = df.reset_index(drop=False) if schema is None: schema = infer_schema(df) with fiona_env(): with fiona.open( filename, ""w"", driver=driver, crs=df.crs, schema=schema, **kwargs ) as colxn: colxn.writerecords(df.iterfeatures()) ","def to_file(df, filename, driver=""ESRI Shapefile"", schema=None, index=None, **kwargs): """""" Write this GeoDataFrame to an OGR data source A dictionary of supported OGR providers is available via: >>> import fiona >>> fiona.supported_drivers Parameters ---------- df : GeoDataFrame to be written filename : string File path or file handle to write to. driver : string, default 'ESRI Shapefile' The OGR format driver used to write the vector file. schema : dict, default None If specified, the schema dictionary is passed to Fiona to better control how the file is written. If None, GeoPandas will determine the schema based on each column's dtype index : bool, default None If True, write index into one or more columns (for MultiIndex). Default None automatically determines if index is written if it is either named or is a MultiIndex. .. versionadded:: 0.7 Previously the index was not written. The *kwargs* are passed to fiona.open and can be used to write to multi-layer data, store data within archives (zip files), etc. The path may specify a fiona VSI scheme. 
"""""" if index is None: index = list(df.index.names) != [None] if index: df = df.reset_index(drop=False) if schema is None: schema = infer_schema(df) with fiona_env(): with fiona.open( filename, ""w"", driver=driver, crs=df.crs, schema=schema, **kwargs ) as colxn: colxn.writerecords(df.iterfeatures()) " 49147,"def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False, forced=False, change_into_dir=None): """""" Extract file at given path to specified directory :param fn: path to file to extract :param dest: location to extract to :param cmd: extract command to use (derived from filename if not specified) :param extra_options: extra options to pass to extract command :param overwrite: overwrite existing unpacked file :param forced: force extraction in (extended) dry run mode :param change_into_dir: change into resulting directory; None (current default) implies True, but this is deprecated, this named argument should be set to False or True explicitely (in a future major release, default will be changed to False) :return: path to directory (in case of success) """""" if change_into_dir is None: _log.deprecated(""extract_file function was called without specifying value for change_into_dir"", '5.0') change_into_dir = True if not os.path.isfile(fn) and not build_option('extended_dry_run'): raise EasyBuildError(""Can't extract file %s: no such file"", fn) mkdir(dest, parents=True) # use absolute pathnames from now on abs_dest = os.path.abspath(dest) # change working directory _log.debug(""Unpacking %s in directory %s"", fn, abs_dest) cwd = change_dir(abs_dest) if cmd: # complete command template with filename cmd = cmd % fn _log.debug(""Using specified command to unpack %s: %s"", fn, cmd) else: cmd = extract_cmd(fn, overwrite=overwrite) _log.debug(""Using command derived from file extension to unpack %s: %s"", fn, cmd) if not cmd: raise EasyBuildError(""Can't extract file %s with unknown filetype"", fn) if extra_options: cmd = ""%s %s"" % (cmd, extra_options) run.run_cmd(cmd, simple=True, force_in_dry_run=forced) # note: find_base_dir also changes into the base dir! 
base_dir = find_base_dir() # if changing into obtained directory is not desired, # change back to where we came from (unless that was a non-existing directory) if not change_into_dir: if cwd is None: raise EasyBuildError(""Can't change back to non-existing directory after extracting %s in %s"", fn, dest) else: change_dir(cwd) return base_dir ","def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False, forced=False, change_into_dir=None): """""" Extract file at given path to specified directory :param fn: path to file to extract :param dest: location to extract to :param cmd: extract command to use (derived from filename if not specified) :param extra_options: extra options to pass to extract command :param overwrite: overwrite existing unpacked file :param forced: force extraction in (extended) dry run mode :param change_into_dir: change into resulting directory; None (current default) implies True, but this is deprecated, this named argument should be set to False or True explicitely (in a future major release, default will be changed to False) :return: path to directory (in case of success) """""" if change_into_dir is None: _log.deprecated(""extract_file function was called without specifying value for change_into_dir"", '5.0') change_into_dir = True if not os.path.isfile(fn) and not build_option('extended_dry_run'): raise EasyBuildError(""Can't extract file %s: no such file"", fn) mkdir(dest, parents=True) # use absolute pathnames from now on abs_dest = os.path.abspath(dest) # change working directory _log.debug(""Unpacking %s in directory %s"", fn, abs_dest) cwd = change_dir(abs_dest) if cmd: # complete command template with filename cmd = cmd % fn _log.debug(""Using specified command to unpack %s: %s"", fn, cmd) else: try: cmd = extract_cmd(fn, overwrite=overwrite) _log.debug(""Using command derived from file extension to unpack %s: %s"", fn, cmd) except KeyError as err: raise EasyBuildError(""Can't extract file %s with unknown filetype"", fn) if extra_options: cmd = ""%s %s"" % (cmd, extra_options) run.run_cmd(cmd, simple=True, force_in_dry_run=forced) # note: find_base_dir also changes into the base dir! 
base_dir = find_base_dir() # if changing into obtained directory is not desired, # change back to where we came from (unless that was a non-existing directory) if not change_into_dir: if cwd is None: raise EasyBuildError(""Can't change back to non-existing directory after extracting %s in %s"", fn, dest) else: change_dir(cwd) return base_dir " 26280,"def _sanitize_label(value: str) -> str: """"""Return a legal value for a BigQuery label."""""" value = value.strip().lower() value = _SANITIZE_LABEL_PATTERN.sub(""_"", value) value_length = len(value) if value_length > _VALIDATE_LABEL_LENGTH_LIMIT: error_msg = ( f""Current label length {value_length} is greater than length limit: {_VALIDATE_LABEL_LENGTH_LIMIT} | Current sanitized label: {value}"" ) raise Exception(error_msg) else: return value ","def _sanitize_label(value: str) -> str: """"""Return a legal value for a BigQuery label."""""" value = value.strip().lower() value = _SANITIZE_LABEL_PATTERN.sub(""_"", value) value_length = len(value) if value_length > _VALIDATE_LABEL_LENGTH_LIMIT: error_msg = ( f""Current label length {value_length} is greater than length limit: {_VALIDATE_LABEL_LENGTH_LIMIT} | Current sanitized label: {value}"" ) raise RuntimeException(error_msg) else: return value " 28587,"def plot_parallel( data, var_names=None, filter_vars=None, coords=None, figsize=None, textsize=None, legend=True, colornd=""k"", colord=""C1"", shadend=0.025, labeller=None, ax=None, norm_method=None, backend=None, backend_config=None, backend_kwargs=None, show=None, ): """""" Plot parallel coordinates plot showing posterior points with and without divergences. Described by https://arxiv.org/abs/1709.01449 Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object refer to documentation of :func:`arviz.convert_to_dataset` for details var_names: list of variable names Variables to be plotted, if `None` all variables are plotted. Can be used to change the order of the plotted variables. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of ``var_names`` to be plotted. Passed to ``Dataset.sel`` figsize: tuple Figure size. If None it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. legend: bool Flag for plotting legend (defaults to True) colornd: valid matplotlib color color for non-divergent points. Defaults to 'k' colord: valid matplotlib color color for divergent points. Defaults to 'C1' shadend: float Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque). Defaults to .025 labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. norm_method: str Method for normalizing the data. Methods include normal, minmax and rank. Defaults to none. backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". 
backend_config: dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``. backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. plot_trace : Plot distribution (histogram or kernel density estimates) and sampled values or rank plot.plot Examples -------- Plot default parallel plot .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('centered_eight') >>> az.plot_parallel(data, var_names=[""mu"", ""tau""]) Plot parallel plot with normalization .. plot:: :context: close-figs >>> az.plot_parallel(data, var_names=[""mu"", ""tau""], norm_method='normal') """""" if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get diverging draws and combine chains divergent_data = convert_to_dataset(data, group=""sample_stats"") _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(""diverging"",), combined=True) diverging_mask = np.squeeze(diverging_mask) # Get posterior draws and combine chains posterior_data = convert_to_dataset(data, group=""posterior"") var_names = _var_names(var_names, posterior_data, filter_vars) var_names, _posterior = xarray_to_ndarray( get_coords(posterior_data, coords), var_names=var_names, combined=True, label_fun=labeller.make_label_vert, ) if len(var_names) < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") if norm_method is not None: if norm_method == ""normal"": mean = np.mean(_posterior, axis=1) if _posterior.ndim <= 2: standard_deviation = np.sqrt(_numba_var(svar, np.var, _posterior, axis=1)) else: standard_deviation = np.std(_posterior, axis=1) for i in range(0, np.shape(mean)[0]): _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i] elif norm_method == ""minmax"": min_elem = np.min(_posterior, axis=1) max_elem = np.max(_posterior, axis=1) for i in range(0, np.shape(min_elem)[0]): _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i]) elif norm_method == ""rank"": _posterior = rankdata(_posterior, axis=1, method=""average"") else: raise ValueError(f""{norm_method} is not supported. Use normal, minmax or rank."") parallel_kwargs = dict( ax=ax, colornd=colornd, colord=colord, shadend=shadend, diverging_mask=diverging_mask, posterior=_posterior, textsize=textsize, var_names=var_names, legend=legend, figsize=figsize, backend_kwargs=backend_kwargs, backend_config=backend_config, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_parallel"", ""parallelplot"", backend) ax = plot(**parallel_kwargs) return ax ","def plot_parallel( data, var_names=None, filter_vars=None, coords=None, figsize=None, textsize=None, legend=True, colornd=""k"", colord=""C1"", shadend=0.025, labeller=None, ax=None, norm_method=None, backend=None, backend_config=None, backend_kwargs=None, show=None, ): """""" Plot parallel coordinates plot showing posterior points with and without divergences. 
Described by https://arxiv.org/abs/1709.01449 Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object refer to documentation of :func:`arviz.convert_to_dataset` for details var_names: list of variable names Variables to be plotted, if `None` all variables are plotted. Can be used to change the order of the plotted variables. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of ``var_names`` to be plotted. Passed to ``Dataset.sel`` figsize: tuple Figure size. If None it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. legend: bool Flag for plotting legend (defaults to True) colornd: valid matplotlib color color for non-divergent points. Defaults to 'k' colord: valid matplotlib color color for divergent points. Defaults to 'C1' shadend: float Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque). Defaults to .025 labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. norm_method: str Method for normalizing the data. Methods include normal, minmax and rank. Defaults to none. backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_config: dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``. backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. plot_trace : Plot distribution (histogram or kernel density estimates) and sampled values or rank plot Examples -------- Plot default parallel plot .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('centered_eight') >>> az.plot_parallel(data, var_names=[""mu"", ""tau""]) Plot parallel plot with normalization .. 
plot:: :context: close-figs >>> az.plot_parallel(data, var_names=[""mu"", ""tau""], norm_method='normal') """""" if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get diverging draws and combine chains divergent_data = convert_to_dataset(data, group=""sample_stats"") _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(""diverging"",), combined=True) diverging_mask = np.squeeze(diverging_mask) # Get posterior draws and combine chains posterior_data = convert_to_dataset(data, group=""posterior"") var_names = _var_names(var_names, posterior_data, filter_vars) var_names, _posterior = xarray_to_ndarray( get_coords(posterior_data, coords), var_names=var_names, combined=True, label_fun=labeller.make_label_vert, ) if len(var_names) < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") if norm_method is not None: if norm_method == ""normal"": mean = np.mean(_posterior, axis=1) if _posterior.ndim <= 2: standard_deviation = np.sqrt(_numba_var(svar, np.var, _posterior, axis=1)) else: standard_deviation = np.std(_posterior, axis=1) for i in range(0, np.shape(mean)[0]): _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i] elif norm_method == ""minmax"": min_elem = np.min(_posterior, axis=1) max_elem = np.max(_posterior, axis=1) for i in range(0, np.shape(min_elem)[0]): _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i]) elif norm_method == ""rank"": _posterior = rankdata(_posterior, axis=1, method=""average"") else: raise ValueError(f""{norm_method} is not supported. Use normal, minmax or rank."") parallel_kwargs = dict( ax=ax, colornd=colornd, colord=colord, shadend=shadend, diverging_mask=diverging_mask, posterior=_posterior, textsize=textsize, var_names=var_names, legend=legend, figsize=figsize, backend_kwargs=backend_kwargs, backend_config=backend_config, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_parallel"", ""parallelplot"", backend) ax = plot(**parallel_kwargs) return ax " 26825,"def initialize_secrets_backends() -> List[BaseSecretsBackend]: """""" initialize_secrets_backends * import secrets backend classes * instantiate them and return them in a list """""" backend_list = [] custom_secret_backend = get_custom_secret_backend() if custom_secret_backend is not None: backend_list.append(custom_secret_backend) for class_name in DEFAULT_SECRETS_SEARCH_PATH: secrets_backend_cls = import_string(class_name) backend_list.append(secrets_backend_cls()) return backend_list ","def initialize_secrets_backends() -> List[BaseSecretsBackend]: """""" Initialize configured and built-in secrets backends * import secrets backend classes * instantiate them and return them in a list """""" backend_list = [] custom_secret_backend = get_custom_secret_backend() if custom_secret_backend is not None: backend_list.append(custom_secret_backend) for class_name in DEFAULT_SECRETS_SEARCH_PATH: secrets_backend_cls = import_string(class_name) backend_list.append(secrets_backend_cls()) return backend_list " 30264,"def context_create_issue(response, issue): """""" creates GitHub.Issue EntryContext and results to be printed in Demisto Args: response (dict): The raw HTTP response to be inserted to the 'Contents' field issue (dict or list of dicts): A dictionary or a list of dictionaries formatted for Demisto results """""" ec = { 'GitHub.Issue(val.Repository == obj.Repository && val.ID == obj.ID)': 
issue } return_outputs(tableToMarkdown(""Issue Table"", issue), ec, response) ","def context_create_issue(response, issue): """""" creates GitHub.Issue EntryContext and results to be printed in Demisto Args: response (dict): The raw HTTP response to be inserted to the 'Contents' field issue (dict or list of dicts): A dictionary or a list of dictionaries formatted for Demisto results """""" ec = { 'GitHub.Issue(val.Repository == obj.Repository && val.ID == obj.ID)': issue } return_outputs(tableToMarkdown(""Issue Table"", issue), ec, response) " 28890,"def _augment_from_permissions(cls: Type[PO]) -> Type[PO]: cls.VALID_NAMES = set(Permissions.VALID_FLAGS) aliases = set() # make descriptors for all the valid names and aliases for name, value in Permissions.__dict__.items(): if isinstance(value, permission_alias): key = value.alias aliases.add(name) elif isinstance(value, flag_value): key = name else: continue # god bless Python def getter(self: PO, x: str = key) -> Optional[bool]: return self._values.get(x) def setter(self: PO, value: Optional[bool], x: str = key) -> None: self._set(x, value) prop = property(getter, setter) setattr(cls, name, prop) cls.PURE_FLAGS = cls.VALID_NAMES - aliases return cls ","def _augment_from_permissions(cls): cls.VALID_NAMES = set(Permissions.VALID_FLAGS) aliases = set() # make descriptors for all the valid names and aliases for name, value in Permissions.__dict__.items(): if isinstance(value, permission_alias): key = value.alias aliases.add(name) elif isinstance(value, flag_value): key = name else: continue # god bless Python def getter(self: PO, x: str = key) -> Optional[bool]: return self._values.get(x) def setter(self: PO, value: Optional[bool], x: str = key) -> None: self._set(x, value) prop = property(getter, setter) setattr(cls, name, prop) cls.PURE_FLAGS = cls.VALID_NAMES - aliases return cls " 7246,"def peak_local_max(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, indices=True, num_peaks=np.inf, footprint=None, labels=None, num_peaks_per_label=np.inf): """"""Find peaks in an image as coordinate list or boolean mask. Peaks are the local maxima in a region of `2 * min_distance + 1` (i.e. peaks are separated by at least `min_distance`). If there are multiple local maxima with identical pixel intensities inside the region defined with `min_distance`, the coordinates of all such pixels are returned. If both `threshold_abs` and `threshold_rel` are provided, the maximum of the two is chosen as the minimum intensity threshold of peaks. Parameters ---------- image : ndarray Input image. min_distance : int, optional Minimum number of pixels separating peaks in a region of `2 * min_distance + 1` (i.e. peaks are separated by at least `min_distance`). To find the maximum number of peaks, use `min_distance=1`. threshold_abs : float, optional Minimum intensity of peaks. By default, the absolute threshold is the minimum intensity of the image. threshold_rel : float, optional Minimum intensity of peaks, calculated as `max(image) * threshold_rel`. exclude_border : int, tuple of ints, or bool, optional If positive integer, `exclude_border` excludes peaks from within `exclude_border`-pixels of the border of the image. If tuple of non-negative ints, the length of the tuple must match the input array's dimensionality. Each element of the tuple will exclude peaks from within `exclude_border`-pixels of the border of the image along that dimension. If True, takes the `min_distance` parameter as value. 
If zero or False, peaks are identified regardless of their distance from the border. indices : bool, optional If True, the output will be an array representing peak coordinates. The coordinates are sorted according to peaks values (Larger first). If False, the output will be a boolean array shaped as `image.shape` with peaks present at True elements. num_peaks : int, optional Maximum number of peaks. When the number of peaks exceeds `num_peaks`, return `num_peaks` peaks based on highest peak intensity. footprint : ndarray of bools, optional If provided, `footprint == 1` represents the local region within which to search for peaks at every point in `image`. Overrides `min_distance`. labels : ndarray of ints, optional If provided, each unique region `labels == value` represents a unique region to search for peaks. Zero is reserved for background. num_peaks_per_label : int, optional Maximum number of peaks for each label. Returns ------- output : ndarray or ndarray of bools * If `indices = True` : (row, column, ...) coordinates of peaks. * If `indices = False` : Boolean array shaped like `image`, with peaks represented by True values. Notes ----- The peak local maximum function returns the coordinates of local peaks (maxima) in an image. A maximum filter is used for finding local maxima. This operation dilates the original image. After comparison of the dilated and original image, this function returns the coordinates or a mask of the peaks where the dilated image equals the original image. See also -------- skimage.feature.corner_peaks Examples -------- >>> img1 = np.zeros((7, 7)) >>> img1[3, 4] = 1 >>> img1[3, 2] = 1.5 >>> img1 array([[0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 1.5, 0. , 1. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ]]) >>> peak_local_max(img1, min_distance=1) array([[3, 2], [3, 4]]) >>> peak_local_max(img1, min_distance=2) array([[3, 2]]) >>> img2 = np.zeros((20, 20, 20)) >>> img2[10, 10, 10] = 1 >>> peak_local_max(img2, exclude_border=0) array([[10, 10, 10]]) """""" out = np.zeros_like(image, dtype=np.bool) threshold_abs = threshold_abs if threshold_abs is not None else image.min() if min_distance < 1: warn(""min_distance lower then 1 is deprecated. "" ""In this case, peak_local_max acts as finding "" ""image > max(threshold_abs, threshold_rel). In version 0.20 "" ""this warning will be replaced by a ValueError."", FutureWarning, stacklevel=2) if footprint is not None and footprint.size == 1: warn(""footprint.size lower then 2 is deprecated. "" ""In this cases, peak_local_max acts as finding "" ""image > max(threshold_abs, threshold_rel). 
In version 0.20 "" ""this warning will be replaced by a ValueError."", FutureWarning, stacklevel=2) if isinstance(exclude_border, bool): exclude_border = (min_distance if exclude_border else 0,) * image.ndim elif isinstance(exclude_border, int): if exclude_border < 0: raise ValueError(""`exclude_border` cannot be a negative value"") exclude_border = (exclude_border,) * image.ndim elif isinstance(exclude_border, tuple): if len(exclude_border) != image.ndim: raise ValueError( ""`exclude_border` should have the same length as the "" ""dimensionality of the image."") for exclude in exclude_border: if not isinstance(exclude, int): raise ValueError( ""`exclude_border`, when expressed as a tuple, must only "" ""contain ints."" ) if exclude < 0: raise ValueError( ""`exclude_border` cannot contain a negative value"") else: raise TypeError( ""`exclude_border` must be bool, int, or tuple with the same "" ""length as the dimensionality of the image."") # no peak for a trivial image if np.all(image == image.flat[0]): if indices is True: return np.empty((0, image.ndim), np.int) else: return out # In the case of labels, call ndi on each label if labels is not None: label_values = np.unique(labels) # Reorder label values to have consecutive integers (no gaps) if np.any(np.diff(label_values) != 1): mask = labels >= 1 labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype) labels = labels.astype(np.int32) # create a mask for the non-exclude region inner_mask = _exclude_border(np.ones_like(labels, dtype=bool), exclude_border) # For each label, extract a smaller image enclosing the object of # interest, identify num_peaks_per_label peaks and mark them in # variable out. for label_idx, obj in enumerate(ndi.find_objects(labels)): img_object = image[obj] * (labels[obj] == label_idx + 1) mask = _get_peak_mask(img_object, min_distance, footprint, threshold_abs, threshold_rel) if exclude_border: # remove peaks fall in the exclude region mask &= inner_mask[obj] coordinates = _get_high_intensity_peaks(img_object, mask, num_peaks_per_label) nd_indices = tuple(coordinates.T) mask.fill(False) mask[nd_indices] = True out[obj] += mask if not indices and np.isinf(num_peaks): return out coordinates = _get_high_intensity_peaks(image, out, num_peaks) if indices: return coordinates else: out.fill(False) nd_indices = tuple(coordinates.T) out[nd_indices] = True return out # Non maximum filter mask = _get_peak_mask(image, min_distance, footprint, threshold_abs, threshold_rel) mask = _exclude_border(mask, exclude_border) # Select highest intensities (num_peaks) coordinates = _get_high_intensity_peaks(image, mask, num_peaks) if indices is True: return coordinates else: nd_indices = tuple(coordinates.T) out[nd_indices] = True return out ","def peak_local_max(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, indices=True, num_peaks=np.inf, footprint=None, labels=None, num_peaks_per_label=np.inf): """"""Find peaks in an image as coordinate list or boolean mask. Peaks are the local maxima in a region of `2 * min_distance + 1` (i.e. peaks are separated by at least `min_distance`). If there are multiple local maxima with identical pixel intensities inside the region defined with `min_distance`, the coordinates of all such pixels are returned. If both `threshold_abs` and `threshold_rel` are provided, the maximum of the two is chosen as the minimum intensity threshold of peaks. Parameters ---------- image : ndarray Input image. 
min_distance : int, optional Minimum number of pixels separating peaks in a region of `2 * min_distance + 1` (i.e. peaks are separated by at least `min_distance`). To find the maximum number of peaks, use `min_distance=1`. threshold_abs : float, optional Minimum intensity of peaks. By default, the absolute threshold is the minimum intensity of the image. threshold_rel : float, optional Minimum intensity of peaks, calculated as `max(image) * threshold_rel`. exclude_border : int, tuple of ints, or bool, optional If positive integer, `exclude_border` excludes peaks from within `exclude_border`-pixels of the border of the image. If tuple of non-negative ints, the length of the tuple must match the input array's dimensionality. Each element of the tuple will exclude peaks from within `exclude_border`-pixels of the border of the image along that dimension. If True, takes the `min_distance` parameter as value. If zero or False, peaks are identified regardless of their distance from the border. indices : bool, optional If True, the output will be an array representing peak coordinates. The coordinates are sorted according to peaks values (Larger first). If False, the output will be a boolean array shaped as `image.shape` with peaks present at True elements. num_peaks : int, optional Maximum number of peaks. When the number of peaks exceeds `num_peaks`, return `num_peaks` peaks based on highest peak intensity. footprint : ndarray of bools, optional If provided, `footprint == 1` represents the local region within which to search for peaks at every point in `image`. Overrides `min_distance`. labels : ndarray of ints, optional If provided, each unique region `labels == value` represents a unique region to search for peaks. Zero is reserved for background. num_peaks_per_label : int, optional Maximum number of peaks for each label. Returns ------- output : ndarray or ndarray of bools * If `indices = True` : (row, column, ...) coordinates of peaks. * If `indices = False` : Boolean array shaped like `image`, with peaks represented by True values. Notes ----- The peak local maximum function returns the coordinates of local peaks (maxima) in an image. A maximum filter is used for finding local maxima. This operation dilates the original image. After comparison of the dilated and original image, this function returns the coordinates or a mask of the peaks where the dilated image equals the original image. See also -------- skimage.feature.corner_peaks Examples -------- >>> img1 = np.zeros((7, 7)) >>> img1[3, 4] = 1 >>> img1[3, 2] = 1.5 >>> img1 array([[0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 1.5, 0. , 1. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ], [0. , 0. , 0. , 0. , 0. , 0. , 0. ]]) >>> peak_local_max(img1, min_distance=1) array([[3, 2], [3, 4]]) >>> peak_local_max(img1, min_distance=2) array([[3, 2]]) >>> img2 = np.zeros((20, 20, 20)) >>> img2[10, 10, 10] = 1 >>> peak_local_max(img2, exclude_border=0) array([[10, 10, 10]]) """""" out = np.zeros_like(image, dtype=np.bool) threshold_abs = threshold_abs if threshold_abs is not None else image.min() if min_distance < 1: warn(""min_distance lower than 1 is deprecated. "" ""In this case, peak_local_max acts as finding "" ""image > max(threshold_abs, threshold_rel). 
In version 0.20 "" ""this warning will be replaced by a ValueError."", FutureWarning, stacklevel=2) if footprint is not None and footprint.size == 1: warn(""footprint.size lower then 2 is deprecated. "" ""In this cases, peak_local_max acts as finding "" ""image > max(threshold_abs, threshold_rel). In version 0.20 "" ""this warning will be replaced by a ValueError."", FutureWarning, stacklevel=2) if isinstance(exclude_border, bool): exclude_border = (min_distance if exclude_border else 0,) * image.ndim elif isinstance(exclude_border, int): if exclude_border < 0: raise ValueError(""`exclude_border` cannot be a negative value"") exclude_border = (exclude_border,) * image.ndim elif isinstance(exclude_border, tuple): if len(exclude_border) != image.ndim: raise ValueError( ""`exclude_border` should have the same length as the "" ""dimensionality of the image."") for exclude in exclude_border: if not isinstance(exclude, int): raise ValueError( ""`exclude_border`, when expressed as a tuple, must only "" ""contain ints."" ) if exclude < 0: raise ValueError( ""`exclude_border` cannot contain a negative value"") else: raise TypeError( ""`exclude_border` must be bool, int, or tuple with the same "" ""length as the dimensionality of the image."") # no peak for a trivial image if np.all(image == image.flat[0]): if indices is True: return np.empty((0, image.ndim), np.int) else: return out # In the case of labels, call ndi on each label if labels is not None: label_values = np.unique(labels) # Reorder label values to have consecutive integers (no gaps) if np.any(np.diff(label_values) != 1): mask = labels >= 1 labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype) labels = labels.astype(np.int32) # create a mask for the non-exclude region inner_mask = _exclude_border(np.ones_like(labels, dtype=bool), exclude_border) # For each label, extract a smaller image enclosing the object of # interest, identify num_peaks_per_label peaks and mark them in # variable out. for label_idx, obj in enumerate(ndi.find_objects(labels)): img_object = image[obj] * (labels[obj] == label_idx + 1) mask = _get_peak_mask(img_object, min_distance, footprint, threshold_abs, threshold_rel) if exclude_border: # remove peaks fall in the exclude region mask &= inner_mask[obj] coordinates = _get_high_intensity_peaks(img_object, mask, num_peaks_per_label) nd_indices = tuple(coordinates.T) mask.fill(False) mask[nd_indices] = True out[obj] += mask if not indices and np.isinf(num_peaks): return out coordinates = _get_high_intensity_peaks(image, out, num_peaks) if indices: return coordinates else: out.fill(False) nd_indices = tuple(coordinates.T) out[nd_indices] = True return out # Non maximum filter mask = _get_peak_mask(image, min_distance, footprint, threshold_abs, threshold_rel) mask = _exclude_border(mask, exclude_border) # Select highest intensities (num_peaks) coordinates = _get_high_intensity_peaks(image, mask, num_peaks) if indices is True: return coordinates else: nd_indices = tuple(coordinates.T) out[nd_indices] = True return out " 38684,"def attr_validator(validate_fn): '''Validate object attributes recursively. This returns a function which you can call with the object to check. It will return :class:`True` if the :func:`validate_fn` returns :class:`True` for all object attributes recursively. If the object to be validate is an iterable, its elements will be validated individually. :arg validate_fn: A callable that validates an object. It takes a single argument, which is the object to validate. 
:returns: A validation function that will perform the actual validation. It accepts a single argument, which is the object to validate. It returns a two-element tuple, containing the result of the validation as a boolean and a formatted string indicating the faulty attribute. .. note:: Objects defining :attr:`__slots__` are passed directly to the ``validate_fn`` function. .. versionadded:: 3.3 ''' # Already visited objects visited = set() depth = 0 def _do_validate(obj, path=None): def _fmt(path): ret = '' for p in path: t, name = p if t == 'A': ret += f'.{name}' elif t == 'I': ret += f'[{name}]' elif t == 'K': ret += f'[{name!r}]' # Remove leading '.' return ret[1:] if ret[0] == '.' else ret nonlocal depth def _clean_cache(): nonlocal depth depth -= 1 if depth == 0: # We are exiting the top-level call visited.clear() depth += 1 visited.add(id(obj)) if path is None: path = [('A', type(obj).__name__)] if isinstance(obj, dict): for k, v in obj.items(): if id(v) in visited: continue path.append(('K', k)) valid, _ = _do_validate(v, path) if not valid: _clean_cache() return False, _fmt(path) path.pop() _clean_cache() return True, _fmt(path) if (isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set)): for i, x in enumerate(obj): if id(x) in visited: continue path.append(('I', i)) valid, _ = _do_validate(x, path) if not valid: _clean_cache() return False, _fmt(path) path.pop() _clean_cache() return True, _fmt(path) valid = validate_fn(obj) if not valid: _clean_cache() return False, _fmt(path) # Stop here if obj is a built-in type if isinstance(obj, type) and _is_builtin_type(obj): return True, _fmt(path) if hasattr(obj, '__dict__'): for k, v in obj.__dict__.items(): if id(v) in visited: continue path.append(('A', k)) valid, _ = _do_validate(v, path) if not valid: _clean_cache() return False, _fmt(path) path.pop() _clean_cache() return True, _fmt(path) return _do_validate ","def attr_validator(validate_fn): '''Validate object attributes recursively. This returns a function which you can call with the object to check. It will return :class:`True` if the :func:`validate_fn` returns :class:`True` for all object attributes recursively. If the object to be validated is an iterable, its elements will be validated individually. :arg validate_fn: A callable that validates an object. It takes a single argument, which is the object to validate. :returns: A validation function that will perform the actual validation. It accepts a single argument, which is the object to validate. It returns a two-element tuple, containing the result of the validation as a boolean and a formatted string indicating the faulty attribute. .. note:: Objects defining :attr:`__slots__` are passed directly to the ``validate_fn`` function. .. versionadded:: 3.3 ''' # Already visited objects visited = set() depth = 0 def _do_validate(obj, path=None): def _fmt(path): ret = '' for p in path: t, name = p if t == 'A': ret += f'.{name}' elif t == 'I': ret += f'[{name}]' elif t == 'K': ret += f'[{name!r}]' # Remove leading '.' return ret[1:] if ret[0] == '.' 
else ret nonlocal depth def _clean_cache(): nonlocal depth depth -= 1 if depth == 0: # We are exiting the top-level call visited.clear() depth += 1 visited.add(id(obj)) if path is None: path = [('A', type(obj).__name__)] if isinstance(obj, dict): for k, v in obj.items(): if id(v) in visited: continue path.append(('K', k)) valid, _ = _do_validate(v, path) if not valid: _clean_cache() return False, _fmt(path) path.pop() _clean_cache() return True, _fmt(path) if (isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set)): for i, x in enumerate(obj): if id(x) in visited: continue path.append(('I', i)) valid, _ = _do_validate(x, path) if not valid: _clean_cache() return False, _fmt(path) path.pop() _clean_cache() return True, _fmt(path) valid = validate_fn(obj) if not valid: _clean_cache() return False, _fmt(path) # Stop here if obj is a built-in type if isinstance(obj, type) and _is_builtin_type(obj): return True, _fmt(path) if hasattr(obj, '__dict__'): for k, v in obj.__dict__.items(): if id(v) in visited: continue path.append(('A', k)) valid, _ = _do_validate(v, path) if not valid: _clean_cache() return False, _fmt(path) path.pop() _clean_cache() return True, _fmt(path) return _do_validate " 53487,"def is_flying_animal(an_object): is_flying = isinstance(an_object, Animal) and an_object.name in FLYING_THINGS return is_flying, not is_flying ","def is_flying_animal(an_object): is_flying = isinstance(an_object, Animal) and an_object.name in FLYING_THINGS is_not_flying = not is_flying return is_flying, is_not_flying " 3588,"def cancel_build(build): """""" Cancel a triggered/running build. Depending on the current state of the build, it takes one approach or the other: - Triggered: update the build status and tells Celery to revoke this task. Workers will know about this and will discard it. - Running: communicate Celery to force the termination of the current build and relies on the worker to update the build's status correct. """""" # NOTE: `terminate=True` is required for the child to attend our call # immediately when it's running the build. Otherwise, it finishes the # task. However, to revoke a task that has not started yet, we don't # need it. if build.state == BUILD_STATE_TRIGGERED: # Since the task won't be executed at all, we need to update the # Build object here. terminate = False build.state = BUILD_STATE_CANCELLED build.success = False build.error = BuildCancelled.message build.length = 0 build.save() else: # In this case, we left the update of the Build object to the task # itself to be executed in the `on_failure` handler. terminate = True log.warning( ""Canceling build."", project_slug=build.project.slug, version_slug=build.version.slug, build_id=build.pk, build_task_id=build.task_id, terminate=terminate, ) app.control.revoke(build.task_id, signal=signal.SIGINT, terminate=terminate) ","def cancel_build(build): """""" Cancel a triggered/running build. Depending on the current state of the build, it takes one approach or the other: - Triggered: update the build status and tells Celery to revoke this task. Workers will know about this and will discard it. - Running: communicate Celery to force the termination of the current build and rely on the worker to update the build's status. """""" # NOTE: `terminate=True` is required for the child to attend our call # immediately when it's running the build. Otherwise, it finishes the # task. However, to revoke a task that has not started yet, we don't # need it. 
if build.state == BUILD_STATE_TRIGGERED: # Since the task won't be executed at all, we need to update the # Build object here. terminate = False build.state = BUILD_STATE_CANCELLED build.success = False build.error = BuildCancelled.message build.length = 0 build.save() else: # In this case, we left the update of the Build object to the task # itself to be executed in the `on_failure` handler. terminate = True log.warning( ""Canceling build."", project_slug=build.project.slug, version_slug=build.version.slug, build_id=build.pk, build_task_id=build.task_id, terminate=terminate, ) app.control.revoke(build.task_id, signal=signal.SIGINT, terminate=terminate) " 30260,"def execute_query(data_args): req_id = ''.join(random.choice(string.ascii_letters) for x in range(8)) start, end = get_time_frame(data_args.get('time-frame'), data_args.get('start-date'), data_args.get('end-date')) delta = end - start dates = [] for i in range(delta.days + 1): dates.append((start + timedelta(days=i)).strftime(""logs-%Y-%m-%d"")) data = { ""indices"": dates, ""searchType"": ""DFS_QUERY_THEN_FETCH"", ""source"": { ""size"": data_args.get('page-size'), ""query"": { ""query_string"": { ""default_field"": ""logMessage"", ""query"": data_args.get('keyword') } }, ""stored_fields"": ""logMessage"", ""sort"": [ { ""normalDate"": { ""order"": ""asc"" } } ] } } headers = HEADERS headers['Content-Type'] = 'application/json' headers['Request-Id'] = req_id headers['Request-Origin-Date'] = str(datetime.now()) headers['x-gateway-route-to-tag'] = CLUSTER_ID res = http_request('POST', 'lr-legacy-search-api/esquery', json.dumps(data), headers) logs = res['hits']['hits'] logs_response = [] xml_ns = './/{http://schemas.microsoft.com/win/2004/08/events/event}' for log in logs: message = str(log['fields']['logMessage']) message = message[:-2][3:] try: root = ET.fromstring(message) log_item = { ""EventID"": str(root.find(xml_ns + 'EventID').text), # type: ignore ""Level"": str(root.find(xml_ns + 'Level').text), # type: ignore ""Task"": str(root.find(xml_ns + 'Task').text), # type: ignore ""Opcode"": str(root.find(xml_ns + 'Opcode').text), # type: ignore ""Keywords"": str(root.find(xml_ns + 'Keywords').text), # type: ignore ""Channel"": str(root.find(xml_ns + 'Channel').text), # type: ignore ""Computer"": str(root.find(xml_ns + 'Computer').text), # type: ignore ""EventData"": str(root.find(xml_ns + 'EventData').text) # type: ignore .replace('\\r\\n', '\n').replace('\\t', '\t') } logs_response.append(log_item) except Exception: continue context = createContext(logs_response, removeNull=True) human_readable = tableToMarkdown('logs results', logs_response, LOGS_HEASERS) outputs = {'Logrhythm.Logs': context} return_outputs(readable_output=human_readable, outputs=outputs, raw_response=logs_response) ","def execute_query(data_args): req_id = ''.join(random.choice(string.ascii_letters) for x in range(8)) start, end = get_time_frame(data_args.get('time-frame'), data_args.get('start-date'), data_args.get('end-date')) delta = end - start dates = [] for i in range(delta.days + 1): dates.append((start + timedelta(days=i)).strftime(""logs-%Y-%m-%d"")) data = { ""indices"": dates, ""searchType"": ""DFS_QUERY_THEN_FETCH"", ""source"": { ""size"": data_args.get('page-size'), ""query"": { ""query_string"": { ""default_field"": ""logMessage"", ""query"": data_args.get('keyword') } }, ""stored_fields"": ""logMessage"", ""sort"": [ { ""normalDate"": { ""order"": ""asc"" } } ] } } headers = HEADERS headers['Content-Type'] = 'application/json' 
headers['Request-Id'] = req_id headers['Request-Origin-Date'] = str(datetime.now()) headers['x-gateway-route-to-tag'] = CLUSTER_ID res = http_request('POST', 'lr-legacy-search-api/esquery', json.dumps(data), headers) logs = res['hits']['hits'] logs_response = [] xml_ns = './/{http://schemas.microsoft.com/win/2004/08/events/event}' for log in logs: message = str(log['fields']['logMessage']) message = message[3:-2] try: root = ET.fromstring(message) log_item = { ""EventID"": str(root.find(xml_ns + 'EventID').text), # type: ignore ""Level"": str(root.find(xml_ns + 'Level').text), # type: ignore ""Task"": str(root.find(xml_ns + 'Task').text), # type: ignore ""Opcode"": str(root.find(xml_ns + 'Opcode').text), # type: ignore ""Keywords"": str(root.find(xml_ns + 'Keywords').text), # type: ignore ""Channel"": str(root.find(xml_ns + 'Channel').text), # type: ignore ""Computer"": str(root.find(xml_ns + 'Computer').text), # type: ignore ""EventData"": str(root.find(xml_ns + 'EventData').text) # type: ignore .replace('\\r\\n', '\n').replace('\\t', '\t') } logs_response.append(log_item) except Exception: continue context = createContext(logs_response, removeNull=True) human_readable = tableToMarkdown('logs results', logs_response, LOGS_HEASERS) outputs = {'Logrhythm.Logs': context} return_outputs(readable_output=human_readable, outputs=outputs, raw_response=logs_response) " 27733,"def test_plain_unittest_does_not_support_async(testdir): """""" Async functions in plain unittest.TestCase subclasses are not supported without plugins. This test exists here to avoid introducing this support by accident, leading users to expect that it works, rather than doing so intentionally as a feature. See https://github.com/pytest-dev/pytest-asyncio/issues/180 for more context. """""" testdir.copy_example(""unittest/test_unittest_plain_async.py"") result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines( [""*RuntimeWarning: coroutine * was never awaited"", ""*1 passed*""] ) ","def test_plain_unittest_does_not_support_async(testdir): """"""Async functions in plain unittest.TestCase subclasses are not supported without plugins. This test exists here to avoid introducing this support by accident, leading users to expect that it works, rather than doing so intentionally as a feature. See https://github.com/pytest-dev/pytest-asyncio/issues/180 for more context. """""" testdir.copy_example(""unittest/test_unittest_plain_async.py"") result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines( [""*RuntimeWarning: coroutine * was never awaited"", ""*1 passed*""] ) " 52737,"def espace_slash(string): """"""This function transforms '\/' in '/'' if '\' was not escaped """""" # noqa: W605 escaping = False new_string = """" for char in string: if not escaping and char == '\\': escaping = True elif escaping and char != '/': new_string += '\\' + char escaping = False else: new_string += char escaping = False return new_string ","def espace_slash(string): """"""This function transforms '\\/' in '/' but leaves '\\\\/' unchanged. This is useful to parse regexps from JavaScript style (/regexp/). """""" # noqa: W605 escaping = False new_string = """" for char in string: if not escaping and char == '\\': escaping = True elif escaping and char != '/': new_string += '\\' + char escaping = False else: new_string += char escaping = False return new_string " 34879,"def on_device(data, device): """"""Annotate a tensor with device id. Parameters ---------- data : tvm.relay.Expr The tensor to be annotated. 
device : Union(:py:class:`TVMContext`, str) The device that the tensor is annotated with. Returns ------- result : tvm.relay.Expr The annotated tensor. """""" if isinstance(device, _TVMContext): device = device.device_type elif isinstance(device, str): device = _nd.context(device).device_type else: raise ValueError(""device is expected to be the type of TVMContext or "" ""str, but received %s"" % (type(device))) return _make.on_device(data, device) ","def on_device(data, device): """"""Annotate an expression with device id. Parameters ---------- data : tvm.relay.Expr The tensor to be annotated. device : Union(:py:class:`TVMContext`, str) The device that the tensor is annotated with. Returns ------- result : tvm.relay.Expr The annotated tensor. """""" if isinstance(device, _TVMContext): device = device.device_type elif isinstance(device, str): device = _nd.context(device).device_type else: raise ValueError(""device is expected to be the type of TVMContext or "" ""str, but received %s"" % (type(device))) return _make.on_device(data, device) " 36380,"def rmtree(path, ignore_errors=False, onerror=None): """"""Recursively delete a directory tree. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is platform and implementation dependent; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. """""" if ignore_errors: def onerror(*args): pass elif onerror is None: def onerror(*args): raise try: if not os.path.exists(path): raise FileNotFoundError(""Cannot call rmtree on a non-existent path"") except: onerror(os.path.exists, path, sys.exc_info()) return if _use_fd_functions: # While the unsafe rmtree works fine on bytes, the fd based does not. if isinstance(path, bytes): path = os.fsdecode(path) # Note: To guard against symlink races, we use the standard # lstat()/open()/fstat() trick. try: orig_st = os.lstat(path) except Exception: onerror(os.lstat, path, sys.exc_info()) return try: fd = os.open(path, os.O_RDONLY) except Exception: onerror(os.lstat, path, sys.exc_info()) return try: if os.path.samestat(orig_st, os.fstat(fd)): _rmtree_safe_fd(fd, path, onerror) try: os.rmdir(path) except OSError: onerror(os.rmdir, path, sys.exc_info()) else: try: # symlinks to directories are forbidden, see bug #1669 raise OSError(""Cannot call rmtree on a symbolic link"") except OSError: onerror(os.path.islink, path, sys.exc_info()) finally: os.close(fd) else: try: if os.path.islink(path): # symlinks to directories are forbidden, see bug #1669 raise OSError(""Cannot call rmtree on a symbolic link"") except OSError: onerror(os.path.islink, path, sys.exc_info()) # can't continue even if onerror hook returns return return _rmtree_unsafe(path, onerror) ","def rmtree(path, ignore_errors=False, onerror=None): """"""Recursively delete a directory tree. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is platform and implementation dependent; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. 
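A minimal usage sketch (hypothetical path; the onerror callback receives the
(func, path, exc_info) triple described above and here just records the
failure instead of raising):

    def log_and_continue(func, path, exc_info):
        # note which path could not be removed, then keep going
        print('rmtree failed on', path)

    rmtree('/tmp/scratch-dir', onerror=log_and_continue)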
"""""" if ignore_errors: def onerror(*args): pass elif onerror is None: def onerror(*args): raise try: if not os.path.exists(path): raise FileNotFoundError(""Cannot call rmtree on a non-existent path"") except FileNotFoundError: onerror(os.path.exists, path, sys.exc_info()) return if _use_fd_functions: # While the unsafe rmtree works fine on bytes, the fd based does not. if isinstance(path, bytes): path = os.fsdecode(path) # Note: To guard against symlink races, we use the standard # lstat()/open()/fstat() trick. try: orig_st = os.lstat(path) except Exception: onerror(os.lstat, path, sys.exc_info()) return try: fd = os.open(path, os.O_RDONLY) except Exception: onerror(os.lstat, path, sys.exc_info()) return try: if os.path.samestat(orig_st, os.fstat(fd)): _rmtree_safe_fd(fd, path, onerror) try: os.rmdir(path) except OSError: onerror(os.rmdir, path, sys.exc_info()) else: try: # symlinks to directories are forbidden, see bug #1669 raise OSError(""Cannot call rmtree on a symbolic link"") except OSError: onerror(os.path.islink, path, sys.exc_info()) finally: os.close(fd) else: try: if os.path.islink(path): # symlinks to directories are forbidden, see bug #1669 raise OSError(""Cannot call rmtree on a symbolic link"") except OSError: onerror(os.path.islink, path, sys.exc_info()) # can't continue even if onerror hook returns return return _rmtree_unsafe(path, onerror) " 5368,"def test_new_key_url(): """""" Test when only the key_url is changed that a change is triggered """""" kwargs = { ""name"": ""deb http://mock/ sid main"", ""disabled"": False, } key_url = ""http://mock/changed_gpg.key"" with patch.dict(pkgrepo.__salt__, {""pkg.get_repo"": MagicMock(return_value=kwargs)}): ret = pkgrepo.managed(key_url=key_url, **kwargs) assert {""key_url"": {""old"": None, ""new"": key_url}} == ret[""changes""] ","def test_new_key_url(): """""" Test when only the key_url is changed that a change is triggered """""" kwargs = { ""name"": ""deb http://mock/ sid main"", ""disabled"": False, } key_url = ""http://mock/changed_gpg.key"" with patch.dict(pkgrepo.__salt__, {""pkg.get_repo"": MagicMock(return_value=kwargs)}): ret = pkgrepo.managed(key_url=key_url, **kwargs) assert ret[""changes""] == {""key_url"": {""old"": None, ""new"": key_url}} " 42630,"def get_bisq_market_price(asset: Asset) -> Price: """""" Get price for pair at bisq marketplace. Price is returned agains BTC. Can raise: - RemoteError: If the market doesn't exists or request fails - DeserializationError: If the data returned is not a valid price """""" symbol = asset.symbol url = PRICE_API_URL.format(symbol=symbol) try: response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE) except requests.exceptions.RequestException as e: raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e data = response.json() if 'error' in data: raise RemoteError(f'Request data from bisq.markets {url} is not valid {data[""error""]}') return deserialize_price(data['last']) ","def get_bisq_market_price(asset: Asset) -> Price: """""" Get price for pair at bisq marketplace. Price is returned against BTC. 
Can raise: - RemoteError: If the market doesn't exists or request fails - DeserializationError: If the data returned is not a valid price """""" symbol = asset.symbol url = PRICE_API_URL.format(symbol=symbol) try: response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE) except requests.exceptions.RequestException as e: raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e data = response.json() if 'error' in data: raise RemoteError(f'Request data from bisq.markets {url} is not valid {data[""error""]}') return deserialize_price(data['last']) " 24863,"def my_func(self): """"""This is a docstring. Returns: mymodule.Class: An object """""" return mymodule.Class() ","def my_func(self): """"""finds_google_return_custom_class Returns: mymodule.Class: An object """""" return mymodule.Class() " 23324,"def main(): """"""Parse args then run the pytest suite for Spyder."""""" test_parser = argparse.ArgumentParser( description=""Helper script to run Spyder's test suite"") test_parser.add_argument('--run-slow', action='store_true', default=False, help='Run the slow tests') test_parser.add_argument('pytest_args', nargs=argparse.REMAINDER, metavar=""..."", help=""Args to pass to pytest"") test_args = test_parser.parse_args() run_pytest(run_slow=test_args.run_slow, extra_args=test_args.pytest_args) ","def main(): """"""Parse args then run the pytest suite for Spyder."""""" test_parser = argparse.ArgumentParser( description=""Helper script to run Spyder's test suite"") test_parser.add_argument('--run-slow', action='store_true', default=False, help='Run the slow tests') test_parser.add_argument('pytest_args', nargs=argparse.REMAINDER, metavar=""..."", help=""Args to pass to pytest"") test_args, pytest_args = test_parser.parse_known_args() run_pytest(run_slow=test_args.run_slow, extra_args=test_args.pytest_args) " 20547,"def get_parser(): param = Param() parser = SCTArgumentParser( description=( ""Register an anatomical image to the spinal cord MRI template (default: PAM50).\n"" ""\n"" ""The registration process includes three main registration steps:\n"" "" 1. straightening of the image using the spinal cord segmentation (see sct_straighten_spinalcord for "" ""details);\n"" "" 2. vertebral alignment between the image and the template, using labels along the spine;\n"" "" 3. iterative slice-wise non-linear registration (see sct_register_multimodal for details)\n"" ""\n"" ""To register a subject to the template, try the default command:\n"" "" sct_register_to_template -i data.nii.gz -s data_seg.nii.gz -l data_labels.nii.gz\n"" ""\n"" ""If this default command does not produce satisfactory results, the '-param' "" ""argument should be tweaked according to the tips given here:\n"" "" https://spinalcordtoolbox.com/en/latest/user_section/command-line.html#sct-register-multimodal\n"" ""\n"" ""The default registration method brings the subject image to the template, which can be problematic with "" ""highly non-isotropic images as it would induce large interpolation errors during the straightening "" ""procedure. Although the default method is recommended, you may want to register the template to the "" ""subject (instead of the subject to the template) by skipping the straightening procedure. To do so, use "" ""the parameter '-ref subject'. 
Example below:\n"" "" sct_register_to_template -i data.nii.gz -s data_seg.nii.gz -l data_labels.nii.gz -ref subject -param "" ""step=1,type=seg,algo=centermassrot,smooth=0:step=2,type=seg,algo=columnwise,smooth=0,smoothWarpXY=2\n"" ""\n"" ""Vertebral alignment (step 2) consists in aligning the vertebrae between the subject and the template. "" ""Two types of labels are possible:\n"" "" - Vertebrae mid-body labels, created at the center of the spinal cord using the parameter '-l';\n"" "" - Posterior edge of the intervertebral discs, using the parameter '-ldisc'.\n"" ""\n"" ""If only one label is provided, a simple translation will be applied between the subject label and the "" ""template label. No scaling will be performed. \n"" ""\n"" ""If two labels are provided, a linear transformation (translation + rotation + superior-inferior linear "" ""scaling) will be applied. The strategy here is to define labels that cover the region of interest. For "" ""example, if you are interested in studying C2 to C6 levels, then provide one label at C2 and another at "" ""C6. However, note that if the two labels are very far apart (e.g. C2 and T12), there might be a "" ""mis-alignment of discs because a subject''s intervertebral discs distance might differ from that of the "" ""template.\n"" ""\n"" ""If more than two labels are used, a non-linear registration will be applied to align the each "" ""intervertebral disc between the subject and the template, as described in "" ""sct_straighten_spinalcord. This the most accurate and preferred method. Note: \n"" "" - Using more than two labels will only work with disc labels (i.e. '-ldisc')\n"" "" - This feature is not compatible with the parameter '-ref subject', where only a rigid registration is performed.\n"" "" - Due to the non-linear registration in the S-I direction, the warping field will be cropped above the top label and below the bottom label. Applying this warping field will result in a strange-looking registered image that has the same value above the top label and below the bottom label. But if you are not interested in these regions, you do not need to worry about it.\n"" ""\n"" ""More information about label creation can be found at "" ""https://spinalcordtoolbox.com/user_section/tutorials/registration-to-template/vertebral-labeling.html"" ) ) mandatory = parser.add_argument_group(""\nMANDATORY ARGUMENTS"") mandatory.add_argument( '-i', metavar=Metavar.file, required=True, help=""Input anatomical image. Example: anat.nii.gz"" ) mandatory.add_argument( '-s', metavar=Metavar.file, required=True, help=""Spinal cord segmentation. Example: anat_seg.nii.gz"" ) optional = parser.add_argument_group(""\nOPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit."" ) optional.add_argument( '-s-template-id', metavar=Metavar.int, type=int, help=""Segmentation file ID to use for registration. The ID is an integer indicated in the file "" ""'template/info_label.txt'. This 'info_label.txt' file corresponds to the template indicated by the flag "" ""'-t'. By default, the spinal cord segmentation is used (ID=3), but if available, a different segmentation"" "" such as white matter segmentation could produce better registration results."", default=3 ) optional.add_argument( '-l', metavar=Metavar.file, help=""One or two labels (preferred) located at the center of the spinal cord, on the mid-vertebral slice. 
"" ""Example: anat_labels.nii.gz\n"" ""For more information about label creation, please see: "" ""https://spinalcordtoolbox.com/user_section/tutorials/registration-to-template/vertebral-labeling.html"" ) optional.add_argument( '-ldisc', metavar=Metavar.file, help=""File containing disc labels. Labels can be located either at the posterior edge "" ""of the intervertebral discs, or at the orthogonal projection of each disc onto "" ""the spinal cord (e.g.: the file 'xxx_seg_labeled_discs.nii.gz' output by sct_label_vertebrae).\n"" ""If you are using more than 2 labels, all discs covering the region of interest should be provided. "" ""E.g., if you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,6,7. "" ""For more information about label creation, please refer to "" ""https://spinalcordtoolbox.com/user_section/tutorials/registration-to-template/vertebral-labeling.html"" ) optional.add_argument( '-lspinal', metavar=Metavar.file, help=""Labels located in the center of the spinal cord, at the superior-inferior level corresponding to the "" ""mid-point of the spinal level. Example: anat_labels.nii.gz\n"" ""Each label is a single voxel, which value corresponds to the spinal level (e.g.: 2 for spinal level 2). "" ""If you are using more than 2 labels, all spinal levels covering the region of interest should be "" ""provided (e.g., if you are interested in levels C2 to C7, then you should provide spinal level labels "" ""2,3,4,5,6,7)."" ) optional.add_argument( '-ofolder', metavar=Metavar.folder, action=ActionCreateFolder, help=""Output folder."" ) optional.add_argument( '-t', metavar=Metavar.folder, default=param.path_template, help=""Path to template"" ) optional.add_argument( '-c', choices=['t1', 't2', 't2s'], default='t2', help=""Contrast to use for registration."" ) optional.add_argument( '-ref', choices=['template', 'subject'], default='template', help=""Reference for registration: template: subject->template, subject: template->subject."" ) optional.add_argument( '-param', metavar=Metavar.list, type=list_type(':', str), help=(f""Parameters for registration (see sct_register_multimodal). 
Default:"" f""\n"" f""step=0\n"" f"" - type={paramregmulti.steps['0'].type}\n"" f"" - dof={paramregmulti.steps['0'].dof}\n"" f""\n"" f""step=1\n"" f"" - type={paramregmulti.steps['1'].type}\n"" f"" - algo={paramregmulti.steps['1'].algo}\n"" f"" - metric={paramregmulti.steps['1'].metric}\n"" f"" - iter={paramregmulti.steps['1'].iter}\n"" f"" - smooth={paramregmulti.steps['1'].smooth}\n"" f"" - gradStep={paramregmulti.steps['1'].gradStep}\n"" f"" - slicewise={paramregmulti.steps['1'].slicewise}\n"" f"" - smoothWarpXY={paramregmulti.steps['1'].smoothWarpXY}\n"" f"" - pca_eigenratio_th={paramregmulti.steps['1'].pca_eigenratio_th}\n"" f""\n"" f""step=2\n"" f"" - type={paramregmulti.steps['2'].type}\n"" f"" - algo={paramregmulti.steps['2'].algo}\n"" f"" - metric={paramregmulti.steps['2'].metric}\n"" f"" - iter={paramregmulti.steps['2'].iter}\n"" f"" - smooth={paramregmulti.steps['2'].smooth}\n"" f"" - gradStep={paramregmulti.steps['2'].gradStep}\n"" f"" - slicewise={paramregmulti.steps['2'].slicewise}\n"" f"" - smoothWarpXY={paramregmulti.steps['2'].smoothWarpXY}\n"" f"" - pca_eigenratio_th={paramregmulti.steps['1'].pca_eigenratio_th}"") ) optional.add_argument( '-centerline-algo', choices=['polyfit', 'bspline', 'linear', 'nurbs'], default=ParamCenterline().algo_fitting, help=""Algorithm for centerline fitting (when straightening the spinal cord)."" ) optional.add_argument( '-centerline-smooth', metavar=Metavar.int, type=int, default=ParamCenterline().smooth, help=""Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}."" ) optional.add_argument( '-qc', metavar=Metavar.folder, action=ActionCreateFolder, default=param.path_qc, help=""The path where the quality control generated content will be saved."" ) optional.add_argument( '-qc-dataset', metavar=Metavar.str, help=""If provided, this string will be mentioned in the QC report as the dataset the process was run on."" ) optional.add_argument( '-qc-subject', metavar=Metavar.str, help=""If provided, this string will be mentioned in the QC report as the subject the process was run on."" ) optional.add_argument( '-igt', metavar=Metavar.file, help=""File name of ground-truth template cord segmentation (binary nifti)."" ) optional.add_argument( '-r', metavar=Metavar.int, type=int, choices=[0, 1], default=param.remove_temp_files, help=""Whether to remove temporary files. 0 = no, 1 = yes"" ) optional.add_argument( '-v', metavar=Metavar.int, type=int, choices=[0, 1, 2], default=1, # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as ""if verbose == #"" in API help=""Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"" ) return parser ","def get_parser(): param = Param() parser = SCTArgumentParser( description=( ""Register an anatomical image to the spinal cord MRI template (default: PAM50).\n"" ""\n"" ""The registration process includes three main registration steps:\n"" "" 1. straightening of the image using the spinal cord segmentation (see sct_straighten_spinalcord for "" ""details);\n"" "" 2. vertebral alignment between the image and the template, using labels along the spine;\n"" "" 3. 
iterative slice-wise non-linear registration (see sct_register_multimodal for details)\n"" ""\n"" ""To register a subject to the template, try the default command:\n"" "" sct_register_to_template -i data.nii.gz -s data_seg.nii.gz -l data_labels.nii.gz\n"" ""\n"" ""If this default command does not produce satisfactory results, the '-param' "" ""argument should be tweaked according to the tips given here:\n"" "" https://spinalcordtoolbox.com/en/latest/user_section/command-line.html#sct-register-multimodal\n"" ""\n"" ""The default registration method brings the subject image to the template, which can be problematic with "" ""highly non-isotropic images as it would induce large interpolation errors during the straightening "" ""procedure. Although the default method is recommended, you may want to register the template to the "" ""subject (instead of the subject to the template) by skipping the straightening procedure. To do so, use "" ""the parameter '-ref subject'. Example below:\n"" "" sct_register_to_template -i data.nii.gz -s data_seg.nii.gz -l data_labels.nii.gz -ref subject -param "" ""step=1,type=seg,algo=centermassrot,smooth=0:step=2,type=seg,algo=columnwise,smooth=0,smoothWarpXY=2\n"" ""\n"" ""Vertebral alignment (step 2) consists in aligning the vertebrae between the subject and the template. "" ""Two types of labels are possible:\n"" "" - Vertebrae mid-body labels, created at the center of the spinal cord using the parameter '-l';\n"" "" - Posterior edge of the intervertebral discs, using the parameter '-ldisc'.\n"" ""\n"" ""If only one label is provided, a simple translation will be applied between the subject label and the "" ""template label. No scaling will be performed. \n"" ""\n"" ""If two labels are provided, a linear transformation (translation + rotation + superior-inferior linear "" ""scaling) will be applied. The strategy here is to define labels that cover the region of interest. For "" ""example, if you are interested in studying C2 to C6 levels, then provide one label at C2 and another at "" ""C6. However, note that if the two labels are very far apart (e.g. C2 and T12), there might be a "" ""mis-alignment of discs because a subject''s intervertebral discs distance might differ from that of the "" ""template.\n"" ""\n"" ""If more than two labels are used, a non-linear registration will be applied to align the each "" ""intervertebral disc between the subject and the template, as described in "" ""sct_straighten_spinalcord. Note: \n"" "" - Using more than two labels will only work with disc labels (i.e. '-ldisc')\n"" "" - This feature is not compatible with the parameter '-ref subject', where only a rigid registration is performed.\n"" "" - Due to the non-linear registration in the S-I direction, the warping field will be cropped above the top label and below the bottom label. Applying this warping field will result in a strange-looking registered image that has the same value above the top label and below the bottom label. But if you are not interested in these regions, you do not need to worry about it.\n"" ""\n"" ""More information about label creation can be found at "" ""https://spinalcordtoolbox.com/user_section/tutorials/registration-to-template/vertebral-labeling.html"" ) ) mandatory = parser.add_argument_group(""\nMANDATORY ARGUMENTS"") mandatory.add_argument( '-i', metavar=Metavar.file, required=True, help=""Input anatomical image. Example: anat.nii.gz"" ) mandatory.add_argument( '-s', metavar=Metavar.file, required=True, help=""Spinal cord segmentation. 
Example: anat_seg.nii.gz"" ) optional = parser.add_argument_group(""\nOPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit."" ) optional.add_argument( '-s-template-id', metavar=Metavar.int, type=int, help=""Segmentation file ID to use for registration. The ID is an integer indicated in the file "" ""'template/info_label.txt'. This 'info_label.txt' file corresponds to the template indicated by the flag "" ""'-t'. By default, the spinal cord segmentation is used (ID=3), but if available, a different segmentation"" "" such as white matter segmentation could produce better registration results."", default=3 ) optional.add_argument( '-l', metavar=Metavar.file, help=""One or two labels (preferred) located at the center of the spinal cord, on the mid-vertebral slice. "" ""Example: anat_labels.nii.gz\n"" ""For more information about label creation, please see: "" ""https://spinalcordtoolbox.com/user_section/tutorials/registration-to-template/vertebral-labeling.html"" ) optional.add_argument( '-ldisc', metavar=Metavar.file, help=""File containing disc labels. Labels can be located either at the posterior edge "" ""of the intervertebral discs, or at the orthogonal projection of each disc onto "" ""the spinal cord (e.g.: the file 'xxx_seg_labeled_discs.nii.gz' output by sct_label_vertebrae).\n"" ""If you are using more than 2 labels, all discs covering the region of interest should be provided. "" ""E.g., if you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,6,7. "" ""For more information about label creation, please refer to "" ""https://spinalcordtoolbox.com/user_section/tutorials/registration-to-template/vertebral-labeling.html"" ) optional.add_argument( '-lspinal', metavar=Metavar.file, help=""Labels located in the center of the spinal cord, at the superior-inferior level corresponding to the "" ""mid-point of the spinal level. Example: anat_labels.nii.gz\n"" ""Each label is a single voxel, which value corresponds to the spinal level (e.g.: 2 for spinal level 2). "" ""If you are using more than 2 labels, all spinal levels covering the region of interest should be "" ""provided (e.g., if you are interested in levels C2 to C7, then you should provide spinal level labels "" ""2,3,4,5,6,7)."" ) optional.add_argument( '-ofolder', metavar=Metavar.folder, action=ActionCreateFolder, help=""Output folder."" ) optional.add_argument( '-t', metavar=Metavar.folder, default=param.path_template, help=""Path to template"" ) optional.add_argument( '-c', choices=['t1', 't2', 't2s'], default='t2', help=""Contrast to use for registration."" ) optional.add_argument( '-ref', choices=['template', 'subject'], default='template', help=""Reference for registration: template: subject->template, subject: template->subject."" ) optional.add_argument( '-param', metavar=Metavar.list, type=list_type(':', str), help=(f""Parameters for registration (see sct_register_multimodal). 
Default:"" f""\n"" f""step=0\n"" f"" - type={paramregmulti.steps['0'].type}\n"" f"" - dof={paramregmulti.steps['0'].dof}\n"" f""\n"" f""step=1\n"" f"" - type={paramregmulti.steps['1'].type}\n"" f"" - algo={paramregmulti.steps['1'].algo}\n"" f"" - metric={paramregmulti.steps['1'].metric}\n"" f"" - iter={paramregmulti.steps['1'].iter}\n"" f"" - smooth={paramregmulti.steps['1'].smooth}\n"" f"" - gradStep={paramregmulti.steps['1'].gradStep}\n"" f"" - slicewise={paramregmulti.steps['1'].slicewise}\n"" f"" - smoothWarpXY={paramregmulti.steps['1'].smoothWarpXY}\n"" f"" - pca_eigenratio_th={paramregmulti.steps['1'].pca_eigenratio_th}\n"" f""\n"" f""step=2\n"" f"" - type={paramregmulti.steps['2'].type}\n"" f"" - algo={paramregmulti.steps['2'].algo}\n"" f"" - metric={paramregmulti.steps['2'].metric}\n"" f"" - iter={paramregmulti.steps['2'].iter}\n"" f"" - smooth={paramregmulti.steps['2'].smooth}\n"" f"" - gradStep={paramregmulti.steps['2'].gradStep}\n"" f"" - slicewise={paramregmulti.steps['2'].slicewise}\n"" f"" - smoothWarpXY={paramregmulti.steps['2'].smoothWarpXY}\n"" f"" - pca_eigenratio_th={paramregmulti.steps['1'].pca_eigenratio_th}"") ) optional.add_argument( '-centerline-algo', choices=['polyfit', 'bspline', 'linear', 'nurbs'], default=ParamCenterline().algo_fitting, help=""Algorithm for centerline fitting (when straightening the spinal cord)."" ) optional.add_argument( '-centerline-smooth', metavar=Metavar.int, type=int, default=ParamCenterline().smooth, help=""Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}."" ) optional.add_argument( '-qc', metavar=Metavar.folder, action=ActionCreateFolder, default=param.path_qc, help=""The path where the quality control generated content will be saved."" ) optional.add_argument( '-qc-dataset', metavar=Metavar.str, help=""If provided, this string will be mentioned in the QC report as the dataset the process was run on."" ) optional.add_argument( '-qc-subject', metavar=Metavar.str, help=""If provided, this string will be mentioned in the QC report as the subject the process was run on."" ) optional.add_argument( '-igt', metavar=Metavar.file, help=""File name of ground-truth template cord segmentation (binary nifti)."" ) optional.add_argument( '-r', metavar=Metavar.int, type=int, choices=[0, 1], default=param.remove_temp_files, help=""Whether to remove temporary files. 0 = no, 1 = yes"" ) optional.add_argument( '-v', metavar=Metavar.int, type=int, choices=[0, 1, 2], default=1, # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as ""if verbose == #"" in API help=""Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"" ) return parser " 4570,"def fdr_threshold(z_vals, alpha): """"""Return the Benjamini-Hochberg FDR threshold for the input z_vals Parameters ---------- z_vals : array A set of z-variates from which the FDR is computed. alpha : float The desired FDR control. Returns ------- threshold : float FDR-controling threshold from the Benjamini-Hochberg procedure. """""" if alpha < 0 or alpha > 1: raise ValueError( 'alpha should be between 0 and 1. 
{} was provided'.format(alpha)) z_vals_ = - np.sort(- z_vals) p_vals = norm.sf(z_vals_) n_samples = len(p_vals) pos = p_vals < alpha * np.linspace(1/n_samples, 1,n_samples) if pos.any(): return (z_vals_[pos][-1] - 1.e-12) return np.infty ","def fdr_threshold(z_vals, alpha): """"""Return the Benjamini-Hochberg FDR threshold for the input z_vals Parameters ---------- z_vals : array A set of z-variates from which the FDR is computed. alpha : float The desired FDR control. Returns ------- threshold : float FDR-controling threshold from the Benjamini-Hochberg procedure. """""" if alpha < 0 or alpha > 1: raise ValueError( 'alpha should be between 0 and 1. {} was provided'.format(alpha)) z_vals_ = - np.sort(- z_vals) p_vals = norm.sf(z_vals_) n_samples = len(p_vals) pos = p_vals < alpha * np.linspace(1 / n_samples, 1, n_samples) if pos.any(): return (z_vals_[pos][-1] - 1.e-12) return np.infty " 4666,"def tagger(namespace=None): """""" A cloture, or function that returns a function. Returned function tags using a specified namespace. :param string namespace: The XML namespace to use to tag elements :return: tag() function """""" def tag(text): return str(etree.QName(namespace, text)) return tag ","def tagger(namespace=None): """""" A closure, or function that returns a function. Returned function tags using a specified namespace. :param string namespace: The XML namespace to use to tag elements :return: tag() function """""" def tag(text): return str(etree.QName(namespace, text)) return tag " 9657,"def role_exists(module, cursor, rolname): """"""Check user exists or not"""""" query = ""SELECT 1 FROM pg_roles WHERE rolname = '%s'"" % rolname try: cursor.execute(query) if cursor.rowcount > 0: return True except Exception as e: module.fail_json(msg=""Cannot execute SQL '%s': %s"" % (query, to_native(e))) return False ","def role_exists(module, cursor, rolname): """"""Check user exists or not"""""" query = ""SELECT 1 FROM pg_roles WHERE rolname = '%s'"" % pg_quote_identifier(rolname, ""role"") try: cursor.execute(query) if cursor.rowcount > 0: return True except Exception as e: module.fail_json(msg=""Cannot execute SQL '%s': %s"" % (query, to_native(e))) return False " 1609,"def cross_validate(estimator, X, y=None, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=np.nan): """"""Evaluate metric(s) by cross-validation and also record fit/score times. Read more in the :ref:`User Guide `. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be for example a list, or an array. y : array-like, default=None The target variable to try to predict in the case of supervised learning. groups : array-like, with shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a ""Group"" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str, callable, list/tuple, dict or None, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. For evaluating multiple metrics, either give a list of (unique) strings or a dict with names as keys and callables as values. NOTE that when using custom scorers, each scorer should return a single value. Metric functions returning a list/array of values can be wrapped into multiple scorers that return one value each. 
See :ref:`multimetric_grid_search` for an example. If None, the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide ` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int or None, default=None The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' return_train_score : bool, default=False Whether to include train scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. return_estimator : bool, default=False Whether to return the estimators fitted on each split. error_score : 'raise' or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- scores : dict of float arrays of shape (n_splits,) Array of scores of the estimator for each run of the cross validation. A dict of arrays containing the score/time arrays for each scorer is returned. The possible keys for this ``dict`` are: ``test_score`` The score array for test scores on each cv split. Suffix ``_score`` in ``test_score`` changes to a specific metric like ``test_r2`` or ``test_auc`` if there are multiple scoring metrics in the scoring parameter. ``train_score`` The score array for train scores on each cv split. Suffix ``_score`` in ``train_score`` changes to a specific metric like ``train_r2`` or ``train_auc`` if there are multiple scoring metrics in the scoring parameter. This is available only if ``return_train_score`` parameter is ``True``. ``fit_time`` The time for fitting the estimator on the train set for each cv split. ``score_time`` The time for scoring the estimator on the test set for each cv split. 
(Note time for scoring on the train set is not included even if ``return_train_score`` is set to ``True`` ``estimator`` The estimator objects for each cv split. This is available only if ``return_estimator`` parameter is set to ``True``. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_validate >>> from sklearn.metrics import make_scorer >>> from sklearn.metrics import confusion_matrix >>> from sklearn.svm import LinearSVC >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() Single metric evaluation using ``cross_validate`` >>> cv_results = cross_validate(lasso, X, y, cv=3) >>> sorted(cv_results.keys()) ['fit_time', 'score_time', 'test_score'] >>> cv_results['test_score'] array([0.33150734, 0.08022311, 0.03531764]) Multiple metric evaluation using ``cross_validate`` (please refer the ``scoring`` parameter doc for more information) >>> scores = cross_validate(lasso, X, y, cv=3, ... scoring=('r2', 'neg_mean_squared_error'), ... return_train_score=True) >>> print(scores['test_neg_mean_squared_error']) [-3635.5... -3573.3... -6114.7...] >>> print(scores['train_r2']) [0.28010158 0.39088426 0.22784852] See Also --------- :func:`sklearn.model_selection.cross_val_score`: Run cross-validation for single metric evaluation. :func:`sklearn.model_selection.cross_val_predict`: Get predictions from each split of cross-validation for diagnostic purposes. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """""" X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel( delayed(_fit_and_score)( clone(estimator), X, y, scorers, train, test, verbose, None, fit_params, return_train_score=return_train_score, return_times=True, return_estimator=return_estimator, error_score=error_score) for train, test in cv.split(X, y, groups)) zipped_scores = list(zip(*scores)) if return_train_score: train_scores = zipped_scores.pop(0) train_scores = _aggregate_score_dicts(train_scores) if return_estimator: fitted_estimators = zipped_scores.pop() test_scores, fit_times, score_times = zipped_scores test_scores = _aggregate_score_dicts(test_scores) ret = {} ret['fit_time'] = np.array(fit_times) ret['score_time'] = np.array(score_times) if return_estimator: ret['estimator'] = fitted_estimators for name in scorers: ret['test_%s' % name] = np.array(test_scores[name]) if return_train_score: key = 'train_%s' % name ret[key] = np.array(train_scores[name]) return ret ","def cross_validate(estimator, X, y=None, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=np.nan): """"""Evaluate metric(s) by cross-validation and also record fit/score times. Read more in the :ref:`User Guide `. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be for example a list, or an array. y : array-like, default=None The target variable to try to predict in the case of supervised learning. 
groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a ""Group"" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str, callable, list/tuple, dict or None, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. For evaluating multiple metrics, either give a list of (unique) strings or a dict with names as keys and callables as values. NOTE that when using custom scorers, each scorer should return a single value. Metric functions returning a list/array of values can be wrapped into multiple scorers that return one value each. See :ref:`multimetric_grid_search` for an example. If None, the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide ` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int or None, default=None The number of CPUs to use to do the computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' return_train_score : bool, default=False Whether to include train scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. return_estimator : bool, default=False Whether to return the estimators fitted on each split. error_score : 'raise' or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- scores : dict of float arrays of shape (n_splits,) Array of scores of the estimator for each run of the cross validation. A dict of arrays containing the score/time arrays for each scorer is returned. 
The possible keys for this ``dict`` are: ``test_score`` The score array for test scores on each cv split. Suffix ``_score`` in ``test_score`` changes to a specific metric like ``test_r2`` or ``test_auc`` if there are multiple scoring metrics in the scoring parameter. ``train_score`` The score array for train scores on each cv split. Suffix ``_score`` in ``train_score`` changes to a specific metric like ``train_r2`` or ``train_auc`` if there are multiple scoring metrics in the scoring parameter. This is available only if ``return_train_score`` parameter is ``True``. ``fit_time`` The time for fitting the estimator on the train set for each cv split. ``score_time`` The time for scoring the estimator on the test set for each cv split. (Note time for scoring on the train set is not included even if ``return_train_score`` is set to ``True`` ``estimator`` The estimator objects for each cv split. This is available only if ``return_estimator`` parameter is set to ``True``. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_validate >>> from sklearn.metrics import make_scorer >>> from sklearn.metrics import confusion_matrix >>> from sklearn.svm import LinearSVC >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() Single metric evaluation using ``cross_validate`` >>> cv_results = cross_validate(lasso, X, y, cv=3) >>> sorted(cv_results.keys()) ['fit_time', 'score_time', 'test_score'] >>> cv_results['test_score'] array([0.33150734, 0.08022311, 0.03531764]) Multiple metric evaluation using ``cross_validate`` (please refer the ``scoring`` parameter doc for more information) >>> scores = cross_validate(lasso, X, y, cv=3, ... scoring=('r2', 'neg_mean_squared_error'), ... return_train_score=True) >>> print(scores['test_neg_mean_squared_error']) [-3635.5... -3573.3... -6114.7...] >>> print(scores['train_r2']) [0.28010158 0.39088426 0.22784852] See Also --------- :func:`sklearn.model_selection.cross_val_score`: Run cross-validation for single metric evaluation. :func:`sklearn.model_selection.cross_val_predict`: Get predictions from each split of cross-validation for diagnostic purposes. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """""" X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. 
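# Sketch of the fan-out that follows: one clone of the estimator is fitted
# and scored per CV split by _fit_and_score, and the per-split results are
# zipped back into arrays exposed as fit_time, score_time and test_<scorer>
# (plus train_<scorer> when return_train_score is True, and the fitted
# estimators when return_estimator is True).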
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel( delayed(_fit_and_score)( clone(estimator), X, y, scorers, train, test, verbose, None, fit_params, return_train_score=return_train_score, return_times=True, return_estimator=return_estimator, error_score=error_score) for train, test in cv.split(X, y, groups)) zipped_scores = list(zip(*scores)) if return_train_score: train_scores = zipped_scores.pop(0) train_scores = _aggregate_score_dicts(train_scores) if return_estimator: fitted_estimators = zipped_scores.pop() test_scores, fit_times, score_times = zipped_scores test_scores = _aggregate_score_dicts(test_scores) ret = {} ret['fit_time'] = np.array(fit_times) ret['score_time'] = np.array(score_times) if return_estimator: ret['estimator'] = fitted_estimators for name in scorers: ret['test_%s' % name] = np.array(test_scores[name]) if return_train_score: key = 'train_%s' % name ret[key] = np.array(train_scores[name]) return ret " 42080,"def _get_skipped_trial_numbers( trials: List[FrozenTrial], used_param_names: Sequence[str] ) -> Set[int]: """"""Utility function for ``plot_parallel_coordinate``. If trial's parameters does not contain a parameter in ``used_param_names``, ``plot_parallel_coordinate`` methods do not use such trails. Args: trials: List of ``FrozenTrials``. used_param_names: The parameter names used in ``plot_parallel_coordinate``. Returns: A list of invalid trial numbers. """""" skipped_trial_numbers = set() for trial in trials: for used_param in used_param_names: if used_param not in trial.params.keys(): skipped_trial_numbers.add(trial.number) break return skipped_trial_numbers ","def _get_skipped_trial_numbers( trials: List[FrozenTrial], used_param_names: Sequence[str] ) -> Set[int]: """"""Utility function for ``plot_parallel_coordinate``. If trial's parameters do not contain a parameter in ``used_param_names``, ``plot_parallel_coordinate`` methods do not use such trails. Args: trials: List of ``FrozenTrials``. used_param_names: The parameter names used in ``plot_parallel_coordinate``. Returns: A list of invalid trial numbers. """""" skipped_trial_numbers = set() for trial in trials: for used_param in used_param_names: if used_param not in trial.params.keys(): skipped_trial_numbers.add(trial.number) break return skipped_trial_numbers " 2702,"def fastica( X, n_components=None, *, algorithm=""parallel"", whiten=""warn"", fun=""logcosh"", fun_args=None, max_iter=200, tol=1e-04, w_init=None, random_state=None, return_X_mean=False, compute_sources=True, return_n_iter=False, ): """"""Perform Fast Independent Component Analysis. The implementation is based on [1]_. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. n_components : int, default=None Number of components to use. If None is passed, all are used. algorithm : {'parallel', 'deflation'}, default='parallel' Apply either a parallel or deflational algorithm for FastICA. whiten : str or bool, default=""warn"" Specify the whitening strategy to use. - If 'arbitrary-variance' (default), a whitening with variance arbitrary is used. - If 'unit-variance', the whitening matrix is rescaled to ensure that each recovered source has unit variance. - If False, the data is already considered to be whitened, and no whitening is performed. .. deprecated:: 1.1 From version 1.3 `whiten='unit-variance'` will be used by default. 
`whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3. Use `whiten=arbitrary-variance` instead. fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' The functional form of the G function used in the approximation to neg-entropy. Could be either 'logcosh', 'exp', or 'cube'. You can also provide your own function. It should return a tuple containing the value of the function, and of its derivative, in the point. The derivative should be averaged along its last dimension. Example:: def my_g(x): return x ** 3, (3 * x ** 2).mean(axis=-1) fun_args : dict, default=None Arguments to send to the functional form. If empty or None and if fun='logcosh', fun_args will take value {'alpha' : 1.0}. max_iter : int, default=200 Maximum number of iterations to perform. tol : float, default=1e-4 A positive scalar giving the tolerance at which the un-mixing matrix is considered to have converged. w_init : ndarray of shape (n_components, n_components), default=None Initial un-mixing array. If `w_init=None` then an array of normal r.v.'s is used. random_state : int, RandomState instance or None, default=None Used to initialize ``w_init`` when not specified, with a normal distribution. Pass an int, for reproducible results across multiple function calls. See :term:`Glossary `. return_X_mean : bool, default=False If True, X_mean is returned too. compute_sources : bool, default=True If False, sources are not computed, but only the rotation matrix. This can save memory when working with big data. Defaults to True. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- K : ndarray of shape (n_components, n_features) or None If whiten is 'True', K is the pre-whitening matrix that projects data onto the first n_components principal components. If whiten is 'False', K is 'None'. W : ndarray of shape (n_components, n_components) The square matrix that unmixes the data after whitening. The mixing matrix is the pseudo-inverse of matrix ``W K`` if K is not None, else it is the inverse of W. S : ndarray of shape (n_samples, n_components) or None Estimated source matrix. X_mean : ndarray of shape (n_features,) The mean over features. Returned only if return_X_mean is True. n_iter : int If the algorithm is ""deflation"", n_iter is the maximum number of iterations run across all components. Else they are just the number of iterations taken to converge. This is returned only when return_n_iter is set to `True`. Notes ----- The data matrix X is considered to be a linear combination of non-Gaussian (independent) components i.e. X = AS where columns of S contain the independent components and A is a linear mixing matrix. In short ICA attempts to `un-mix' the data by estimating an un-mixing matrix W where ``S = W K X.`` While FastICA was proposed to estimate as many sources as features, it is possible to estimate less by setting n_components < n_features. It this case K is not a square matrix and the estimated A is the pseudo-inverse of ``W K``. This implementation was originally made for data of shape [n_features, n_samples]. Now the input is transposed before the algorithm is applied. This makes it slightly faster for Fortran-ordered input. References ---------- .. [1] A. Hyvarinen and E. Oja, ""Fast Independent Component Analysis"", Algorithms and Applications, Neural Networks, 13(4-5), 2000, pp. 411-430. 
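A minimal usage sketch (synthetic data; only the three default return values
K, W, S are unpacked, i.e. return_X_mean and return_n_iter left at False):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> S_true = rng.laplace(size=(1000, 2))    # independent non-Gaussian sources
    >>> A = np.array([[1.0, 0.5], [0.5, 1.0]])  # mixing matrix
    >>> X = S_true @ A.T                        # observed mixtures
    >>> K, W, S_est = fastica(X, n_components=2, whiten='unit-variance',
    ...                       random_state=0)
    >>> S_est.shape
    (1000, 2)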
"""""" est = FastICA( n_components=n_components, algorithm=algorithm, whiten=whiten, fun=fun, fun_args=fun_args, max_iter=max_iter, tol=tol, w_init=w_init, random_state=random_state, ) S = est._fit(X, compute_sources=compute_sources) if est._whiten in [""unit-variance"", ""arbitrary-variance""]: K = est.whitening_ X_mean = est.mean_ else: K = None X_mean = None returned_values = [K, est._unmixing, S] if return_X_mean: returned_values.append(X_mean) if return_n_iter: returned_values.append(est.n_iter_) return returned_values ","def fastica( X, n_components=None, *, algorithm=""parallel"", whiten=""warn"", fun=""logcosh"", fun_args=None, max_iter=200, tol=1e-04, w_init=None, random_state=None, return_X_mean=False, compute_sources=True, return_n_iter=False, ): """"""Perform Fast Independent Component Analysis. The implementation is based on [1]_. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. n_components : int, default=None Number of components to use. If None is passed, all are used. algorithm : {'parallel', 'deflation'}, default='parallel' Apply either a parallel or deflational algorithm for FastICA. whiten : str or bool, default=""warn"" Specify the whitening strategy to use. - If 'arbitrary-variance' (default), a whitening with variance arbitrary is used. - If 'unit-variance', the whitening matrix is rescaled to ensure that each recovered source has unit variance. - If False, the data is already considered to be whitened, and no whitening is performed. .. deprecated:: 1.1 Starting in v1.3, `whiten='unit-variance'` will be used by default. `whiten=True` is deprecated from 1.1 and will raise ValueError in 1.3. Use `whiten=arbitrary-variance` instead. fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh' The functional form of the G function used in the approximation to neg-entropy. Could be either 'logcosh', 'exp', or 'cube'. You can also provide your own function. It should return a tuple containing the value of the function, and of its derivative, in the point. The derivative should be averaged along its last dimension. Example:: def my_g(x): return x ** 3, (3 * x ** 2).mean(axis=-1) fun_args : dict, default=None Arguments to send to the functional form. If empty or None and if fun='logcosh', fun_args will take value {'alpha' : 1.0}. max_iter : int, default=200 Maximum number of iterations to perform. tol : float, default=1e-4 A positive scalar giving the tolerance at which the un-mixing matrix is considered to have converged. w_init : ndarray of shape (n_components, n_components), default=None Initial un-mixing array. If `w_init=None` then an array of normal r.v.'s is used. random_state : int, RandomState instance or None, default=None Used to initialize ``w_init`` when not specified, with a normal distribution. Pass an int, for reproducible results across multiple function calls. See :term:`Glossary `. return_X_mean : bool, default=False If True, X_mean is returned too. compute_sources : bool, default=True If False, sources are not computed, but only the rotation matrix. This can save memory when working with big data. Defaults to True. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- K : ndarray of shape (n_components, n_features) or None If whiten is 'True', K is the pre-whitening matrix that projects data onto the first n_components principal components. 
If whiten is 'False', K is 'None'. W : ndarray of shape (n_components, n_components) The square matrix that unmixes the data after whitening. The mixing matrix is the pseudo-inverse of matrix ``W K`` if K is not None, else it is the inverse of W. S : ndarray of shape (n_samples, n_components) or None Estimated source matrix. X_mean : ndarray of shape (n_features,) The mean over features. Returned only if return_X_mean is True. n_iter : int If the algorithm is ""deflation"", n_iter is the maximum number of iterations run across all components. Else they are just the number of iterations taken to converge. This is returned only when return_n_iter is set to `True`. Notes ----- The data matrix X is considered to be a linear combination of non-Gaussian (independent) components i.e. X = AS where columns of S contain the independent components and A is a linear mixing matrix. In short ICA attempts to `un-mix' the data by estimating an un-mixing matrix W where ``S = W K X.`` While FastICA was proposed to estimate as many sources as features, it is possible to estimate less by setting n_components < n_features. It this case K is not a square matrix and the estimated A is the pseudo-inverse of ``W K``. This implementation was originally made for data of shape [n_features, n_samples]. Now the input is transposed before the algorithm is applied. This makes it slightly faster for Fortran-ordered input. References ---------- .. [1] A. Hyvarinen and E. Oja, ""Fast Independent Component Analysis"", Algorithms and Applications, Neural Networks, 13(4-5), 2000, pp. 411-430. """""" est = FastICA( n_components=n_components, algorithm=algorithm, whiten=whiten, fun=fun, fun_args=fun_args, max_iter=max_iter, tol=tol, w_init=w_init, random_state=random_state, ) S = est._fit(X, compute_sources=compute_sources) if est._whiten in [""unit-variance"", ""arbitrary-variance""]: K = est.whitening_ X_mean = est.mean_ else: K = None X_mean = None returned_values = [K, est._unmixing, S] if return_X_mean: returned_values.append(X_mean) if return_n_iter: returned_values.append(est.n_iter_) return returned_values " 30181,"def in_toto_run(name, material_list, product_list, link_cmd_args, record_streams=False, signing_key=None, gpg_keyid=None, gpg_use_default=False, gpg_home=None, exclude_patterns=None, base_path=None, compact_json=False, record_environment=False, normalize_line_endings=False, lstrip_paths=None, quiet=True): """""" Calls functions in this module to run the command passed as link_cmd_args argument and to store materials, products, by-products and environment information into a link metadata file. The link metadata file is signed either with the passed signing_key, or a gpg key identified by the passed gpg_keyid or with the default gpg key if gpg_use_default is True. Even if multiple key parameters are passed, only one key is used for signing (in above order of precedence). The link file is dumped to `link.FILENAME_FORMAT` using the signing key's keyid. If no key parameter is passed the link is neither signed nor dumped. name: A unique name to relate link metadata with a step or inspection defined in the layout. material_list: List of file or directory paths that should be recorded as materials. product_list: List of file or directory paths that should be recorded as products. link_cmd_args: A list where the first element is a command and the remaining elements are arguments passed to that command. 
record_streams: (optional) A bool that specifies whether to redirect standard output and and standard error to a temporary file which is returned to the caller (True) or not (False). signing_key: (optional) If not None, link metadata is signed with this key. Format is securesystemslib.formats.KEY_SCHEMA gpg_keyid: (optional) If not None, link metadata is signed with a gpg key identified by the passed keyid. gpg_use_default: (optional) If True, link metadata is signed with default gpg key. gpg_home: (optional) Path to GPG keyring (if not set the default keyring is used). exclude_patterns: (optional) Artifacts matched by the pattern are excluded from the materials and products sections in the resulting link. base_path: (optional) If passed, record artifacts relative to base_path. Default is current working directory. NOTE: The base_path part of the recorded material is not included in the resulting preliminary link's material/product sections. compact_json: (optional) Whether or not to use the most compact json representation. record_environment: (optional) if values such as workdir should be recorded on the environment dictionary (false by default) normalize_line_endings: (optional) If True, replaces windows and mac line endings with unix line endings before hashing materials and products, for cross-platform support. lstrip_paths: (optional) If a prefix path is passed, the prefix is left stripped from the path of every artifact that contains the prefix. securesystemslib.FormatError if a signing_key is passed and does not match securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does not match securesystemslib.formats.KEYID_SCHEMA or exclude_patterns are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or base_path is passed and does not match securesystemslib.formats.PATH_SCHEMA or is not a directory. securesystemslib.gpg.exceptions.CommandError: If gpg is used for signing and the command exits with a non-zero code. 
If a key parameter is passed for signing, the newly created link metadata file is written to disk using the filename scheme: `link.FILENAME_FORMAT` Newly created Metablock object containing a Link object """""" LOG.info(""Running '{}'..."".format(name)) # Check key formats to fail early if signing_key: _check_match_signing_key(signing_key) if gpg_keyid: securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid) if exclude_patterns: securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns) if base_path: securesystemslib.formats.PATH_SCHEMA.check_match(base_path) if material_list: LOG.info(""Recording materials '{}'..."".format("", "".join(material_list))) materials_dict = record_artifacts_as_dict(material_list, exclude_patterns=exclude_patterns, base_path=base_path, follow_symlink_dirs=True, normalize_line_endings=normalize_line_endings, lstrip_paths=lstrip_paths) if link_cmd_args: LOG.info(""Running command '{}'..."".format("" "".join(link_cmd_args))) byproducts = execute_link(link_cmd_args, record_streams, quiet=True) else: byproducts = {} if product_list: securesystemslib.formats.PATHS_SCHEMA.check_match(product_list) LOG.info(""Recording products '{}'..."".format("", "".join(product_list))) products_dict = record_artifacts_as_dict(product_list, exclude_patterns=exclude_patterns, base_path=base_path, follow_symlink_dirs=True, normalize_line_endings=normalize_line_endings, lstrip_paths=lstrip_paths) LOG.info(""Creating link metadata..."") environment = {} if record_environment: environment['workdir'] = os.getcwd().replace('\\', '/') link = in_toto.models.link.Link(name=name, materials=materials_dict, products=products_dict, command=link_cmd_args, byproducts=byproducts, environment=environment) link_metadata = Metablock(signed=link, compact_json=compact_json) signature = None if signing_key: LOG.info(""Signing link metadata using passed key..."") signature = link_metadata.sign(signing_key) elif gpg_keyid: LOG.info(""Signing link metadata using passed GPG keyid..."") signature = link_metadata.sign_gpg(gpg_keyid, gpg_home=gpg_home) elif gpg_use_default: LOG.info(""Signing link metadata using default GPG key ..."") signature = link_metadata.sign_gpg(gpg_keyid=None, gpg_home=gpg_home) # We need the signature's keyid to write the link to keyid infix'ed filename if signature: signing_keyid = signature[""keyid""] filename = FILENAME_FORMAT.format(step_name=name, keyid=signing_keyid) LOG.info(""Storing link metadata to '{}'..."".format(filename)) link_metadata.dump(filename) return link_metadata ","def in_toto_run(name, material_list, product_list, link_cmd_args, record_streams=False, signing_key=None, gpg_keyid=None, gpg_use_default=False, gpg_home=None, exclude_patterns=None, base_path=None, compact_json=False, record_environment=False, normalize_line_endings=False, lstrip_paths=None, quiet=True): """""" Calls functions in this module to run the command passed as link_cmd_args argument and to store materials, products, by-products and environment information into a link metadata file. The link metadata file is signed either with the passed signing_key, or a gpg key identified by the passed gpg_keyid or with the default gpg key if gpg_use_default is True. Even if multiple key parameters are passed, only one key is used for signing (in above order of precedence). The link file is dumped to `link.FILENAME_FORMAT` using the signing key's keyid. If no key parameter is passed the link is neither signed nor dumped. 
name: A unique name to relate link metadata with a step or inspection defined in the layout. material_list: List of file or directory paths that should be recorded as materials. product_list: List of file or directory paths that should be recorded as products. link_cmd_args: A list where the first element is a command and the remaining elements are arguments passed to that command. record_streams: (optional) A bool that specifies whether to redirect standard output and and standard error to a temporary file which is returned to the caller (True) or not (False). signing_key: (optional) If not None, link metadata is signed with this key. Format is securesystemslib.formats.KEY_SCHEMA gpg_keyid: (optional) If not None, link metadata is signed with a gpg key identified by the passed keyid. gpg_use_default: (optional) If True, link metadata is signed with default gpg key. gpg_home: (optional) Path to GPG keyring (if not set the default keyring is used). exclude_patterns: (optional) Artifacts matched by the pattern are excluded from the materials and products sections in the resulting link. base_path: (optional) If passed, record artifacts relative to base_path. Default is current working directory. NOTE: The base_path part of the recorded material is not included in the resulting preliminary link's material/product sections. compact_json: (optional) Whether or not to use the most compact json representation. record_environment: (optional) if values such as workdir should be recorded on the environment dictionary (false by default) normalize_line_endings: (optional) If True, replaces windows and mac line endings with unix line endings before hashing materials and products, for cross-platform support. lstrip_paths: (optional) If a prefix path is passed, the prefix is left stripped from the path of every artifact that contains the prefix. securesystemslib.FormatError if a signing_key is passed and does not match securesystemslib.formats.KEY_SCHEMA or a gpg_keyid is passed and does not match securesystemslib.formats.KEYID_SCHEMA or exclude_patterns are passed and don't match securesystemslib.formats.NAMES_SCHEMA, or base_path is passed and does not match securesystemslib.formats.PATH_SCHEMA or is not a directory. securesystemslib.gpg.exceptions.CommandError: If gpg is used for signing and the command exits with a non-zero code. 
If a key parameter is passed for signing, the newly created link metadata file is written to disk using the filename scheme: `link.FILENAME_FORMAT` Newly created Metablock object containing a Link object """""" LOG.info(""Running '{}'..."".format(name)) # Check key formats to fail early if signing_key: _check_match_signing_key(signing_key) if gpg_keyid: securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid) if exclude_patterns: securesystemslib.formats.NAMES_SCHEMA.check_match(exclude_patterns) if base_path: securesystemslib.formats.PATH_SCHEMA.check_match(base_path) if material_list: LOG.info(""Recording materials '{}'..."".format("", "".join(material_list))) materials_dict = record_artifacts_as_dict(material_list, exclude_patterns=exclude_patterns, base_path=base_path, follow_symlink_dirs=True, normalize_line_endings=normalize_line_endings, lstrip_paths=lstrip_paths) if link_cmd_args: LOG.info(""Running command '{}'..."".format("" "".join(link_cmd_args))) byproducts = execute_link(link_cmd_args, record_streams, quiet=quiet) else: byproducts = {} if product_list: securesystemslib.formats.PATHS_SCHEMA.check_match(product_list) LOG.info(""Recording products '{}'..."".format("", "".join(product_list))) products_dict = record_artifacts_as_dict(product_list, exclude_patterns=exclude_patterns, base_path=base_path, follow_symlink_dirs=True, normalize_line_endings=normalize_line_endings, lstrip_paths=lstrip_paths) LOG.info(""Creating link metadata..."") environment = {} if record_environment: environment['workdir'] = os.getcwd().replace('\\', '/') link = in_toto.models.link.Link(name=name, materials=materials_dict, products=products_dict, command=link_cmd_args, byproducts=byproducts, environment=environment) link_metadata = Metablock(signed=link, compact_json=compact_json) signature = None if signing_key: LOG.info(""Signing link metadata using passed key..."") signature = link_metadata.sign(signing_key) elif gpg_keyid: LOG.info(""Signing link metadata using passed GPG keyid..."") signature = link_metadata.sign_gpg(gpg_keyid, gpg_home=gpg_home) elif gpg_use_default: LOG.info(""Signing link metadata using default GPG key ..."") signature = link_metadata.sign_gpg(gpg_keyid=None, gpg_home=gpg_home) # We need the signature's keyid to write the link to keyid infix'ed filename if signature: signing_keyid = signature[""keyid""] filename = FILENAME_FORMAT.format(step_name=name, keyid=signing_keyid) LOG.info(""Storing link metadata to '{}'..."".format(filename)) link_metadata.dump(filename) return link_metadata " 40064,"def _analyze_states(state: GlobalState) -> list: """""" :param state: the current state :return: returns the issues for that corresponding state """""" call = get_call_from_state(state) issues = [] if call.type is not ""DELEGATECALL"": return [] if call.node.function_name is not ""fallback"": return [] state = call.state address = state.get_current_instruction()[""address""] meminstart = get_variable(state.mstate.stack[-3]) if meminstart.type == VarType.CONCRETE: issues += _concrete_call(call, state, address, meminstart) return issues ","def _analyze_states(state: GlobalState) -> List[Issue]: """""" :param state: the current state :return: returns the issues for that corresponding state """""" call = get_call_from_state(state) issues = [] if call.type is not ""DELEGATECALL"": return [] if call.node.function_name is not ""fallback"": return [] state = call.state address = state.get_current_instruction()[""address""] meminstart = get_variable(state.mstate.stack[-3]) if meminstart.type 
== VarType.CONCRETE: issues += _concrete_call(call, state, address, meminstart) return issues " 9100,"def _create_outline_item( action_ref: IndirectObject, title: str, color: Union[Tuple[float, float, float], str, None], italic: bool, bold: bool, ) -> TreeObject: outline_item = TreeObject() outline_item.update( { NameObject(""/A""): action_ref, NameObject(""/Title""): create_string_object(title), } ) if color: if isinstance(color, str): color = hex_to_rgb(color) outline_item.update( {NameObject(""/C""): ArrayObject([FloatObject(c) for c in color])} ) if italic | bold: format_flag = 0 if italic: format_flag += 1 if bold: format_flag += 2 outline_item.update({NameObject(""/F""): NumberObject(format_flag)}) return outline_item ","def _create_outline_item( action_ref: IndirectObject, title: str, color: Union[Tuple[float, float, float], str, None], italic: bool, bold: bool, ) -> TreeObject: outline_item = TreeObject() outline_item.update( { NameObject(""/A""): action_ref, NameObject(""/Title""): create_string_object(title), } ) if color: if isinstance(color, str): color = hex_to_rgb(color) outline_item.update( {NameObject(""/C""): ArrayObject([FloatObject(c) for c in color])} ) if italic or bold: format_flag = 0 if italic: format_flag += 1 if bold: format_flag += 2 outline_item.update({NameObject(""/F""): NumberObject(format_flag)}) return outline_item " 1837,"def test_isotonic_2darray_1_feature(): # Test from GH#15012 # Check that IsotonicRegression can handle 2darray with only 1 feature # https://github.com/scikit-learn/scikit-learn/issues/15012 X = np.array(list(range(10))) X_2d = X.reshape(-1, 1) y = np.array([0, 1, 2, 6, 5, 4, 3, 7, 8, 9]) iso_reg = IsotonicRegression().fit(X, y) iso_reg_2d = IsotonicRegression().fit(X_2d, y) y_pred1 = iso_reg.predict(X) y_pred2 = iso_reg_2d.predict(X_2d) assert_array_equal(y_pred1, y_pred2) ","def test_isotonic_2darray_1_feature(): # Test from GH#15012 # Check that IsotonicRegression can handle 2darray with only 1 feature # https://github.com/scikit-learn/scikit-learn/issues/15012 X = np.arange(10) X_2d = X.reshape(-1, 1) y = np.array([0, 1, 2, 6, 5, 4, 3, 7, 8, 9]) iso_reg = IsotonicRegression().fit(X, y) iso_reg_2d = IsotonicRegression().fit(X_2d, y) y_pred1 = iso_reg.predict(X) y_pred2 = iso_reg_2d.predict(X_2d) assert_array_equal(y_pred1, y_pred2) " 1766,"def test_baseline_poisson_loss(): rng = np.random.RandomState(0) loss = _LOSSES['poisson_loss'](sample_weight=None) y_train = rng.poisson(size=100).astype(np.float64) # Make sure at least one sample point is larger than zero y_train[0] = 1. baseline_prediction = loss.get_baseline_prediction(y_train, None, 1) assert baseline_prediction.shape == tuple() # scalar assert baseline_prediction.dtype == y_train.dtype assert_all_finite(baseline_prediction) # Make sure baseline prediction produces the mean of all targets y_baseline = loss.inverse_link_function(baseline_prediction) assert_almost_equal(np.mean(y_baseline), y_train.mean()) # Test baseline for y_true = 0 y_train.fill(0.) baseline_prediction = loss.get_baseline_prediction(y_train, None, 1) assert_all_finite(baseline_prediction) ","def test_baseline_poisson_loss(): rng = np.random.RandomState(0) loss = _LOSSES['poisson_loss'](sample_weight=None) y_train = rng.poisson(size=100).astype(np.float64) # Make sure at least one sample point is larger than zero y_train[0] = 1. 
baseline_prediction = loss.get_baseline_prediction(y_train, None, 1) assert baseline_prediction.shape == tuple() # scalar assert baseline_prediction.dtype == y_train.dtype assert_all_finite(baseline_prediction) # Make sure baseline prediction produces the mean of all targets y_baseline = loss.inverse_link_function(baseline_prediction) assert_almost_equal(np.mean(y_baseline), y_train.mean()) # Test baseline for y_true = 0 y_train = np.zeros(100) baseline_prediction = loss.get_baseline_prediction(y_train, None, 1) assert_all_finite(baseline_prediction) " 30976,"def map_scim(scim): try: scim = json.loads(scim) except Exception: pass if type(scim) != dict: raise Exception('Provided client data is not JSON compatible') mapping = { ""userName"": ""userName"", ""email"": ""emails(val.primary && val.primary==true).[0].value"", ""id"": ""id"", ""office_country"": ""addresses(val.primary && val.primary==true).country"", } parsed_scim = dict() for k, v in mapping.items(): try: value = demisto.dt(scim, v) if(type(value) == list): parsed_scim[k] = value[0] else: parsed_scim[k] = value except Exception: parsed_scim[k] = None return parsed_scim ","def map_scim(scim): try: scim = json.loads(scim) except Exception: pass if type(scim) != dict: raise DemistoException('Provided client data is not JSON compatible') mapping = { ""userName"": ""userName"", ""email"": ""emails(val.primary && val.primary==true).[0].value"", ""id"": ""id"", ""office_country"": ""addresses(val.primary && val.primary==true).country"", } parsed_scim = dict() for k, v in mapping.items(): try: value = demisto.dt(scim, v) if(type(value) == list): parsed_scim[k] = value[0] else: parsed_scim[k] = value except Exception: parsed_scim[k] = None return parsed_scim " 32500,"def get_report_triage(client: Client, **args) -> CommandResults: """""" Outputs a score, should map to a DBot score """""" sample_id = args.get(""sample_id"") task_id = args.get(""task_id"") if not task_id.startswith(""behavioral""): return_error( ""Only behavioral reports can be retrieved with this command. "" ""Task ID must be 'behavioral' followed by a number. "" ""E.G: 'behavioral1'"" ) r = client._http_request(""GET"", f""samples/{sample_id}/{task_id}/report_triage.json"") score = 0 indicator: Any if 'sample' in r: if 'score' in r['sample']: score = map_scores_to_dbot(r['sample']['score']) target = r['sample']['target'] if ""sha256"" not in r['sample']: dbot_score = Common.DBotScore( indicator=target, indicator_type=DBotScoreType.URL, integration_name=""Hatching Triage"", score=score ) indicator = Common.URL( url=target, dbot_score=dbot_score ) else: dbot_score = Common.DBotScore( indicator=r['sample']['sha256'], indicator_type=DBotScoreType.FILE, integration_name=""Hatching Triage"", score=score ) indicator = Common.File( name=target, sha256=r['sample']['sha256'], md5=r['sample']['md5'], sha1=r['sample']['sha1'], dbot_score=dbot_score ) results = CommandResults( outputs_prefix=""Triage.sample.reports.triage"", outputs_key_field=""sample.id"", outputs=r, indicator=indicator ) return results ","def get_report_triage(client: Client, **args) -> CommandResults: """""" Outputs a score, should map to a DBot score """""" sample_id = args.get(""sample_id"") task_id = args.get(""task_id"") if not str(task_id).startswith(""behavioral""): return_error( ""Only behavioral reports can be retrieved with this command. "" ""Task ID must be 'behavioral' followed by a number. 
"" ""E.G: 'behavioral1'"" ) r = client._http_request(""GET"", f""samples/{sample_id}/{task_id}/report_triage.json"") score = 0 indicator: Any if 'sample' in r: if 'score' in r['sample']: score = map_scores_to_dbot(r['sample']['score']) target = r['sample']['target'] if ""sha256"" not in r['sample']: dbot_score = Common.DBotScore( indicator=target, indicator_type=DBotScoreType.URL, integration_name=""Hatching Triage"", score=score ) indicator = Common.URL( url=target, dbot_score=dbot_score ) else: dbot_score = Common.DBotScore( indicator=r['sample']['sha256'], indicator_type=DBotScoreType.FILE, integration_name=""Hatching Triage"", score=score ) indicator = Common.File( name=target, sha256=r['sample']['sha256'], md5=r['sample']['md5'], sha1=r['sample']['sha1'], dbot_score=dbot_score ) results = CommandResults( outputs_prefix=""Triage.sample.reports.triage"", outputs_key_field=""sample.id"", outputs=r, indicator=indicator ) return results " 9371,"def run_module(): # define the available arguments/parameters that a user can pass to # the module module_args = dict( cpm_action=dict(choices=['getport', 'setport'], required=True), cpm_url=dict(type='str', required=True), cpm_username=dict(type='str', required=True), cpm_password=dict(type='str', required=True, no_log=True), serial_port=dict(type='str', required=True), serial_portname=dict(type='str', required=False, default=None), serial_baud=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), serial_handshake=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3]), serial_stopbits=dict(type='int', required=False, default=None, choices=[0, 1]), serial_parity=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]), serial_mode=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4]), serial_cmd=dict(type='int', required=False, default=None, choices=[0, 1]), serial_seq=dict(type='int', required=False, default=None, choices=[1, 2, 3]), serial_tout=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]), serial_echo=dict(type='int', required=False, default=None, choices=[0, 1]), serial_break=dict(type='int', required=False, default=None, choices=[0, 1]), serial_logoff=dict(type='str', required=False, default=None), use_https=dict(type='bool', default=True), validate_certs=dict(type='bool', default=True), use_proxy=dict(type='bool', default=False) ) result = dict( changed=False, data='' ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if module.check_mode: return result auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])), errors='surrogate_or_strict'))) if module.params['use_https'] is True: protocol = ""https://"" else: protocol = ""http://"" payload = None if (module.params['cpm_action'] == 'getport'): fullurl = (""%s%s/api/v2/config/serialports?ports=%s"" % (protocol, to_native(module.params['cpm_url']), to_native(module.params['serial_port']))) method = 'GET' elif (module.params['cpm_action'] == 'setport'): payload = assemble_json(module) fullurl = (""%s%s/api/v2/config/serialports"" % (protocol, to_native(module.params['cpm_url']))) method = 'POST' try: response = open_url(fullurl, data=payload, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'], headers={'Content-Type': 'application/json', 'Authorization': ""Basic %s"" % auth}) if (method != 'GET'): result['changed'] = True 
except HTTPError as e: fail_json = dict(msg='Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) except URLError as e: fail_json = dict(msg='Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) except SSLValidationError as e: fail_json = dict(msg='Error validating the server''s certificate for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) except ConnectionError as e: fail_json = dict(msg='Error connecting to for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) result['data'] = json.loads(response.read()) module.exit_json(**result) ","def run_module(): # define the available arguments/parameters that a user can pass to # the module module_args = dict( cpm_action=dict(choices=['getport', 'setport'], required=True), cpm_url=dict(type='str', required=True), cpm_username=dict(type='str', required=True), cpm_password=dict(type='str', required=True, no_log=True), serial_port=dict(type='int', required=True), serial_portname=dict(type='str', required=False, default=None), serial_baud=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), serial_handshake=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3]), serial_stopbits=dict(type='int', required=False, default=None, choices=[0, 1]), serial_parity=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]), serial_mode=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4]), serial_cmd=dict(type='int', required=False, default=None, choices=[0, 1]), serial_seq=dict(type='int', required=False, default=None, choices=[1, 2, 3]), serial_tout=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5]), serial_echo=dict(type='int', required=False, default=None, choices=[0, 1]), serial_break=dict(type='int', required=False, default=None, choices=[0, 1]), serial_logoff=dict(type='str', required=False, default=None), use_https=dict(type='bool', default=True), validate_certs=dict(type='bool', default=True), use_proxy=dict(type='bool', default=False) ) result = dict( changed=False, data='' ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if module.check_mode: return result auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])), errors='surrogate_or_strict'))) if module.params['use_https'] is True: protocol = ""https://"" else: protocol = ""http://"" payload = None if (module.params['cpm_action'] == 'getport'): fullurl = (""%s%s/api/v2/config/serialports?ports=%s"" % (protocol, to_native(module.params['cpm_url']), to_native(module.params['serial_port']))) method = 'GET' elif (module.params['cpm_action'] == 'setport'): payload = assemble_json(module) fullurl = (""%s%s/api/v2/config/serialports"" % (protocol, to_native(module.params['cpm_url']))) method = 'POST' try: response = open_url(fullurl, data=payload, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'], headers={'Content-Type': 'application/json', 'Authorization': ""Basic %s"" % auth}) if (method != 'GET'): result['changed'] = True except HTTPError as e: fail_json = dict(msg='Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) except URLError as e: fail_json = dict(msg='Failed lookup url for {0} : 
{1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) except SSLValidationError as e: fail_json = dict(msg='Error validating the server''s certificate for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) except ConnectionError as e: fail_json = dict(msg='Error connecting to for {0} : {1}'.format(fullurl, to_native(e)), changed=False) module.fail_json(**fail_json) result['data'] = json.loads(response.read()) module.exit_json(**result) " 46914,"def get_logger(name: Optional[str] = None) -> logging.Logger: """""" eturn a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module. """""" if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) ","def get_logger(name: Optional[str] = None) -> logging.Logger: """""" Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module. """""" if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) " 2946,"def eval( expr, parser=""pandas"", engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False, ): """""" Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements `__, only Python `expressions `__. parser : str, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance ` documentation for more details. engine : str or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. 
For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance ` documentation for more details. """""" from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, ""inplace"") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != """"] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError( ""multi-line expressions are only valid in the "" ""context of data, use DataFrame.eval"" ) ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope( level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target, ) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError( ""Multi-line expressions are only valid"" "" if all expressions contain an assignment"" ) elif inplace: raise ValueError(""Cannot operate inplace if there is no assignment"") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError(""Cannot return a copy of the target"") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. 
# we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError(""Cannot assign expression output to target"") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret ","def eval( expr, parser=""pandas"", engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False, ): """""" Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements `__, only Python `expressions `__. parser : {'pandas', 'python'}, optional, default 'pandas' The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance ` documentation for more details. engine : str or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. 
If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance ` documentation for more details. """""" from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, ""inplace"") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != """"] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError( ""multi-line expressions are only valid in the "" ""context of data, use DataFrame.eval"" ) ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope( level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target, ) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError( ""Multi-line expressions are only valid"" "" if all expressions contain an assignment"" ) elif inplace: raise ValueError(""Cannot operate inplace if there is no assignment"") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError(""Cannot return a copy of the target"") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. 
target[assigner] = ret except (TypeError, IndexError): raise ValueError(""Cannot assign expression output to target"") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret " 20259,"def create_sublanding_filterable_page(page_title, page_slug, \ parent_path=None, has_filter=True, filter_is_expanded=False): # create a new page and set it as the child of an existing page # return list of route paths # get the root of the current site site_model = apps.get_model('wagtailcore', 'Site') site = site_model.objects.get(is_default_site=True) root = site.root_page # since parent was not provided, make root parent = root # if a parent path is provided, use that as parent if parent_path: path_components = \ [component for component in parent_path.split('/') if component] try: route = root.route(None, path_components) except Http404: print(""skipping page creation"") parent = route.page # create page, add it as a child of parent, save, and publish new_page = SublandingFilterablePage(title=page_title, slug=page_slug) # if page has a filter, add it if has_filter: new_page.content = json.dumps([ {'type':'filter_controls', \ 'value':{ 'is_expanded': filter_is_expanded, 'categories': {'page_type': 'blog'}, 'topic_filtering': 'sort_alphabetically', 'language': True, } } ]) try: parent.add_child(instance=new_page) new_page.save_revision().publish() except ValidationError: print(""skipping page creation"") # return path return new_page.get_url(None, site) ","def create_sublanding_filterable_page(page_title, page_slug, \ parent_path=None, has_filter=True, filter_is_expanded=False): # create a new page and set it as the child of an existing page # return list of route paths # get the root of the current site site_model = apps.get_model('wagtailcore', 'Site') site = site_model.objects.get(is_default_site=True) root = site.root_page # since parent was not provided, make root parent = root # if a parent path is provided, use that as parent if parent_path: path_components = \ [component for component in parent_path.split('/') if component] try: route = root.route(None, path_components) except Http404: print(""skipping page creation"") parent = route.page # create page, add it as a child of parent, save, and publish new_page = SublandingFilterablePage(title=page_title, slug=page_slug) # if page has a filter, add it if has_filter: new_page.content = json.dumps([ {'type':'filter_controls', 'value':{ 'is_expanded': filter_is_expanded, 'categories': {'page_type': 'blog'}, 'topic_filtering': 'sort_alphabetically', 'language': True, } } ]) try: parent.add_child(instance=new_page) new_page.save_revision().publish() except ValidationError: print(""skipping page creation"") # return path return new_page.get_url(None, site) " 13887,"def docker_container_id(version: str) -> None: return ""gcovr-qa-{}"".format(version) ","def docker_container_id(version: str) -> str: return ""gcovr-qa-{}"".format(version) " 47556,"def flax_shard_checkpoint(params, max_shard_size=""10GB""): """""" Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. 
The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `""10GB""`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `""5MB""`). """""" max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = {} current_block_size = 0 total_size = 0 # flatten the weights to chunk weights = flatten_dict(params, sep=""/"") for item in weights: weight_size = np.array(weights[item]).size * dtype_byte_size(weights[item].dtype) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = {} current_block_size = 0 current_block[item] = weights[item] current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = FLAX_WEIGHTS_NAME.replace("".msgpack"", f""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack"") shards[shard_file] = shard for weight_name in shard.keys(): weight_map[weight_name] = shard_file # Add the metadata metadata = {""total_size"": total_size} index = {""metadata"": metadata, ""weight_map"": weight_map} return shards, index ","def flax_shard_checkpoint(params, max_shard_size=""10GB""): """""" Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `""10GB""`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `""5MB""`). """""" max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = {} current_block_size = 0 total_size = 0 # flatten the weights to chunk weights = flatten_dict(params, sep=""/"") for item in weights: weight_size = np.array(weights[item]).size * dtype_byte_size(weights[item].dtype) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = {} current_block_size = 0 current_block[item] = weights[item] current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = FLAX_WEIGHTS_NAME.replace("".msgpack"", f""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack"") shards[shard_file] = shard for weight_name in shard.keys(): weight_map[weight_name] = shard_file # Add the metadata metadata = {""total_size"": total_size} index = {""metadata"": metadata, ""weight_map"": weight_map} return shards, index " 32587,"def main() -> None: """""" main function """""" params: Dict[str, Any] = demisto.params() args: Dict[str, Any] = demisto.args() command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() if isinstance(params, dict): creds = params.get('credentials') if isinstance(creds, dict): api = creds.get('password') auth_id = creds.get('identifier') headers = { 'Authorization': f'{api}', 'x-xdr-auth-id': f'{auth_id}', 'Content-Type': 'application/json' } url_suffix = ""/public_api/v1"" url = params['url'] add_sensitive_log_strs(api) base_url = urljoin(url, url_suffix) client = Client( base_url=base_url, verify=True, headers=headers, proxy=False, auth=None) commands = { 'asm-getexternalservices': getexternalservices_command, 'asm-getexternalservice': getexternalservice_command, 'asm-getexternalipaddressranges': getexternalipaddressranges_command, 'asm-getexternalipaddressrange': getexternalipaddressrange_command, 'asm-getassetsinternetexposure': getassetsinternetexposure_command, 'asm-getassetinternetexposure': getassetinternetexposure_command, } if command == 'test-module': test_module(client) elif command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(str(e)) ","def main() -> None: """""" main function """""" params: Dict[str, Any] = demisto.params() args: Dict[str, Any] = demisto.args() command = demisto.command() demisto.debug(f'Command being called is {command}') try: requests.packages.urllib3.disable_warnings() if isinstance(params, dict): creds = params.get('credentials') if isinstance(creds, dict): api = creds.get('password') auth_id = creds.get('identifier') headers = { 'Authorization': f'{api}', 'x-xdr-auth-id': f'{auth_id}', 'Content-Type': 'application/json' } url_suffix = ""/public_api/v1"" url = params['url'] add_sensitive_log_strs(api) base_url = urljoin(url, url_suffix) client = Client( base_url=base_url, verify=True, headers=headers, proxy=False, auth=None) commands = { 'asm-getexternalservices': getexternalservices_command, 'asm-getexternalservice': getexternalservice_command, 'asm-getexternalipaddressranges': getexternalipaddressranges_command, 'asm-getexternalipaddressrange': getexternalipaddressrange_command, 'asm-getassetsinternetexposure': getassetsinternetexposure_command, 'asm-getassetinternetexposure': getassetinternetexposure_command, } if command == 'test-module': test_module(client) elif command in commands: return_results(commands[command](client, args)) else: raise 
NotImplementedError(f'{command} command is not implemented.') except Exception as e: return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') " 57380,"def write(project: Project, prime_dir: Path, *, arch: str, arch_triplet: str): """"""Create a snap.yaml file."""""" meta_dir = prime_dir / ""meta"" meta_dir.mkdir(parents=True, exist_ok=True) assumes: Set[str] = set() snap_apps: Dict[str, SnapApp] = {} if project.apps: for name, app in project.apps.items(): app_sockets: Dict[str, Socket] = {} if app.sockets: for socket_name, socket in app.sockets.items(): app_sockets[socket_name] = Socket( listen_stream=socket.listen_stream, socket_mode=socket.socket_mode, ) if app.command_chain: assumes.add(""command-chain"") snap_apps[name] = SnapApp( command=app.command, autostart=app.autostart, common_id=app.common_id, bus_name=app.bus_name, completer=app.completer, stop_command=app.stop_command, post_stop_command=app.post_stop_command, start_timeout=app.start_timeout, stop_timeout=app.stop_timeout, watchdog_timeout=app.watchdog_timeout, reload_command=app.reload_command, restart_delay=app.restart_delay, timer=app.timer, daemon=app.daemon, after=app.after if app.after else None, before=app.before if app.before else None, refresh_mode=app.refresh_mode, stop_mode=app.stop_mode, restart_condition=app.restart_condition, install_mode=app.install_mode, plugs=app.plugs, aliases=app.aliases, environment=app.environment, command_chain=app.command_chain if app.command_chain else None, sockets=app_sockets if app_sockets else None, ) if project.hooks and any(h for h in project.hooks.values() if h.command_chain): assumes.add(""command-chain"") environment = _populate_environment(project.environment, prime_dir, arch_triplet) # mypy false positive: at this point project.version is a string version = _process_version(project.version) # type: ignore snap_metadata = SnapMetadata( name=project.name, title=project.title, version=version, summary=project.summary, description=project.description, # type: ignore license=project.license, type=project.type, architectures=[arch], base=cast(str, project.base), assumes=list(assumes) if assumes else None, epoch=project.epoch, apps=snap_apps or None, confinement=project.confinement, grade=project.grade or ""stable"", environment=environment, plugs=project.plugs, slots=project.slots, hooks=project.hooks, layout=project.layout, system_usernames=project.system_usernames, ) yaml.add_representer(str, _repr_str, Dumper=yaml.SafeDumper) yaml_data = snap_metadata.yaml( by_alias=True, exclude_none=True, allow_unicode=True, sort_keys=False, width=1000, ) snap_yaml = meta_dir / ""snap.yaml"" snap_yaml.write_text(yaml_data) ","def write(project: Project, prime_dir: Path, *, arch: str, arch_triplet: str): """"""Create a snap.yaml file."""""" meta_dir = prime_dir / ""meta"" meta_dir.mkdir(parents=True, exist_ok=True) assumes: Set[str] = set() snap_apps: Dict[str, SnapApp] = {} if project.apps: for name, app in project.apps.items(): app_sockets: Dict[str, Socket] = {} if app.sockets: for socket_name, socket in app.sockets.items(): app_sockets[socket_name] = Socket( listen_stream=socket.listen_stream, socket_mode=socket.socket_mode, ) if app.command_chain: assumes.add(""command-chain"") snap_apps[name] = SnapApp( command=app.command, autostart=app.autostart, common_id=app.common_id, bus_name=app.bus_name, completer=app.completer, stop_command=app.stop_command, post_stop_command=app.post_stop_command, start_timeout=app.start_timeout, stop_timeout=app.stop_timeout, 
watchdog_timeout=app.watchdog_timeout, reload_command=app.reload_command, restart_delay=app.restart_delay, timer=app.timer, daemon=app.daemon, after=app.after if app.after else None, before=app.before if app.before else None, refresh_mode=app.refresh_mode, stop_mode=app.stop_mode, restart_condition=app.restart_condition, install_mode=app.install_mode, plugs=app.plugs, aliases=app.aliases, environment=app.environment, command_chain=app.command_chain if app.command_chain else None, sockets=app_sockets if app_sockets else None, ) if project.hooks and any(h for h in project.hooks.values() if h.command_chain): assumes.add(""command-chain"") environment = _populate_environment(project.environment, prime_dir, arch_triplet) if project.version is None: raise ValueError(""'project.version' cannot be None"") version = _process_version(project.version) # type: ignore snap_metadata = SnapMetadata( name=project.name, title=project.title, version=version, summary=project.summary, description=project.description, # type: ignore license=project.license, type=project.type, architectures=[arch], base=cast(str, project.base), assumes=list(assumes) if assumes else None, epoch=project.epoch, apps=snap_apps or None, confinement=project.confinement, grade=project.grade or ""stable"", environment=environment, plugs=project.plugs, slots=project.slots, hooks=project.hooks, layout=project.layout, system_usernames=project.system_usernames, ) yaml.add_representer(str, _repr_str, Dumper=yaml.SafeDumper) yaml_data = snap_metadata.yaml( by_alias=True, exclude_none=True, allow_unicode=True, sort_keys=False, width=1000, ) snap_yaml = meta_dir / ""snap.yaml"" snap_yaml.write_text(yaml_data) " 41575,"def main(root_data, output_data): # Curate the contents of the dataset to keep only folders and sort them contents_ds = [subdir for subdir in os.listdir(root_data) if os.path.isdir(os.path.join(root_data, subdir))] contents_ds.sort() # Loop across contents of each subdirectory for subdir in contents_ds: # Define subject id sub_id = ""sub-demoMouse"" + subdir.split('_')[3] # Define sample id sample_id = subdir.split('_')[4] # Get the path of each subdirectory path_subdir = os.path.join(root_data, subdir) # Get the contents of each subdirectory contents_subdir = os.listdir(path_subdir) # Define final bids subject id sub_bids_full = sub_id + ""_sample-"" + sample_id # Loop across the contents of each subdirectory for file in contents_subdir: # Get the path of each file path_file_in = os.path.join(path_subdir, file) # Check if the filename corresponds to the one in the images dictionary if file in images: # Most files go into the subject's data folder path_sub_id_dir_out = os.path.join(output_data, sub_id, 'microscopy') # Define the output file path path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + images[file]) # Check if the filename corresponds to the one in the derivatives dictionary elif file in der: # Derivatives go somewhere else path_sub_id_dir_out = os.path.join(output_data, 'derivatives', 'labels', sub_id, 'microscopy') # Define the output file path path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + der[file]) else: # not a file we recognize continue # Create output subdirecotries and copy files to output os.makedirs(os.path.dirname(path_file_out), exist_ok=True) shutil.copyfile(path_file_in, path_file_out) # Generate subject list sub_list = sorted(d for d in os.listdir(output_data) if d.startswith(""sub-"")) # Now that everything is curated, fill in the metadata for sub_id in sub_list: 
create_json_sidecar(output_data, sub_id) # Create participants.tsv and samples.tsv with open(output_data + '/samples.tsv', 'w') as samples, \ open(output_data + '/participants.tsv', 'w') as participants: tsv_writer_samples = csv.writer(samples, delimiter='\t', lineterminator='\n') tsv_writer_samples.writerow([""sample_id"", ""participant_id"", ""sample_type""]) tsv_writer_participants = csv.writer(participants, delimiter='\t', lineterminator='\n') tsv_writer_participants.writerow([""participant_id"", ""species""]) for subject in sub_list: row_sub = [] row_sub.append(subject) row_sub.append('mus musculus') tsv_writer_participants.writerow(row_sub) subject_samples = sorted(glob.glob(os.path.join(output_data, subject, 'microscopy', '*.png'))) for file_sample in subject_samples: row_sub_samples = [] row_sub_samples.append(os.path.basename(file_sample).split('_')[1]) row_sub_samples.append(subject) row_sub_samples.append('tissue') tsv_writer_samples.writerow(row_sub_samples) # Create dataset_description.json dataset_description = {""Name"": ""data_axondeepseg_tem"", ""BIDSVersion"": ""1.7.0"", ""License"": ""MIT"" } with open(output_data + '/dataset_description.json', 'w') as json_file: json.dump(dataset_description, json_file, indent=4) # Create dataset_description.json for derivatives/labels dataset_description_derivatives = {""Name"": ""data_axondeepseg_tem labels"", ""BIDSVersion"": ""1.7.0"", ""PipelineDescription"": { ""Name"": ""Axon and myelin manual segmentation labels"" }} with open(output_data + '/derivatives/labels/dataset_description.json', 'w') as json_file: json.dump(dataset_description_derivatives, json_file, indent=4) # Create participants.json data_json = { ""participant_id"": { ""Description"": ""Unique participant ID"" }, ""species"": { ""Description"": ""Binomial species name from the NCBI Taxonomy (https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi)"" } } with open(output_data + '/participants.json', 'w') as json_file: json.dump(data_json, json_file, indent=4) # Create samples.json data_json = { ""sample_id"": { ""Description"": ""Sample ID"" }, ""participant_id"": { ""Description"": ""Participant ID from whom tissue samples have been acquired"" }, ""sample_type"": { ""Description"": ""Type of sample from ENCODE Biosample Type (https://www.encodeproject.org/profiles/biosample_type)"" } } with open(output_data + '/samples.json', 'w') as json_file: json.dump(data_json, json_file, indent=4) # Create README with open(output_data + '/README', 'w') as readme_file: print(dedent(""""""\ - Generate on 2022-03-09 - Created for demo purposes""""""), file=readme_file) ","def main(root_data, output_data): # Curate the contents of the dataset to keep only folders and sort them contents_ds = [subdir for subdir in os.listdir(root_data) if os.path.isdir(os.path.join(root_data, subdir))] contents_ds.sort() # Loop across contents of each subdirectory for subdir in contents_ds: # Define subject id sub_id = ""sub-demoMouse"" + subdir.split('_')[3] # Define sample id sample_id = subdir.split('_')[4] # Get the path of each subdirectory path_subdir = os.path.join(root_data, subdir) # Get the contents of each subdirectory contents_subdir = os.listdir(path_subdir) # Define final bids subject id sub_bids_full = sub_id + ""_sample-"" + sample_id # Loop across the contents of each subdirectory for file in contents_subdir: # Get the path of each file path_file_in = os.path.join(path_subdir, file) # Check if the filename corresponds to the one in the images dictionary if file in images: # Most 
files go into the subject's data folder path_sub_id_dir_out = os.path.join(output_data, sub_id, 'microscopy') # Define the output file path path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + images[file]) # Check if the filename corresponds to the one in the derivatives dictionary elif file in der: # Derivatives go somewhere else path_sub_id_dir_out = os.path.join(output_data, 'derivatives', 'labels', sub_id, 'micr') # Define the output file path path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + der[file]) else: # not a file we recognize continue # Create output subdirecotries and copy files to output os.makedirs(os.path.dirname(path_file_out), exist_ok=True) shutil.copyfile(path_file_in, path_file_out) # Generate subject list sub_list = sorted(d for d in os.listdir(output_data) if d.startswith(""sub-"")) # Now that everything is curated, fill in the metadata for sub_id in sub_list: create_json_sidecar(output_data, sub_id) # Create participants.tsv and samples.tsv with open(output_data + '/samples.tsv', 'w') as samples, \ open(output_data + '/participants.tsv', 'w') as participants: tsv_writer_samples = csv.writer(samples, delimiter='\t', lineterminator='\n') tsv_writer_samples.writerow([""sample_id"", ""participant_id"", ""sample_type""]) tsv_writer_participants = csv.writer(participants, delimiter='\t', lineterminator='\n') tsv_writer_participants.writerow([""participant_id"", ""species""]) for subject in sub_list: row_sub = [] row_sub.append(subject) row_sub.append('mus musculus') tsv_writer_participants.writerow(row_sub) subject_samples = sorted(glob.glob(os.path.join(output_data, subject, 'microscopy', '*.png'))) for file_sample in subject_samples: row_sub_samples = [] row_sub_samples.append(os.path.basename(file_sample).split('_')[1]) row_sub_samples.append(subject) row_sub_samples.append('tissue') tsv_writer_samples.writerow(row_sub_samples) # Create dataset_description.json dataset_description = {""Name"": ""data_axondeepseg_tem"", ""BIDSVersion"": ""1.7.0"", ""License"": ""MIT"" } with open(output_data + '/dataset_description.json', 'w') as json_file: json.dump(dataset_description, json_file, indent=4) # Create dataset_description.json for derivatives/labels dataset_description_derivatives = {""Name"": ""data_axondeepseg_tem labels"", ""BIDSVersion"": ""1.7.0"", ""PipelineDescription"": { ""Name"": ""Axon and myelin manual segmentation labels"" }} with open(output_data + '/derivatives/labels/dataset_description.json', 'w') as json_file: json.dump(dataset_description_derivatives, json_file, indent=4) # Create participants.json data_json = { ""participant_id"": { ""Description"": ""Unique participant ID"" }, ""species"": { ""Description"": ""Binomial species name from the NCBI Taxonomy (https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi)"" } } with open(output_data + '/participants.json', 'w') as json_file: json.dump(data_json, json_file, indent=4) # Create samples.json data_json = { ""sample_id"": { ""Description"": ""Sample ID"" }, ""participant_id"": { ""Description"": ""Participant ID from whom tissue samples have been acquired"" }, ""sample_type"": { ""Description"": ""Type of sample from ENCODE Biosample Type (https://www.encodeproject.org/profiles/biosample_type)"" } } with open(output_data + '/samples.json', 'w') as json_file: json.dump(data_json, json_file, indent=4) # Create README with open(output_data + '/README', 'w') as readme_file: print(dedent(""""""\ - Generate on 2022-03-09 - Created for demo purposes""""""), file=readme_file) " 
15023,"def get_age(date: dt.datetime) -> str: """""" Take a datetime and return its ""age"" as a string. The age can be in second, minute, hour, day, month or year. Only the biggest unit is considered, e.g. if it's 2 days and 3 hours, ""2 days"" will be returned. Make sure date is not in the future, or else it won't work. """""" def formatn(number: int, unit: str) -> str: """"""Add ""unit"" if it's plural."""""" if number == 1: return f""1 {unit}"" return f""{number:d} {unit}s"" def q_n_r(first: int, second: int) -> Tuple[int, int]: """"""Return quotient and remaining."""""" return first // second, first % second if type(date) is dt.datetime and not date.tzinfo: date = as_local(date) delta = now() - date day = delta.days second = delta.seconds year, day = q_n_r(day, 365) if year > 0: return formatn(year, ""year"") month, day = q_n_r(day, 30) if month > 0: return formatn(month, ""month"") if day > 0: return formatn(day, ""day"") hour, second = q_n_r(second, 3600) if hour > 0: return formatn(hour, ""hour"") minute, second = q_n_r(second, 60) if minute > 0: return formatn(minute, ""minute"") return formatn(second, ""second"") ","def get_age(date: dt.datetime) -> str: """""" Take a datetime and return its ""age"" as a string. The age can be in second, minute, hour, day, month or year. Only the biggest unit is considered, e.g. if it's 2 days and 3 hours, ""2 days"" will be returned. Make sure date is not in the future, or else it won't work. """""" def formatn(number: int, unit: str) -> str: """"""Add ""unit"" if it's plural."""""" if number == 1: return f""1 {unit}"" return f""{number:d} {unit}s"" def q_n_r(first: int, second: int) -> Tuple[int, int]: """"""Return quotient and remaining."""""" return first // second, first % second if isinstance(date, dt.datetime) and not date.tzinfo: date = as_local(date) delta = now() - date day = delta.days second = delta.seconds year, day = q_n_r(day, 365) if year > 0: return formatn(year, ""year"") month, day = q_n_r(day, 30) if month > 0: return formatn(month, ""month"") if day > 0: return formatn(day, ""day"") hour, second = q_n_r(second, 3600) if hour > 0: return formatn(hour, ""hour"") minute, second = q_n_r(second, 60) if minute > 0: return formatn(minute, ""minute"") return formatn(second, ""second"") " 10333,"def stringc(text, color, for_prompt=False): """"""String in color."""""" if ANSIBLE_COLOR: color_code = parsecolor(color) fmt = u""\033[%sm%s\033[0m"" if for_prompt: fmt = u""\001\033[%sm\002%s\001\033[0m\002"" return u""\n"".join([fmt % (color_code, t) for t in text.split(u'\n')]) else: return text ","def stringc(text, color, wrap_nonvisible_chars =False): """"""String in color."""""" if ANSIBLE_COLOR: color_code = parsecolor(color) fmt = u""\033[%sm%s\033[0m"" if for_prompt: fmt = u""\001\033[%sm\002%s\001\033[0m\002"" return u""\n"".join([fmt % (color_code, t) for t in text.split(u'\n')]) else: return text " 32157,"def fetch_incidents(client: Client): max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False) first_fetch_time = arg_to_datetime(demisto.params().get('first_fetch')).isoformat() last_run = demisto.getLastRun() last_fetch = last_run.get('last_fetch', first_fetch_time) incidentsList=[] alert_response = client.correlation_alerts() incident_data = alert_response['Data'] for inc in incident_data: if len(incidentsList) > max_results: break incident_name = inc['CorrelationAlert']['NAME'] time_stamp = inc['CorrelationAlert']['CREATEDATE']+""Z"" severityLvl = 
int(inc['CorrelationAlert']['RISK']) if severityLvl >=0 and severityLvl <= 5: severity = 1 elif severityLvl > 5 and severityLvl <= 7: severity = 2 elif severityLvl > 7 and severityLvl <= 9: severity = 3 elif severityLvl > 9 and severityLvl <= 10: severity = 4 else: severity = 0 # ""log"" column is stringfyed 'Log' data. inc['Log'].pop(""log"") incidentObject = {**inc['Log'], **inc['CorrelationAlert']} incident = { 'name': incident_name, 'occurred': time_stamp, 'rawJSON': json.dumps(incidentObject), ""severity"": severity, 'type': 'Crpyotsim CorrelationAlert' } incidentsList.append(incident) # Save the next_run as a dict with the last_fetch key to be stored next_run = {'last_fetch': last_fetch} return next_run, incidentsList ","def fetch_incidents(client: Client): max_results = arg_to_number(arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False) first_fetch_time = arg_to_datetime(demisto.params().get('first_fetch')).isoformat() last_run = demisto.getLastRun() last_fetch = last_run.get('last_fetch', first_fetch_time) incidentsList=[] alert_response = client.correlation_alerts() incident_data = alert_response['Data'] for inc in incident_data: if len(incidentsList) > max_results: break incident_name = inc['CorrelationAlert']['NAME'] time_stamp = inc['CorrelationAlert']['CREATEDATE']+""Z"" severity_level = int(inc['CorrelationAlert']['RISK']) if severityLvl >=0 and severityLvl <= 5: severity = 1 elif severityLvl > 5 and severityLvl <= 7: severity = 2 elif severityLvl > 7 and severityLvl <= 9: severity = 3 elif severityLvl > 9 and severityLvl <= 10: severity = 4 else: severity = 0 # ""log"" column is stringfyed 'Log' data. inc['Log'].pop(""log"") incidentObject = {**inc['Log'], **inc['CorrelationAlert']} incident = { 'name': incident_name, 'occurred': time_stamp, 'rawJSON': json.dumps(incidentObject), ""severity"": severity, 'type': 'Crpyotsim CorrelationAlert' } incidentsList.append(incident) # Save the next_run as a dict with the last_fetch key to be stored next_run = {'last_fetch': last_fetch} return next_run, incidentsList " 29528,"def compile_filter(bpf_filter, iface=None, iface_type=None): """"""Asks Tcpdump to parse the filter, then build the matching BPF bytecode using get_bpf_pointer. """""" if not TCPDUMP: raise Scapy_Exception(""tcpdump is not available. Cannot use filter !"") fake_pcap = None tcpdump_opts = [ conf.prog.tcpdump, ""-ddd"", ""-s"", str(MTU), ] if iface_type: fake_pcap = codecs.getdecoder('hex')(FAKE_PCAP % (iface_type, ))[0] tcpdump_opts.append(""-r-"") else: tcpdump_opts.extend( [""-p"", ""-i"", (conf.iface if iface is None else iface)] ) tcpdump_opts.append(bpf_filter) try: process = subprocess.Popen( tcpdump_opts, stdin=subprocess.PIPE if fake_pcap else None, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except OSError as ex: raise Scapy_Exception(""Failed to attach filter: %s"" % ex) if fake_pcap: lines, err = process.communicate(fake_pcap) else: lines, err = process.communicate() ret = process.returncode if ret: raise Scapy_Exception( ""Failed to attach filter: tcpdump returned: %s"" % err ) lines = lines.strip().split(b""\n"") return get_bpf_pointer(lines) ","def compile_filter(bpf_filter, iface=None, iface_type=None): """"""Asks Tcpdump to parse the filter, then build the matching BPF bytecode using get_bpf_pointer. """""" if not TCPDUMP: raise Scapy_Exception(""tcpdump is not available. 
Cannot use filter !"") fake_pcap = None tcpdump_opts = [ conf.prog.tcpdump, ""-ddd"", ""-s"", str(MTU), ] if iface_type: fake_pcap = FAKE_PCAP % struct.pack('B', iface_type) tcpdump_opts.append(""-r-"") else: tcpdump_opts.extend( [""-p"", ""-i"", (conf.iface if iface is None else iface)] ) tcpdump_opts.append(bpf_filter) try: process = subprocess.Popen( tcpdump_opts, stdin=subprocess.PIPE if fake_pcap else None, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except OSError as ex: raise Scapy_Exception(""Failed to attach filter: %s"" % ex) if fake_pcap: lines, err = process.communicate(fake_pcap) else: lines, err = process.communicate() ret = process.returncode if ret: raise Scapy_Exception( ""Failed to attach filter: tcpdump returned: %s"" % err ) lines = lines.strip().split(b""\n"") return get_bpf_pointer(lines) " 20769,"def parse_po_file(filename: str) -> List[Msg]: messages = [] with open(filename) as f: iterator = iter(f.readlines()) while True: try: line = next(iterator) if line[0:7] == ""msgctxt"": # Start of a translation item block msg = Msg() msg.msgctxt = line while True: line = next(iterator) if line[0:5] == ""msgid"": msg.msgid = line break while True: # msgstr can be split over multiple lines line = next(iterator) if line == ""\n"": break if line[0:6] == ""msgstr"": msg.msgstr = line else: msg.msgstr += line messages.append(msg) except StopIteration: return messages ","def parsePOFile(filename: str) -> List[Msg]: messages = [] with open(filename) as f: iterator = iter(f.readlines()) while True: try: line = next(iterator) if line[0:7] == ""msgctxt"": # Start of a translation item block msg = Msg() msg.msgctxt = line while True: line = next(iterator) if line[0:5] == ""msgid"": msg.msgid = line break while True: # msgstr can be split over multiple lines line = next(iterator) if line == ""\n"": break if line[0:6] == ""msgstr"": msg.msgstr = line else: msg.msgstr += line messages.append(msg) except StopIteration: return messages " 14055,"def _read_feather(path, columns=None, **kwargs): """""" Load a Feather object from the file path, returning a GeoDataFrame. You can read a subset of columns in the file using the ``columns`` parameter. However, the structure of the returned GeoDataFrame will depend on which columns you read: * if no geometry columns are read, this will raise a ``ValueError`` - you should use the pandas `read_feather` method instead. * if the primary geometry column saved to this file is not included in columns, the first available geometry column will be set as the geometry column of the returned GeoDataFrame. Requires 'pyarrow' >= 0.17. .. versionadded:: 0.8 Parameters ---------- path : str, path object columns : list-like of strings, default=None If not None, only these columns will be read from the file. If the primary geometry column is not included, the first secondary geometry read from the file will be set as the geometry column of the returned GeoDataFrame. If no geometry columns are present, a ``ValueError`` will be raised. **kwargs Any additional kwargs passed to pyarrow.feather.read_table(). Returns ------- GeoDataFrame Examples -------- >>> df = geopandas.read_feather(""data.feather) # doctest: +SKIP Specifying columns to read: >>> df = geopandas.read_feather( ... ""data.feather, ... columns=[""geometry"", ""pop_est""] ... 
) # doctest: +SKIP """""" feather = import_optional_dependency( ""pyarrow.feather"", extra=""pyarrow is required for Feather support."" ) # TODO move this into `import_optional_dependency` import pyarrow if pyarrow.__version__ < LooseVersion(""0.17.0""): raise ImportError(""pyarrow >= 0.17 required for Feather support"") table = feather.read_table(path, columns=columns, **kwargs) return _arrow_to_geopandas(table) ","def _read_feather(path, columns=None, **kwargs): """""" Load a Feather object from the file path, returning a GeoDataFrame. You can read a subset of columns in the file using the ``columns`` parameter. However, the structure of the returned GeoDataFrame will depend on which columns you read: * if no geometry columns are read, this will raise a ``ValueError`` - you should use the pandas `read_feather` method instead. * if the primary geometry column saved to this file is not included in columns, the first available geometry column will be set as the geometry column of the returned GeoDataFrame. Requires 'pyarrow' >= 0.17. .. versionadded:: 0.8 Parameters ---------- path : str, path object columns : list-like of strings, default=None If not None, only these columns will be read from the file. If the primary geometry column is not included, the first secondary geometry read from the file will be set as the geometry column of the returned GeoDataFrame. If no geometry columns are present, a ``ValueError`` will be raised. **kwargs Any additional kwargs passed to pyarrow.feather.read_table(). Returns ------- GeoDataFrame Examples -------- >>> df = geopandas.read_feather(""data.feather) # doctest: +SKIP Specifying columns to read: >>> df = geopandas.read_feather( ... ""data.feather"", ... columns=[""geometry"", ""pop_est""] ... ) # doctest: +SKIP """""" feather = import_optional_dependency( ""pyarrow.feather"", extra=""pyarrow is required for Feather support."" ) # TODO move this into `import_optional_dependency` import pyarrow if pyarrow.__version__ < LooseVersion(""0.17.0""): raise ImportError(""pyarrow >= 0.17 required for Feather support"") table = feather.read_table(path, columns=columns, **kwargs) return _arrow_to_geopandas(table) " 56591,"def dict_to_dataset( data, *, attrs=None, library=None, coords=None, dims=None, skip_event_dims=None ): """"""Convert a dictionary of numpy arrays to an xarray.Dataset. Parameters ---------- data : dict[str] -> ndarray Data to convert. Keys are variable names. attrs : dict Json serializable metadata to attach to the dataset, in addition to defaults. library : module Library used for performing inference. Will be attached to the attrs metadata. coords : dict[str] -> ndarray Coordinates for the dataset dims : dict[str] -> list[str] Dimensions of each variable. The keys are variable names, values are lists of coordinates. skip_event_dims : bool If True, cut extra dims whenever present to match the shape of the data. Necessary for PPLs who have the same name in both observed data and log likelihood groups share variable names to account for their different shapes. 
Returns ------- xr.Dataset Examples -------- dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)}) """""" if dims is None: dims = {} data_vars = {} for key, values in data.items(): data_vars[key] = numpy_to_data_array( values, var_name=key, coords=coords, dims=dims.get(key), skip_event_dims=skip_event_dims ) return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library)) ","def dict_to_dataset( data, *, attrs=None, library=None, coords=None, dims=None, skip_event_dims=None ): """"""Convert a dictionary of numpy arrays to an xarray.Dataset. Parameters ---------- data : dict[str] -> ndarray Data to convert. Keys are variable names. attrs : dict Json serializable metadata to attach to the dataset, in addition to defaults. library : module Library used for performing inference. Will be attached to the attrs metadata. coords : dict[str] -> ndarray Coordinates for the dataset dims : dict[str] -> list[str] Dimensions of each variable. The keys are variable names, values are lists of coordinates. skip_event_dims : bool If True, cut extra dims whenever present to match the shape of the data. Necessary for PPLs which have the same name in both observed data and log likelihood groups, to account for their different shapes when observations are multivariate. Returns ------- xr.Dataset Examples -------- dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)}) """""" if dims is None: dims = {} data_vars = {} for key, values in data.items(): data_vars[key] = numpy_to_data_array( values, var_name=key, coords=coords, dims=dims.get(key), skip_event_dims=skip_event_dims ) return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library)) " 35329,"def convert_apdl_block(apdl_strings, loglevel=""WARNING"", auto_exit=True, line_ending=None, exec_file=None, macros_as_functions=True, use_function_names=True, show_log = False): """"""Converts an ANSYS input string to a python PyMAPDL string. Parameters ---------- apdl_string : str APDL strings or list of strings to convert. filename_out : str Filename of the python script to write a translation to. loglevel : str, optional Logging level of the ansys object within the script. auto_exit : bool, optional Adds a line to the end of the script to exit MAPDL. Default ``True``. line_ending : str, optional When None, automatically determined by OS being used. macros_as_functions : bool, optional Attempt to convert MAPDL macros to python functions. use_function_names : bool, optional Convert MAPDL functions to ansys.mapdl.core.Mapdl class methods. When ``True``, the MAPDL command ""K"" will be converted to ``mapdl.k``. When ``False``, it will be converted to ``mapdl.run('k')``. show_log : bool, optional Print the converted commands using a logger (from ``logging`` Python module). Returns ------- list List of lines translated. """""" translator = _convert(apdl_strings, loglevel=loglevel, auto_exit=auto_exit, line_ending=line_ending, exec_file=exec_file, macros_as_functions=macros_as_functions, use_function_names=use_function_names, show_log=show_log) if isinstance(apdl_strings, str): return translator.line_ending.join(translator.lines) return translator.lines ","def convert_apdl_block(apdl_strings, loglevel=""WARNING"", auto_exit=True, line_ending=None, exec_file=None, macros_as_functions=True, use_function_names=True, show_log=False): """"""Converts an ANSYS input string to a python PyMAPDL string. Parameters ---------- apdl_string : str APDL strings or list of strings to convert. 
filename_out : str Filename of the python script to write a translation to. loglevel : str, optional Logging level of the ansys object within the script. auto_exit : bool, optional Adds a line to the end of the script to exit MAPDL. Default ``True``. line_ending : str, optional When None, automatically determined by OS being used. macros_as_functions : bool, optional Attempt to convert MAPDL macros to python functions. use_function_names : bool, optional Convert MAPDL functions to ansys.mapdl.core.Mapdl class methods. When ``True``, the MAPDL command ""K"" will be converted to ``mapdl.k``. When ``False``, it will be converted to ``mapdl.run('k')``. show_log : bool, optional Print the converted commands using a logger (from ``logging`` Python module). Returns ------- list List of lines translated. """""" translator = _convert(apdl_strings, loglevel=loglevel, auto_exit=auto_exit, line_ending=line_ending, exec_file=exec_file, macros_as_functions=macros_as_functions, use_function_names=use_function_names, show_log=show_log) if isinstance(apdl_strings, str): return translator.line_ending.join(translator.lines) return translator.lines " 53603,"def infer_dataclass_field_call( node: Call, ctx: context.InferenceContext = None ) -> Generator: """"""Inference tip for dataclass field calls."""""" if isinstance(node.parent, (AnnAssign, Assign)): field_call = node.parent.value default_type, default = _get_field_default(field_call) if not default_type: yield Uninferable elif default_type == ""default"": yield from default.infer(context=ctx) else: new_call = parse(default.as_string()).body[0].value new_call.parent = field_call.parent yield from new_call.infer(context=ctx) ","def infer_dataclass_field_call( node: Call, ctx: context.InferenceContext = None ) -> Generator: """"""Inference tip for dataclass field calls."""""" if not isinstance(node.parent, (AnnAssign, Assign)): return field_call = node.parent.value default_type, default = _get_field_default(field_call) if not default_type: yield Uninferable elif default_type == ""default"": yield from default.infer(context=ctx) else: new_call = parse(default.as_string()).body[0].value new_call.parent = field_call.parent yield from new_call.infer(context=ctx) " 49554,"def concat( dfs, axis=0, join=""outer"", interleave_partitions=False, ignore_unknown_divisions=False, ): """""" Concatenate DataFrames along rows. - When axis=0 (default), concatenate DataFrames row-wise: - If all divisions are known and ordered, concatenate DataFrames keeping divisions. When divisions are not ordered, specifying interleave_partition=True allows concatenate divisions each by each. - If any of division is unknown, concatenate DataFrames resetting its division to unknown (None) - When axis=1, concatenate DataFrames column-wise: - Allowed if all divisions are known. - If any of division is unknown, it raises ValueError. Parameters ---------- dfs : list List of dask.DataFrames to be concatenated axis : {0, 1, 'index', 'columns'}, default 0 The axis to concatenate along join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis interleave_partitions : bool, default False Whether to concatenate DataFrames ignoring its order. If True, every divisions are concatenated each by each. ignore_unknown_divisions: boolean, default False Whether to warn when concatenating dask series/dataframes with unknown divisions. If True the warning won't be emitted. Notes ----- This differs in from ``pd.concat`` in the when concatenating Categoricals with different categories. 
Pandas currently coerces those to objects before concatenating. Coercing to objects is very expensive for large arrays, so dask preserves the Categoricals by taking the union of the categories. Examples -------- If all divisions are known and ordered, divisions are kept. >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b]) # doctest: +SKIP dd.DataFrame Unable to concatenate if divisions are not ordered. >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b]) # doctest: +SKIP ValueError: All inputs have known divisions which cannot be concatenated in order. Specify interleave_partitions=True to ignore order Specify interleave_partitions=True to ignore the division order. >>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP dd.DataFrame If any of division is unknown, the result division will be unknown >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b]) # doctest: +SKIP dd.DataFrame If we set ignore_unknown_divisions=True, the warning won't be emitted >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP dd.DataFrame Different categoricals are unioned >> dd.concat([ # doctest: +SKIP ... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1), ... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1), ... ], interleave_partitions=True).dtype CategoricalDtype(categories=['a', 'b', 'c'], ordered=False) """""" if not isinstance(dfs, list): raise TypeError(""dfs must be a list of DataFrames/Series objects"") if len(dfs) == 0: raise ValueError(""No objects to concatenate"") if len(dfs) == 1: if axis == 1 and isinstance(dfs[0], Series): return dfs[0].to_frame() else: return dfs[0] if join not in (""inner"", ""outer""): raise ValueError(""'join' must be 'inner' or 'outer'"") axis = DataFrame._validate_axis(axis) dasks = [df for df in dfs if isinstance(df, _Frame)] dfs = _maybe_from_pandas(dfs) if axis == 1: if all(df.known_divisions for df in dasks): return concat_indexed_dataframes(dfs, axis=axis, join=join) elif ( len(dasks) == len(dfs) and all(not df.known_divisions for df in dfs) and len({df.npartitions for df in dasks}) == 1 ): if not ignore_unknown_divisions: warnings.warn( ""Concatenating dataframes with unknown divisions.\n"" ""We're assuming that the indexes of each dataframes"" "" are \n aligned. This assumption is not generally "" ""safe."" ) return concat_unindexed_dataframes(dfs) else: raise ValueError( ""Unable to concatenate DataFrame with unknown "" ""division specifying axis=1"" ) else: if all(df.known_divisions for df in dasks): # each DataFrame's division must be greater than previous one if all( dfs[i].divisions[-1] < dfs[i + 1].divisions[0] for i in range(len(dfs) - 1) ): divisions = [] for df in dfs[:-1]: # remove last to concatenate with next divisions += df.divisions[:-1] divisions += dfs[-1].divisions return stack_partitions(dfs, divisions, join=join) elif interleave_partitions: return concat_indexed_dataframes(dfs, join=join) else: divisions = [None] * (sum([df.npartitions for df in dfs]) + 1) return stack_partitions(dfs, divisions, join=join) else: divisions = [None] * (sum([df.npartitions for df in dfs]) + 1) return stack_partitions(dfs, divisions, join=join) ","def concat( dfs, axis=0, join=""outer"", interleave_partitions=False, ignore_unknown_divisions=False, ): """""" Concatenate DataFrames along rows. 
- When axis=0 (default), concatenate DataFrames row-wise: - If all divisions are known and ordered, concatenate DataFrames keeping divisions. When divisions are not ordered, specifying interleave_partition=True allows concatenate divisions each by each. - If any of division is unknown, concatenate DataFrames resetting its division to unknown (None) - When axis=1, concatenate DataFrames column-wise: - Allowed if all divisions are known. - If any of division is unknown, it raises ValueError. Parameters ---------- dfs : list List of dask.DataFrames to be concatenated axis : {0, 1, 'index', 'columns'}, default 0 The axis to concatenate along join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis interleave_partitions : bool, default False Whether to concatenate DataFrames ignoring its order. If True, every divisions are concatenated each by each. ignore_unknown_divisions: boolean, default False By default a warning is raised if any input has unknown divisions. Set to True to disable this warning. Notes ----- This differs in from ``pd.concat`` in the when concatenating Categoricals with different categories. Pandas currently coerces those to objects before concatenating. Coercing to objects is very expensive for large arrays, so dask preserves the Categoricals by taking the union of the categories. Examples -------- If all divisions are known and ordered, divisions are kept. >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b]) # doctest: +SKIP dd.DataFrame Unable to concatenate if divisions are not ordered. >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b]) # doctest: +SKIP ValueError: All inputs have known divisions which cannot be concatenated in order. Specify interleave_partitions=True to ignore order Specify interleave_partitions=True to ignore the division order. >>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP dd.DataFrame If any of division is unknown, the result division will be unknown >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b]) # doctest: +SKIP dd.DataFrame If we set ignore_unknown_divisions=True, the warning won't be emitted >>> a # doctest: +SKIP dd.DataFrame >>> b # doctest: +SKIP dd.DataFrame >>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP dd.DataFrame Different categoricals are unioned >> dd.concat([ # doctest: +SKIP ... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1), ... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1), ... 
], interleave_partitions=True).dtype CategoricalDtype(categories=['a', 'b', 'c'], ordered=False) """""" if not isinstance(dfs, list): raise TypeError(""dfs must be a list of DataFrames/Series objects"") if len(dfs) == 0: raise ValueError(""No objects to concatenate"") if len(dfs) == 1: if axis == 1 and isinstance(dfs[0], Series): return dfs[0].to_frame() else: return dfs[0] if join not in (""inner"", ""outer""): raise ValueError(""'join' must be 'inner' or 'outer'"") axis = DataFrame._validate_axis(axis) dasks = [df for df in dfs if isinstance(df, _Frame)] dfs = _maybe_from_pandas(dfs) if axis == 1: if all(df.known_divisions for df in dasks): return concat_indexed_dataframes(dfs, axis=axis, join=join) elif ( len(dasks) == len(dfs) and all(not df.known_divisions for df in dfs) and len({df.npartitions for df in dasks}) == 1 ): if not ignore_unknown_divisions: warnings.warn( ""Concatenating dataframes with unknown divisions.\n"" ""We're assuming that the indexes of each dataframes"" "" are \n aligned. This assumption is not generally "" ""safe."" ) return concat_unindexed_dataframes(dfs) else: raise ValueError( ""Unable to concatenate DataFrame with unknown "" ""division specifying axis=1"" ) else: if all(df.known_divisions for df in dasks): # each DataFrame's division must be greater than previous one if all( dfs[i].divisions[-1] < dfs[i + 1].divisions[0] for i in range(len(dfs) - 1) ): divisions = [] for df in dfs[:-1]: # remove last to concatenate with next divisions += df.divisions[:-1] divisions += dfs[-1].divisions return stack_partitions(dfs, divisions, join=join) elif interleave_partitions: return concat_indexed_dataframes(dfs, join=join) else: divisions = [None] * (sum([df.npartitions for df in dfs]) + 1) return stack_partitions(dfs, divisions, join=join) else: divisions = [None] * (sum([df.npartitions for df in dfs]) + 1) return stack_partitions(dfs, divisions, join=join) " 31151,"def main(): SESSION.proxies = handle_proxy() client = SixgillEnrichClient( demisto.params()[""client_id""], demisto.params()[""client_secret""], CHANNEL_CODE, demisto, SESSION, VERIFY ) command = demisto.command() demisto.info(f""Command being called is {command}"") commands: Dict[str, Callable] = { ""test-module"": test_module_command, } try: if demisto.command() == ""ip"": return_results(ip_reputation_command(client, demisto.args())) elif demisto.command() == ""domain"": return_results(domain_reputation_command(client, demisto.args())) elif demisto.command() == ""url"": return_results(url_reputation_command(client, demisto.args())) elif demisto.command() == ""file"": return_results(file_reputation_command(client, demisto.args())) elif demisto.command() == ""actor"": return_results(actor_reputation_command(client, demisto.args())) elif demisto.command() == ""post_id"": return_results(postid_reputation_command(client, demisto.args())) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Error failed to execute {demisto.command()}, error: [{e}]"") ","def main(): SESSION.proxies = handle_proxy() client = SixgillEnrichClient( demisto.params()[""client_id""], demisto.params()[""client_secret""], CHANNEL_CODE, demisto, SESSION, VERIFY ) command = demisto.command() demisto.info(f""Command being called is {command}"") commands: Dict[str, Callable] = { ""test-module"": test_module_command, } try: if demisto.command() == ""ip"": 
return_results(ip_reputation_command(client, demisto.args())) elif demisto.command() == ""domain"": return_results(domain_reputation_command(client, demisto.args())) elif demisto.command() == ""url"": return_results(url_reputation_command(client, demisto.args())) elif demisto.command() == ""file"": return_results(file_reputation_command(client, demisto.args())) elif demisto.command() == ""actor"": return_results(actor_reputation_command(client, demisto.args())) elif command == ""post_id"": return_results(postid_reputation_command(client, demisto.args())) else: readable_output, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output, outputs, raw_response) except Exception as e: demisto.error(traceback.format_exc()) return_error(f""Error failed to execute {demisto.command()}, error: [{e}]"") " 52290,"def main(): # Initialization fname_data = '' interp_factor = param.interp_factor remove_temp_files = param.remove_temp_files verbose = param.verbose suffix = param.suffix smoothing_sigma = param.smoothing_sigma # start timer start_time = time.time() # Parameters for debug mode if param.debug: fname_data = os.path.join(__data_dir__, 'sct_testing_data', 't2', 't2_seg.nii.gz') remove_temp_files = 0 param.mask_size = 10 else: # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'hi:v:r:s:') except getopt.GetoptError: usage() raise SystemExit(2) if not opts: usage() raise SystemExit(2) for opt, arg in opts: if opt == '-h': usage() return elif opt in ('-i'): fname_data = arg elif opt in ('-r'): remove_temp_files = int(arg) elif opt in ('-s'): smoothing_sigma = arg elif opt in ('-v'): verbose = int(arg) # display usage if a mandatory argument is not provided if fname_data == '': usage() raise SystemExit(2) # printv(arguments) printv('\nCheck parameters:') printv(' segmentation ........... ' + fname_data) printv(' interp factor .......... ' + str(interp_factor)) printv(' smoothing sigma ........ ' + str(smoothing_sigma)) # check existence of input files printv('\nCheck existence of input files...') check_file_exist(fname_data, verbose) # Extract path, file and extension path_data, file_data, ext_data = extract_fname(fname_data) path_tmp = tmp_create(basename=""binary_to_trilinear"") printv('\nCopying input data to tmp folder and convert to nii...', param.verbose) im_input = convert(Image(fname_data)) im_input.save(os.path.join(path_tmp, ""data.nii""), mutable=True, verbose=param.verbose) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # Get dimensions of data printv('\nGet dimensions of data...', verbose) nx, ny, nz, nt, px, py, pz, pt = Image('data.nii').dim printv('.. 
' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # upsample data printv('\nUpsample data...', verbose) sct_resample.main([ ""-i"", ""data.nii"", ""-x"", ""linear"", ""-vox"", str(nx * interp_factor) + 'x' + str(ny * interp_factor) + 'x' + str(nz * interp_factor), ""-o"", ""data_up.nii"", ]) # Smooth along centerline printv('\nSmooth along centerline...', verbose) sct_smooth_spinalcord.main([""-i"", ""data_up.nii"", ""-s"", ""data_up.nii"", ""-smooth"", str(smoothing_sigma), ""-r"", str(remove_temp_files), ""-v"", str(verbose), ]) # downsample data printv('\nDownsample data...', verbose) sct_resample.main([ ""-i"", ""data_up_smooth.nii"", ""-x"", ""linear"", ""-vox"", str(nx) + 'x' + str(ny) + 'x' + str(nz), ""-o"", ""data_up_smooth_down.nii"", ]) # come back os.chdir(curdir) # Generate output files printv('\nGenerate output files...') fname_out = generate_output_file(os.path.join(path_tmp, ""data_up_smooth_down.nii""), '' + file_data + suffix + ext_data) # Delete temporary files if remove_temp_files == 1: printv('\nRemove temporary files...') rmtree(path_tmp) # display elapsed time elapsed_time = time.time() - start_time printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's') # to view results printv('\nTo view results, type:') printv('fslview ' + file_data + ' ' + file_data + suffix + ' &\n') ","def main(): # Initialization fname_data = '' interp_factor = param.interp_factor remove_temp_files = param.remove_temp_files verbose = param.verbose suffix = param.suffix smoothing_sigma = param.smoothing_sigma # start timer start_time = time.time() # Parameters for debug mode if param.debug: fname_data = os.path.join(__data_dir__, 'sct_testing_data', 't2', 't2_seg.nii.gz') remove_temp_files = 0 param.mask_size = 10 else: # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'hi:v:r:s:') except getopt.GetoptError: usage() raise SystemExit(2) if not opts: usage() raise SystemExit(2) for opt, arg in opts: if opt == '-h': usage() return elif opt in ('-i'): fname_data = arg elif opt in ('-r'): remove_temp_files = int(arg) elif opt in ('-s'): smoothing_sigma = arg elif opt in ('-v'): verbose = int(arg) # display usage if a mandatory argument is not provided if fname_data == '': usage() raise SystemExit(2) # printv(arguments) printv('\nCheck parameters:') printv(' segmentation ........... ' + fname_data) printv(' interp factor .......... ' + str(interp_factor)) printv(' smoothing sigma ........ ' + str(smoothing_sigma)) # check existence of input files printv('\nCheck existence of input files...') check_file_exist(fname_data, verbose) # Extract path, file and extension path_data, file_data, ext_data = extract_fname(fname_data) path_tmp = tmp_create(basename=""binary_to_trilinear"") printv('\nCopying input data to tmp folder and convert to nii...', param.verbose) im_input = convert(Image(fname_data)) im_input.save(os.path.join(path_tmp, ""data.nii""), mutable=True, verbose=param.verbose) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # Get dimensions of data printv('\nGet dimensions of data...', verbose) nx, ny, nz, nt, px, py, pz, pt = Image('data.nii').dim printv('.. 
' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # upsample data printv('\nUpsample data...', verbose) sct_resample.main([ ""-i"", ""data.nii"", ""-x"", ""linear"", ""-vox"", str(nx * interp_factor) + 'x' + str(ny * interp_factor) + 'x' + str(nz * interp_factor), ""-o"", ""data_up.nii"", ]) # Smooth along centerline printv('\nSmooth along centerline...', verbose) sct_smooth_spinalcord.main([ ""-i"", ""data_up.nii"", ""-s"", ""data_up.nii"", ""-smooth"", str(smoothing_sigma), ""-r"", str(remove_temp_files), ""-v"", str(verbose), ]) # downsample data printv('\nDownsample data...', verbose) sct_resample.main([ ""-i"", ""data_up_smooth.nii"", ""-x"", ""linear"", ""-vox"", str(nx) + 'x' + str(ny) + 'x' + str(nz), ""-o"", ""data_up_smooth_down.nii"", ]) # come back os.chdir(curdir) # Generate output files printv('\nGenerate output files...') fname_out = generate_output_file(os.path.join(path_tmp, ""data_up_smooth_down.nii""), '' + file_data + suffix + ext_data) # Delete temporary files if remove_temp_files == 1: printv('\nRemove temporary files...') rmtree(path_tmp) # display elapsed time elapsed_time = time.time() - start_time printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's') # to view results printv('\nTo view results, type:') printv('fslview ' + file_data + ' ' + file_data + suffix + ' &\n') " 6893,"def upload_from_folder(path, is_private, dropbox_folder, dropbox_client, did_not_upload, error_log): if not os.path.exists(path): return if is_fresh_upload(): response = get_uploaded_files_meta(dropbox_folder, dropbox_client) else: response = frappe._dict({""entries"": []}) path = text_type(path) for f in frappe.get_all(""File"", filters={""is_folder"": 0, ""is_private"": is_private, ""uploaded_to_dropbox"": 0}, fields=['file_url', 'name', 'file_name']): if not f.file_url: filename = f.filename.rsplit('/',1)[-1] else: filename = f.file_url.rsplit('/',1)[-1] filepath = os.path.join(path, filename) if filename in ignore_list: continue found = False for file_metadata in response.entries: try: if (os.path.basename(filepath) == file_metadata.name and os.stat(encode(filepath)).st_size == int(file_metadata.size)): found = True update_file_dropbox_status(f.name) break except Exception: error_log.append(frappe.get_traceback()) if not found: try: upload_file_to_dropbox(filepath, dropbox_folder, dropbox_client) update_file_dropbox_status(f.name) except Exception: did_not_upload.append(filepath) error_log.append(frappe.get_traceback()) ","def upload_from_folder(path, is_private, dropbox_folder, dropbox_client, did_not_upload, error_log): if not os.path.exists(path): return if is_fresh_upload(): response = get_uploaded_files_meta(dropbox_folder, dropbox_client) else: response = frappe._dict({""entries"": []}) path = text_type(path) for f in frappe.get_all(""File"", filters={""is_folder"": 0, ""is_private"": is_private, ""uploaded_to_dropbox"": 0}, fields=['file_url', 'name', 'file_name']): if not f.file_url: filename = f.filename else: filename = f.file_url.rsplit('/',1)[-1] filepath = os.path.join(path, filename) if filename in ignore_list: continue found = False for file_metadata in response.entries: try: if (os.path.basename(filepath) == file_metadata.name and os.stat(encode(filepath)).st_size == int(file_metadata.size)): found = True update_file_dropbox_status(f.name) break except Exception: error_log.append(frappe.get_traceback()) if not found: try: upload_file_to_dropbox(filepath, dropbox_folder, dropbox_client) update_file_dropbox_status(f.name) except Exception: 
did_not_upload.append(filepath) error_log.append(frappe.get_traceback()) " 43980,"def _jax_is_independent_ana(func, *args, **kwargs): """"""Test analytically whether a function is independent of its arguments using JAX. Args: func (callable): Function to test for independence args (tuple): Arguments for the function with respect to which to test for independence kwargs (dict): Keyword arguments for the function at which (but not with respect to which) to test for independence Returns: bool: Whether the function seems to not depend on it ``args`` analytically. That is, an output of ``True`` means that the ``args`` do *not* feed into the output. In JAX, we test this by constructing the VJP of the passed function and inspecting its signature: The first argument of the output of ``jax.vjp`` is a ``Partial``. If *any* processing happens to any input, the arguments of that ``Partial`` are unequal to ``((),)`. Functions that depend on the input in a trivial manner, i.e. without processing it, will go undetected by this. Therefore we also test the arguments of the *function* of the above ``Partial``. The first of these arguments is a list of tuples and if the first entry of the first tuple is not ``None``, the input arguments are detected to actually feed into the output. .. warning:: This is an experimental function and unknown edge cases may exist to this two-stage test. """""" import jax # pylint: disable=import-outside-toplevel mapped_func = lambda *_args: func(*_args, **kwargs) # pylint: disable=unnecessary-lambda _vjp = jax.vjp(mapped_func, *args)[1] if _vjp.args[0].args != ((),): return False if _vjp.args[0].func.args[0][0][0] is not None: return False return True ","def _jax_is_independent_ana(func, *args, **kwargs): """"""Test analytically whether a function is independent of its arguments using JAX. Args: func (callable): Function to test for independence args (tuple): Arguments for the function with respect to which to test for independence kwargs (dict): Keyword arguments for the function at which (but not with respect to which) to test for independence Returns: bool: Whether the function seems to not depend on it ``args`` analytically. That is, an output of ``True`` means that the ``args`` do *not* feed into the output. In JAX, we test this by constructing the VJP of the passed function and inspecting its signature: The first argument of the output of ``jax.vjp`` is a ``Partial``. If *any* processing happens to any input, the arguments of that ``Partial`` are unequal to ``((),)`. Functions that depend on the input in a trivial manner, i.e., without processing it, will go undetected by this. Therefore we also test the arguments of the *function* of the above ``Partial``. The first of these arguments is a list of tuples and if the first entry of the first tuple is not ``None``, the input arguments are detected to actually feed into the output. .. warning:: This is an experimental function and unknown edge cases may exist to this two-stage test. 
"""""" import jax # pylint: disable=import-outside-toplevel mapped_func = lambda *_args: func(*_args, **kwargs) # pylint: disable=unnecessary-lambda _vjp = jax.vjp(mapped_func, *args)[1] if _vjp.args[0].args != ((),): return False if _vjp.args[0].func.args[0][0][0] is not None: return False return True " 24356,"def get_tox_envs( checks, style=False, format_style=False, benchmark=False, every=False, changed_only=False, sort=False, e2e_tests_only=False, ): testable_checks = get_testable_checks() # Run `get_changed_checks` at most once because git calls are costly changed_checks = get_changed_checks() if not checks or changed_only else None if not checks: checks = sorted(testable_checks & changed_checks) checks_seen = set() tox_env_filter = os.environ.get(""TOX_SKIP_ENV"") tox_env_filter_re = re.compile(tox_env_filter) if tox_env_filter is not None else None for check in checks: check, _, envs_selected = check.partition(':') echo_debug(f""Getting tox envs for `{check}:{envs_selected}`"") if check in checks_seen: echo_debug(f""`{check}` already evaluated, skipping"") continue if check not in testable_checks: echo_debug(f""`{check}` is not testable, skipping"") continue if changed_only and check not in changed_checks: echo_debug(f""`{check}` does not have changes, skipping"") continue else: checks_seen.add(check) envs_selected = envs_selected.split(',') if envs_selected else [] envs_available = get_available_tox_envs(check, sort=sort, e2e_tests_only=e2e_tests_only) if format_style: envs_selected[:] = [e for e in envs_available if 'format_style' in e] elif style: envs_selected[:] = [e for e in envs_available if e in STYLE_CHECK_ENVS] elif benchmark: envs_selected[:] = [e for e in envs_available if 'bench' in e] else: if every: envs_selected[:] = envs_available elif envs_selected: available = set(envs_selected) & set(envs_available) selected = [] # Retain order and remove duplicates for e in envs_selected: # TODO: support globs or regex if e in available: selected.append(e) available.remove(e) envs_selected[:] = selected else: envs_selected[:] = [e for e in envs_available if 'bench' not in e and 'format_style' not in e] if tox_env_filter_re: envs_selected[:] = [e for e in envs_selected if not tox_env_filter_re.match(e)] echo_debug(f""Selected environments: {envs_selected}"") yield check, envs_selected ","def get_tox_envs( checks, style=False, format_style=False, benchmark=False, every=False, changed_only=False, sort=False, e2e_tests_only=False, ): testable_checks = get_testable_checks() # Run `get_changed_checks` at most once because git calls are costly changed_checks = get_changed_checks() if not checks or changed_only else None if not checks: checks = sorted(testable_checks & changed_checks) checks_seen = set() tox_env_filter = os.environ.get(""TOX_SKIP_ENV"") tox_env_filter_re = re.compile(tox_env_filter) if tox_env_filter is not None else None for check in checks: check, _, envs_selected = check.partition(':') echo_debug(f""Getting tox envs for `{check}:{envs_selected}`"") if check in checks_seen: echo_debug(f""`{check}` already evaluated, skipping"") continue elif check not in testable_checks: echo_debug(f""`{check}` is not testable, skipping"") continue if changed_only and check not in changed_checks: echo_debug(f""`{check}` does not have changes, skipping"") continue else: checks_seen.add(check) envs_selected = envs_selected.split(',') if envs_selected else [] envs_available = get_available_tox_envs(check, sort=sort, e2e_tests_only=e2e_tests_only) if format_style: envs_selected[:] = [e 
for e in envs_available if 'format_style' in e] elif style: envs_selected[:] = [e for e in envs_available if e in STYLE_CHECK_ENVS] elif benchmark: envs_selected[:] = [e for e in envs_available if 'bench' in e] else: if every: envs_selected[:] = envs_available elif envs_selected: available = set(envs_selected) & set(envs_available) selected = [] # Retain order and remove duplicates for e in envs_selected: # TODO: support globs or regex if e in available: selected.append(e) available.remove(e) envs_selected[:] = selected else: envs_selected[:] = [e for e in envs_available if 'bench' not in e and 'format_style' not in e] if tox_env_filter_re: envs_selected[:] = [e for e in envs_selected if not tox_env_filter_re.match(e)] echo_debug(f""Selected environments: {envs_selected}"") yield check, envs_selected " 45903,"def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor: r""""""Convert a raw bayer image to RGB version of image. We are assuming a CFA with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/G and a fix convolution for the green pixels. To simplify calculations we expect the Height Widht to be evenly divisible by 2 The image data is assumed to be in the range of (0, 1). Image H/W is assumed to be evenly divisible by 2 for simplicity reasons Args: image: raw image to be converted to RGB with shape :math:`(*,1,H,W)`. cfa: The configuration of the color filter. Returns: RGB version of the image with shape :math:`(*,3,H,W)`. Example: >>> rawinput = torch.randn(2, 1, 4, 6) >>> rgb = raw_to_rgb(rawinput) # 2x3x4x5 """""" if not isinstance(image, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. "" f""Got {type(image)}"") if image.dim() < 3 or image.size(-3) != 1: raise ValueError(f""Input size must have a shape of (*, 1, H, W). "" f""Got {image.shape}."") if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1: raise ValueError(f""Input H&W must be evenly disible by 2. Got {image.shape}"") dosqueeze = False # for compatibility with pytorch funcitons, make sure we are always 4 dimensions and # strip the extra at the end, if necessary if len(image.shape) == 3: image = image.unsqueeze(0) dosqueeze = True # BG is defined as pel 1,1 being blue, that is the top left is actually green. 
This matches # opencv naming so makes sense to keep if cfa == CFA.BG: r = image[..., :, ::2, ::2] b = image[..., :, 1::2, 1::2] rpad = (0, 1, 0, 1) bpad = (1, 0, 1, 0) elif cfa == CFA.GB: r = image[..., :, ::2, 1::2] b = image[..., :, 1::2, ::2] rpad = (1, 0, 0, 1) bpad = (0, 1, 1, 0) elif cfa == CFA.RG: r = image[..., :, 1::2, 1::2] b = image[..., :, ::2, ::2] rpad = (1, 0, 1, 0) bpad = (0, 1, 0, 1) elif cfa == CFA.GR: r = image[..., :, 1::2, ::2] b = image[..., :, ::2, 1::2] rpad = (0, 1, 1, 0) bpad = (1, 0, 0, 1) else: raise ValueError(f""Unsupported CFA "" f""Got {cfa}."") # upscaling r and b with bi-linear gives reasonable quality # Note that depending on where these are sampled we need to pad appropriately # the bilinear filter will pretty much be based on for example this layout (RG) # (which needs to be padded bottom right) # +-+-+ # |B| | # | | | # +-+-+ # While in this layout we need to pad with additional B samples top left to # make sure we interpolate from the correct position # +-+-+ # | | | # | |B| # +-+-+ # For an image like this (3x2 blue pixels) # +------+ # |B B B | # | | # |B B B | # | | # +------+ # It needs to be expanded to this (4x3 pixels scaled to 7x5 for correct interpolation) # +-------+ # |B B B b| # | | # |B B B b| # | | # |b b b b| # +-------+ # and we crop the area afterwards. This is since the interpolation will be between first and last pixel # evenly spaced between them while the B/R samples will be missing in the corners were they are assumed to exist # Further we need to do align_corners to start the interpolation from the middle of the samples in the corners, that # way we get to keep the knwon blue samples across the whole image rpadded = torch.nn.functional.pad(r, rpad, 'replicate') bpadded = torch.nn.functional.pad(b, bpad, 'replicate') # use explicit padding instead of conv2d padding to be able to use reflect which mirror the correct colors # for a 2x2 bayer filter gpadded = torch.nn.functional.pad(image, (1, 1, 1, 1), 'reflect') ru = torch.nn.functional.interpolate(rpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1), mode='bilinear', align_corners=True) bu = torch.nn.functional.interpolate(bpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1), mode='bilinear', align_corners=True) # remove the extra padding ru = torch.nn.functional.pad(ru, [-x for x in rpad]) bu = torch.nn.functional.pad(bu, [-x for x in bpad]) # all unknown pixels are the average of the nearby green samples kernel = torch.tensor([[[[0.0, 0.25, 0.0], [0.25, 0.0, 0.25], [0.0, 0.25, 0.0]]]], dtype=image.dtype, device=image.device) # This is done on all samples but result for the known green samples is then overwritten by the input gu = torch.nn.functional.conv2d(gpadded, kernel, padding='valid') # overwrite the already known samples which otherwise have values from r/b # this depends on the CFA configuration if cfa == CFA.BG: gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2] gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2] elif cfa == CFA.GB: gu[:, :, ::2, ::2] = image[:, :, ::2, ::2] gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2] elif cfa == CFA.RG: gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2] gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2] elif cfa == CFA.GR: gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2] gu[:, :, ::2, ::2] = image[:, :, ::2, ::2] else: raise ValueError(f""Unsupported CFA "" f""Got {cfa}."") rgb: torch.Tensor = torch.cat([ru, gu, bu], dim=-3) # return possibly missing batch dim if dosqueeze: rgb = rgb.squeeze(0) return rgb ","def raw_to_rgb(image: torch.Tensor, cfa: 
CFA) -> torch.Tensor: r""""""Convert a raw bayer image to RGB version of image. We are assuming a CFA with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/G and a fix convolution for the green pixels. To simplify calculations we expect the Height Widht to be evenly divisible by 2 The image data is assumed to be in the range of (0, 1). Image H/W is assumed to be evenly divisible by 2 for simplicity reasons Args: image: raw image to be converted to RGB with shape :math:`(*,1,H,W)`. cfa: The configuration of the color filter. Returns: RGB version of the image with shape :math:`(*,3,H,W)`. Example: >>> rawinput = torch.randn(2, 1, 4, 6) >>> rgb = raw_to_rgb(rawinput, cfa=CFA.BG) # 2x3x4x5 """""" if not isinstance(image, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. "" f""Got {type(image)}"") if image.dim() < 3 or image.size(-3) != 1: raise ValueError(f""Input size must have a shape of (*, 1, H, W). "" f""Got {image.shape}."") if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1: raise ValueError(f""Input H&W must be evenly disible by 2. Got {image.shape}"") dosqueeze = False # for compatibility with pytorch funcitons, make sure we are always 4 dimensions and # strip the extra at the end, if necessary if len(image.shape) == 3: image = image.unsqueeze(0) dosqueeze = True # BG is defined as pel 1,1 being blue, that is the top left is actually green. This matches # opencv naming so makes sense to keep if cfa == CFA.BG: r = image[..., :, ::2, ::2] b = image[..., :, 1::2, 1::2] rpad = (0, 1, 0, 1) bpad = (1, 0, 1, 0) elif cfa == CFA.GB: r = image[..., :, ::2, 1::2] b = image[..., :, 1::2, ::2] rpad = (1, 0, 0, 1) bpad = (0, 1, 1, 0) elif cfa == CFA.RG: r = image[..., :, 1::2, 1::2] b = image[..., :, ::2, ::2] rpad = (1, 0, 1, 0) bpad = (0, 1, 0, 1) elif cfa == CFA.GR: r = image[..., :, 1::2, ::2] b = image[..., :, ::2, 1::2] rpad = (0, 1, 1, 0) bpad = (1, 0, 0, 1) else: raise ValueError(f""Unsupported CFA "" f""Got {cfa}."") # upscaling r and b with bi-linear gives reasonable quality # Note that depending on where these are sampled we need to pad appropriately # the bilinear filter will pretty much be based on for example this layout (RG) # (which needs to be padded bottom right) # +-+-+ # |B| | # | | | # +-+-+ # While in this layout we need to pad with additional B samples top left to # make sure we interpolate from the correct position # +-+-+ # | | | # | |B| # +-+-+ # For an image like this (3x2 blue pixels) # +------+ # |B B B | # | | # |B B B | # | | # +------+ # It needs to be expanded to this (4x3 pixels scaled to 7x5 for correct interpolation) # +-------+ # |B B B b| # | | # |B B B b| # | | # |b b b b| # +-------+ # and we crop the area afterwards. 
This is since the interpolation will be between first and last pixel # evenly spaced between them while the B/R samples will be missing in the corners were they are assumed to exist # Further we need to do align_corners to start the interpolation from the middle of the samples in the corners, that # way we get to keep the knwon blue samples across the whole image rpadded = torch.nn.functional.pad(r, rpad, 'replicate') bpadded = torch.nn.functional.pad(b, bpad, 'replicate') # use explicit padding instead of conv2d padding to be able to use reflect which mirror the correct colors # for a 2x2 bayer filter gpadded = torch.nn.functional.pad(image, (1, 1, 1, 1), 'reflect') ru = torch.nn.functional.interpolate(rpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1), mode='bilinear', align_corners=True) bu = torch.nn.functional.interpolate(bpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1), mode='bilinear', align_corners=True) # remove the extra padding ru = torch.nn.functional.pad(ru, [-x for x in rpad]) bu = torch.nn.functional.pad(bu, [-x for x in bpad]) # all unknown pixels are the average of the nearby green samples kernel = torch.tensor([[[[0.0, 0.25, 0.0], [0.25, 0.0, 0.25], [0.0, 0.25, 0.0]]]], dtype=image.dtype, device=image.device) # This is done on all samples but result for the known green samples is then overwritten by the input gu = torch.nn.functional.conv2d(gpadded, kernel, padding='valid') # overwrite the already known samples which otherwise have values from r/b # this depends on the CFA configuration if cfa == CFA.BG: gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2] gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2] elif cfa == CFA.GB: gu[:, :, ::2, ::2] = image[:, :, ::2, ::2] gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2] elif cfa == CFA.RG: gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2] gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2] elif cfa == CFA.GR: gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2] gu[:, :, ::2, ::2] = image[:, :, ::2, ::2] else: raise ValueError(f""Unsupported CFA "" f""Got {cfa}."") rgb: torch.Tensor = torch.cat([ru, gu, bu], dim=-3) # return possibly missing batch dim if dosqueeze: rgb = rgb.squeeze(0) return rgb " 858,"def sub_pre(e): """""" Replace y - x with -(x - y) if -1 can be extracted from y - x. """""" # replacing Add, A, from which -1 can be extracted with -1*-A adds = [a for a in e.atoms(Add) if a.could_extract_minus_sign()] reps = {} ignore = set() for a in adds: na = -a if na.is_Mul: # e.g. MatExpr set.add(a) continue reps[a] = Mul._from_args([S.NegativeOne, na]) e = e.xreplace(reps) # repeat again for persisting Adds but mark these with a leading 1, -1 # e.g. y - x -> 1*-1*(x - y) if isinstance(e, Basic): negs = {} for a in sorted(e.atoms(Add), key=default_sort_key): if a in ignore: continue if a in reps: negs[a] = reps[a] elif a.could_extract_minus_sign(): negs[a] = Mul._from_args([S.One, S.NegativeOne, -a]) e = e.xreplace(negs) return e ","def sub_pre(e): """""" Replace y - x with -(x - y) if -1 can be extracted from y - x. """""" # replacing Add, A, from which -1 can be extracted with -1*-A adds = [a for a in e.atoms(Add) if a.could_extract_minus_sign()] reps = {} ignore = set() for a in adds: na = -a if na.is_Mul: # e.g. MatExpr ignore.add(a) continue reps[a] = Mul._from_args([S.NegativeOne, na]) e = e.xreplace(reps) # repeat again for persisting Adds but mark these with a leading 1, -1 # e.g. 
y - x -> 1*-1*(x - y) if isinstance(e, Basic): negs = {} for a in sorted(e.atoms(Add), key=default_sort_key): if a in ignore: continue if a in reps: negs[a] = reps[a] elif a.could_extract_minus_sign(): negs[a] = Mul._from_args([S.One, S.NegativeOne, -a]) e = e.xreplace(negs) return e " 9181,"def weblate_context(request): """"""Context processor to inject various useful variables into context."""""" if url_has_allowed_host_and_scheme(request.GET.get(""next"", """"), allowed_hosts=None): login_redirect_url = request.GET[""next""] else: login_redirect_url = request.get_full_path() # Load user translations if user is authenticated watched_projects = None if hasattr(request, ""user"") and request.user.is_authenticated: watched_projects = request.user.watched_projects if settings.OFFER_HOSTING: description = _(""Hosted Weblate, the place to localize your software project."") else: description = _( ""This site runs Weblate for localizing various software projects."" ) if hasattr(request, ""_weblate_has_support""): has_support = request._weblate_has_support else: has_support_cache_key = ""weblate:has:support"" has_support = cache.get(has_support_cache_key) if has_support is None: support_status = SupportStatus.objects.get_current() has_support = support_status.name != ""community"" cache.set(has_support_cache_key, has_support, 86400) request._weblate_has_support = has_support utcnow = datetime.utcnow() context = { ""has_support"": has_support, ""cache_param"": f""?v={weblate.utils.version.GIT_VERSION}"" if not settings.COMPRESS_ENABLED else """", ""version"": weblate.utils.version.VERSION, ""bread_image"": get_bread_image(request.path), ""description"": description, ""weblate_link"": mark_safe(f'weblate.org'), ""weblate_name_link"": mark_safe(f'Weblate'), ""weblate_version_link"": format_html( 'Weblate {}', WEBLATE_URL, mark_safe("""" if settings.HIDE_VERSION else weblate.utils.version.VERSION), ), ""donate_url"": DONATE_URL, ""site_url"": get_site_url(), ""site_domain"": get_site_domain(), ""current_date"": utcnow.strftime(""%Y-%m-%d""), ""current_year"": utcnow.strftime(""%Y""), ""current_month"": utcnow.strftime(""%m""), ""login_redirect_url"": login_redirect_url, ""has_ocr"": weblate.screenshots.views.HAS_OCR, ""has_antispam"": bool(settings.AKISMET_API_KEY), ""has_sentry"": bool(settings.SENTRY_DSN), ""watched_projects"": watched_projects, ""allow_index"": False, ""configuration_errors"": ConfigurationError.objects.filter( ignored=False ).order_by(""-timestamp""), ""preconnect_list"": get_preconnect_list(), ""custom_css_hash"": CustomCSSView.get_hash(request), ""interledger_payment_pointer"": get_interledger_payment_pointer(), } add_error_logging_context(context) add_settings_context(context) add_optional_context(context) return context ","def weblate_context(request): """"""Context processor to inject various useful variables into context."""""" if url_has_allowed_host_and_scheme(request.GET.get(""next"", """"), allowed_hosts=None): login_redirect_url = request.GET[""next""] else: login_redirect_url = request.get_full_path() # Load user translations if user is authenticated watched_projects = None if hasattr(request, ""user"") and request.user.is_authenticated: watched_projects = request.user.watched_projects if settings.OFFER_HOSTING: description = _(""Hosted Weblate, the place to localize your software project."") else: description = _( ""This site runs Weblate for localizing various software projects."" ) if hasattr(request, ""_weblate_has_support""): has_support = 
request._weblate_has_support else: has_support_cache_key = ""weblate:has:support"" has_support = cache.get(has_support_cache_key) if has_support is None: support_status = SupportStatus.objects.get_current() has_support = support_status.name != ""community"" cache.set(has_support_cache_key, has_support, 86400) request._weblate_has_support = has_support utcnow = datetime.utcnow() context = { ""has_support"": has_support, ""cache_param"": f""?v={weblate.utils.version.GIT_VERSION}"" if not settings.COMPRESS_ENABLED else """", ""version"": weblate.utils.version.VERSION, ""bread_image"": get_bread_image(request.path), ""description"": description, ""weblate_link"": mark_safe(f'weblate.org'), ""weblate_name_link"": mark_safe(f'Weblate'), ""weblate_version_link"": format_html( 'Weblate {}', WEBLATE_URL, """" if settings.HIDE_VERSION else weblate.utils.version.VERSION, ), ""donate_url"": DONATE_URL, ""site_url"": get_site_url(), ""site_domain"": get_site_domain(), ""current_date"": utcnow.strftime(""%Y-%m-%d""), ""current_year"": utcnow.strftime(""%Y""), ""current_month"": utcnow.strftime(""%m""), ""login_redirect_url"": login_redirect_url, ""has_ocr"": weblate.screenshots.views.HAS_OCR, ""has_antispam"": bool(settings.AKISMET_API_KEY), ""has_sentry"": bool(settings.SENTRY_DSN), ""watched_projects"": watched_projects, ""allow_index"": False, ""configuration_errors"": ConfigurationError.objects.filter( ignored=False ).order_by(""-timestamp""), ""preconnect_list"": get_preconnect_list(), ""custom_css_hash"": CustomCSSView.get_hash(request), ""interledger_payment_pointer"": get_interledger_payment_pointer(), } add_error_logging_context(context) add_settings_context(context) add_optional_context(context) return context " 12481,"def parse_gray_color(cup: bytes) -> str: """"""Reproduce a gray color in ANSI escape sequence"""""" if sys.platform == ""win32"": assert False, ""curses is not available on Windows"" set_color = ''.join([cup[:-1].decode(), 'm']) gray = curses.tparm(set_color.encode('utf-8'), 1, 89).decode() return gray ","def parse_gray_color(cup: bytes) -> str: """"""Reproduce a gray color in ANSI escape sequence"""""" assert sys.platform != ""win32"", ""curses is not available on Windows"" set_color = ''.join([cup[:-1].decode(), 'm']) gray = curses.tparm(set_color.encode('utf-8'), 1, 89).decode() return gray " 806,"def test_derivatives_matrix_norms(): expr = x.T*y assert expr.diff(x) == y expr = (x.T*y)**S.Half assert expr.diff(x) == y/(2*sqrt(x.T*y)) expr = (x.T*x)**S.Half assert expr.diff(x) == x*(x.T*x)**S.Half expr = (c.T*a*x.T*b)**S.Half assert expr.diff(x) == b/(2*sqrt(c.T*a*x.T*b))*c.T*a expr = (c.T*a*x.T*b)**(S.One/3) assert expr.diff(x) == b*(c.T*a*x.T*b)**(-2*S.One/3)*c.T*a/3 expr = (a.T*X*b)**S.Half assert expr.diff(X) == a/(2*sqrt(a.T*X*b))*b.T expr = d.T*x*(a.T*X*b)**S.Half*y.T*c assert expr.diff(X) == a*x.T*d/(2*sqrt(a.T*X*b))*y.T*c*b.T ","def test_derivatives_matrix_norms(): expr = x.T*y assert expr.diff(x) == y expr = (x.T*y)**S.Half assert expr.diff(x) == y/(2*sqrt(x.T*y)) expr = (x.T*x)**S.Half assert expr.diff(x) == x*(x.T*x)**(-S.Half) expr = (c.T*a*x.T*b)**S.Half assert expr.diff(x) == b/(2*sqrt(c.T*a*x.T*b))*c.T*a expr = (c.T*a*x.T*b)**(S.One/3) assert expr.diff(x) == b*(c.T*a*x.T*b)**(-2*S.One/3)*c.T*a/3 expr = (a.T*X*b)**S.Half assert expr.diff(X) == a/(2*sqrt(a.T*X*b))*b.T expr = d.T*x*(a.T*X*b)**S.Half*y.T*c assert expr.diff(X) == a*x.T*d/(2*sqrt(a.T*X*b))*y.T*c*b.T " 11485,"def format_samples(sdk_code_path) -> None: generate_sample_path = Path(sdk_code_path + 
'/generate_sample') if not os.path.exists(generate_sample_path): _LOGGER.info(f'not find generate_sample') return try: import black except Exception as e: check_call('pip install black', shell=True) import black _BLACK_MODE = black.Mode() _BLACK_MODE.line_length = 120 files = generate_sample_path.glob('**/*.py') for path in files: with open(path, 'r') as fr: file_content = fr.read() with suppress(black.NothingChanged): file_content = black.format_file_contents(file_content, fast=True, mode=_BLACK_MODE) with open(path, 'w') as fw: fw.write(file_content) _LOGGER.info(f'format generate_sample successfully') ","def format_samples(sdk_code_path) -> None: generate_sample_path = Path(sdk_code_path) / 'generated_samples' if not os.path.exists(generate_sample_path): _LOGGER.info(f'not find generate_sample') return try: import black except Exception as e: check_call('pip install black', shell=True) import black _BLACK_MODE = black.Mode() _BLACK_MODE.line_length = 120 files = generate_sample_path.glob('**/*.py') for path in files: with open(path, 'r') as fr: file_content = fr.read() with suppress(black.NothingChanged): file_content = black.format_file_contents(file_content, fast=True, mode=_BLACK_MODE) with open(path, 'w') as fw: fw.write(file_content) _LOGGER.info(f'format generate_sample successfully') " 32571,"def update_notable_events(baseurl, comment, status=None, urgency=None, owner=None, eventIDs=None, disposition=None, searchID=None, auth_token=None, sessionKey=None): """""" Update some notable events. Arguments: comment -- A description of the change or some information about the notable events status -- A status (only required if you are changing the status of the event) urgency -- An urgency (only required if you are changing the urgency of the event) owner -- A nowner (only required if reassigning the event) eventIDs -- A list of notable event IDs (must be provided if a search ID is not provided) searchID -- An ID of a search. All of the events associated with this search will be modified unless a list of eventIDs are provided that limit the scope to a sub-set of the results. 
auth_token - The authentication token to use sessionKey -- The session key to use """""" # Make sure that the session ID was provided if not sessionKey and not auth_token: raise Exception(""A session_key/auth_token was not provided"") # Make sure that rule IDs and/or a search ID is provided if eventIDs is None and searchID is None: raise Exception(""Either eventIDs of a searchID must be provided (or both)"") # These the arguments to the REST handler args = {} args['comment'] = comment if status is not None: args['status'] = status if urgency is not None: args['urgency'] = urgency if owner is not None: args['newOwner'] = owner # Provide the list of event IDs that you want to change: if eventIDs is not None: args['ruleUIDs'] = eventIDs if disposition is not None: args['disposition'] = disposition # If you want to manipulate the notable events returned by a search then include the search ID if searchID is not None: args['searchID'] = searchID if not auth_token: auth_header = {'Authorization': sessionKey} else: auth_header = {'Authorization': 'Bearer %s' % auth_token} args['output_mode'] = 'json' mod_notables = requests.post(baseurl + 'services/notable_update', data=args, headers=auth_header, verify=VERIFY_CERTIFICATE) return mod_notables.json() ","def update_notable_events(baseurl, comment, status=None, urgency=None, owner=None, eventIDs=None, disposition=None, searchID=None, auth_token=None, sessionKey=None): """""" Update some notable events. Arguments: comment -- A description of the change or some information about the notable events status -- A status (only required if you are changing the status of the event) urgency -- An urgency (only required if you are changing the urgency of the event) owner -- A nowner (only required if reassigning the event) eventIDs -- A list of notable event IDs (must be provided if a search ID is not provided) searchID -- An ID of a search. All of the events associated with this search will be modified unless a list of eventIDs are provided that limit the scope to a sub-set of the results. 
auth_token - The authentication token to use sessionKey -- The session key to use """""" # Make sure that the session ID was provided if not sessionKey and not auth_token: raise Exception(""A session_key/auth_token was not provided"") # Make sure that rule IDs and/or a search ID is provided if eventIDs is None and searchID is None: raise Exception(""Either eventIDs of a searchID must be provided (or both)"") # These the arguments to the REST handler args = {} args['comment'] = comment if status is not None: args['status'] = status if urgency is not None: args['urgency'] = urgency if owner is not None: args['newOwner'] = owner # Provide the list of event IDs that you want to change: if eventIDs is not None: args['ruleUIDs'] = eventIDs if disposition: args['disposition'] = disposition # If you want to manipulate the notable events returned by a search then include the search ID if searchID is not None: args['searchID'] = searchID if not auth_token: auth_header = {'Authorization': sessionKey} else: auth_header = {'Authorization': 'Bearer %s' % auth_token} args['output_mode'] = 'json' mod_notables = requests.post(baseurl + 'services/notable_update', data=args, headers=auth_header, verify=VERIFY_CERTIFICATE) return mod_notables.json() " 34307,"def add_confused_intents_to_report( report: Dict, target_intents: Iterable[Any], predicted_intents: Iterable[Any] ) -> Dict: from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels cnf_matrix = confusion_matrix(target_intents, predicted_intents) indices = np.argsort(cnf_matrix, axis=1) labels = unique_labels(target_intents, predicted_intents) n_candidates = min(3, len(labels)) for label in labels: if report.get(label): report[label][""confused_with""] = {} for i, label in enumerate(labels): for j in range(n_candidates): label_idx = indices[i, -j] _label = labels[label_idx] num_hits = int(cnf_matrix[i, label_idx]) if _label != label and num_hits > 0: report[label][""confused_with""][_label] = num_hits return report ","def _add_confused_intents_to_report( report: Dict, target_intents: Iterable[Any], predicted_intents: Iterable[Any] ) -> Dict: from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels cnf_matrix = confusion_matrix(target_intents, predicted_intents) indices = np.argsort(cnf_matrix, axis=1) labels = unique_labels(target_intents, predicted_intents) n_candidates = min(3, len(labels)) for label in labels: if report.get(label): report[label][""confused_with""] = {} for i, label in enumerate(labels): for j in range(n_candidates): label_idx = indices[i, -j] _label = labels[label_idx] num_hits = int(cnf_matrix[i, label_idx]) if _label != label and num_hits > 0: report[label][""confused_with""][_label] = num_hits return report " 17670,"def run_via_pbs(args, pbs): warnings.warn(""Job submission via --pbs-runner is deprecated."" ""Use something like condor_run"", DeprecationWarning) assert(pbs in ('condor',)) # for now # TODO: RF to support multiple backends, parameters, etc, for now -- just condor, no options f = NamedTemporaryFile('w', prefix='datalad-%s-' % pbs, suffix='.submit', delete=False) try: pwd = getpwd() logs = f.name.replace('.submit', '.log') exe = args[0] # TODO: we might need better way to join them, escaping spaces etc. 
There must be a stock helper #exe_args = ' '.join(map(repr, args[1:])) if len(args) > 1 else '' exe_args = ' '.join(args[1:]) if len(args) > 1 else '' f.write(""""""\ Executable = %(exe)s Initialdir = %(pwd)s Output = %(logs)s Error = %(logs)s getenv = True arguments = %(exe_args)s queue """""" % locals()) f.close() Runner().run(['condor_submit', f.name]) lgr.info(""Scheduled execution via %s. Logs will be stored under %s"", pbs, logs) finally: unlink(f.name) ","def run_via_pbs(args, pbs): warnings.warn(""Job submission via --pbs-runner is deprecated."" ""Use something like condor_run from datalad-htcondor extension"", DeprecationWarning) assert(pbs in ('condor',)) # for now # TODO: RF to support multiple backends, parameters, etc, for now -- just condor, no options f = NamedTemporaryFile('w', prefix='datalad-%s-' % pbs, suffix='.submit', delete=False) try: pwd = getpwd() logs = f.name.replace('.submit', '.log') exe = args[0] # TODO: we might need better way to join them, escaping spaces etc. There must be a stock helper #exe_args = ' '.join(map(repr, args[1:])) if len(args) > 1 else '' exe_args = ' '.join(args[1:]) if len(args) > 1 else '' f.write(""""""\ Executable = %(exe)s Initialdir = %(pwd)s Output = %(logs)s Error = %(logs)s getenv = True arguments = %(exe_args)s queue """""" % locals()) f.close() Runner().run(['condor_submit', f.name]) lgr.info(""Scheduled execution via %s. Logs will be stored under %s"", pbs, logs) finally: unlink(f.name) " 34946,"def load_module(path, fmt=""""): """"""Load module from file. Parameters ---------- path : str The path to the module file. fmt : str, optional The format of the file, if not specified it will be inferred from suffix of the file. Returns ------- module : runtime.Module The loaded module Note ---- This function will automatically call cc.create_shared if the path is in format .o or .tar """""" # c++ compiler/linker cc = os.environ[""CXX""] if ""CXX"" in os.environ.keys() else ""g++"" # High level handling for .o and .tar file. # We support this to be consistent with RPC module load. if path.endswith("".o""): # Extra dependencies during runtime. from tvm.contrib import cc as _cc _cc.create_shared(path + "".so"", path, cc=cc) path += "".so"" elif path.endswith("".tar""): # Extra dependencies during runtime. from tvm.contrib import cc as _cc, util as _util, tar as _tar tar_temp = _util.tempdir(custom_path=path.replace("".tar"", """")) _tar.untar(path, tar_temp.temp_dir) files = [tar_temp.relpath(x) for x in tar_temp.listdir()] _cc.create_shared(path + "".so"", files, cc=cc) path += "".so"" # TODO(weberlo): we should probably use a more distinctive suffix for uTVM object files elif path.endswith("".obj""): fmt = ""micro_dev"" # Redirect to the load API return _ffi_api.ModuleLoadFromFile(path, fmt) ","def load_module(path, fmt=""""): """"""Load module from file. Parameters ---------- path : str The path to the module file. fmt : str, optional The format of the file, if not specified it will be inferred from suffix of the file. Returns ------- module : runtime.Module The loaded module Note ---- This function will automatically call cc.create_shared if the path is in format .o or .tar """""" # c++ compiler/linker cc = os.environ.get(""CXX"", ""g++"") # High level handling for .o and .tar file. # We support this to be consistent with RPC module load. if path.endswith("".o""): # Extra dependencies during runtime. 
from tvm.contrib import cc as _cc _cc.create_shared(path + "".so"", path, cc=cc) path += "".so"" elif path.endswith("".tar""): # Extra dependencies during runtime. from tvm.contrib import cc as _cc, util as _util, tar as _tar tar_temp = _util.tempdir(custom_path=path.replace("".tar"", """")) _tar.untar(path, tar_temp.temp_dir) files = [tar_temp.relpath(x) for x in tar_temp.listdir()] _cc.create_shared(path + "".so"", files, cc=cc) path += "".so"" # TODO(weberlo): we should probably use a more distinctive suffix for uTVM object files elif path.endswith("".obj""): fmt = ""micro_dev"" # Redirect to the load API return _ffi_api.ModuleLoadFromFile(path, fmt) " 28076,"def get_source_file_paths( file_filters: List[str], compile_commands: tu_collector.CompilationDB, header_file_extensions=( '.h', '.hh', '.H', '.hp', '.hxx', '.hpp', '.HPP', '.h++', '.tcc') ) -> List[str]: """""" Returns a list of source files for existing header file otherwise returns with the same file path expression. """""" file_paths = [] for file_filter in file_filters: file_paths.append(file_filter) if os.path.exists(file_filter) and \ file_filter.endswith(header_file_extensions): LOG.info(""Get dependent source files for '%s'..."", file_filter) dependent_sources = tu_collector.get_dependent_sources( compile_commands, file_filter) LOG.info(""Get dependent source files for '%s' done."", file_filter) LOG.debug(""Dependent source files: %s"", ', '.join(dependent_sources)) file_paths.extend(dependent_sources) return file_paths ","def get_source_file_paths( file_filters: List[str], compile_commands: tu_collector.CompilationDB, header_file_extensions=( '.h', '.hh', '.H', '.hp', '.hxx', '.hpp', '.HPP', '.h++', '.tcc') ) -> List[str]: """""" Returns a list of source files for existing header file otherwise returns with the same file path expression. """""" file_paths = [] for file_filter in file_filters: file_paths.append(file_filter) if os.path.exists(file_filter) and \ file_filter.endswith(header_file_extensions): LOG.info(""Get dependent source files for '%s'..."", file_filter) dependent_sources = tu_collector.get_dependent_sources( compile_commands, file_filter) LOG.info(""Get dependent source files for '%s' done."", file_filter) LOG.debug(""Dependent source files: %s"", ', '.join(dependent_sources)) file_paths.extend(dependent_sources) file_paths.update(dependent_sources) return file_paths " 45708,"def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. 
Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). 
References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast:"") print(""------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (vil.shape[1], vil.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""FFT: %s"" % fft_method) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the ARI(p,1) model: %d"" % ar_order) if type(ar_window_radius) == int: print(""ARI(p,1) window radius: %d"" % ar_window_radius) else: print(""ARI(p,1) window radius: none"") print(""R(VIL) window radius: %d"" % r_vil_window_radius) if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, allow_nonfinite_values=True, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the 
resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() r_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" if rainrate is not None: r_f_prev = r_vil_a * vil[-1, :] + r_vil_b else: r_f_prev = vil[-1, :] extrap_kwargs[""return_displacement""] = True dp = None t_nowcast = 0 t_prev = 0.0 for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... "" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() # iterate the ARI models for each cascade level for i in range(n_cascade_levels): vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i]) # recompose the cascade to obtain the forecast field vil_dec_dict = {} vil_dec_dict[""cascade_levels""] = vil_dec[:, -1, :] vil_dec_dict[""domain""] = ""spatial"" vil_dec_dict[""normalized""] = False vil_f = recomp_method(vil_dec_dict) vil_f[~mask] = np.nan if rainrate is not None: # convert VIL to rain rate r_f_new = r_vil_a * vil_f + r_vil_b else: r_f_new = vil_f if apply_rainrate_mask: r_f_new[rainrate_mask] = 0.0 r_f_new[r_f_new < 0.0] = 0.0 # advect the recomposed field to obtain the forecast for the current # time step (or subtimesteps if non-integer time steps are given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: r_f_ip = ( 1.0 - t_diff_prev_int ) * r_f_prev + t_diff_prev_int * r_f_new else: r_f_ip = r_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = dp r_f_ep, dp = extrapolator( r_f_ip, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) r_f.append(r_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if len(subtimesteps) == 0: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = dp _, dp = extrapolator( None, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) t_prev = t + 1 r_f_prev = r_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop if measure_time: return np.stack(r_f), init_time, mainloop_time else: return np.stack(r_f) ","def forecast( vil, velocity, timesteps, rainrate=None, n_cascade_levels=8, extrap_method=""semilagrangian"", ar_order=2, ar_window_radius=50, r_vil_window_radius=3, fft_method=""numpy"", apply_rainrate_mask=True, num_workers=1, extrap_kwargs=None, filter_kwargs=None, measure_time=False, ): """"""Generate a nowcast 
by using the autoregressive nowcasting using VIL (ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast. The key features are: 1) Growth and decay: implemented by using a cascade decomposition and a multiscale autoregressive integrated ARI(p,1) model. Instead of the original time series, the ARI model is applied to the differenced one corresponding to time derivatives. 2) Originally designed for using integrated liquid (VIL) as the input data. In this case, the rain rate (R) is obtained from VIL via an empirical relation. This implementation is more general so that the input can be any two-dimensional precipitation field. 3) The parameters of the ARI model and the R(VIL) relation are allowed to be spatially variable. The estimation is done using a moving window. Parameters ---------- vil: array_like Array of shape (ar_order+2,m,n) containing the input fields ordered by timestamp from oldest to newest. The inputs are expected to contain VIL or rain rate. The time steps between the inputs are assumed to be regular. velocity: array_like Array of shape (2,m,n) containing the x- and y-components of the advection field. The velocities are assumed to represent one time step between the inputs. All values are required to be finite. timesteps: int or list Number of time steps to forecast or a list of time steps for which the forecasts are computed (relative to the input time step). The elements of the list are required to be in ascending order. rainrate: array_like Array of shape (m,n) containing the most recently observed rain rate field. If set to None, no R(VIL) conversion is done and the outputs are in the same units as the inputs. n_cascade_levels: int, optional The number of cascade levels to use. extrap_method: str, optional Name of the extrapolation method to use. See the documentation of pysteps.extrapolation.interface. ar_order: int, optional The order of the autoregressive model to use. The recommended values are 1 or 2. Using a higher-order model is strongly discouraged because the stationarity of the AR process cannot be guaranteed. ar_window_radius: int, optional The radius of the window to use for determining the parameters of the autoregressive model. Set to None to disable localization. r_vil_window_radius: int, optional The radius of the window to use for determining the R(VIL) relation. Applicable if rainrate is not None. fft_method: str, optional A string defining the FFT method to use (see utils.fft.get_method). Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed, the recommended method is 'pyfftw'. apply_rainrate_mask: bool Apply mask to prevent producing precipitation to areas where it was not originally observed. Defaults to True. Disabling this may improve some verification metrics but increases the number of false alarms. Applicable if rainrate is None. num_workers: int, optional The number of workers to use for parallel computation. Applicable if dask is installed or pyFFTW is used for computing the FFT. When num_workers>1, it is advisable to disable OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous threads. extrap_kwargs: dict, optional Optional dictionary containing keyword arguments for the extrapolation method. See the documentation of pysteps.extrapolation. filter_kwargs: dict, optional Optional dictionary containing keyword arguments for the filter method. See the documentation of pysteps.cascade.bandpass_filters.py. 
measure_time: bool, optional If True, measure, print and return the computation time. Returns ------- out: ndarray A three-dimensional array of shape (num_timesteps,m,n) containing a time series of forecast precipitation fields. The time series starts from t0+timestep, where timestep is taken from the input VIL/rain rate fields. If measure_time is True, the return value is a three-element tuple containing the nowcast array, the initialization time of the nowcast generator and the time used in the main loop (seconds). References ---------- :cite:`PCLH2020` """""" _check_inputs(vil, rainrate, velocity, timesteps, ar_order) if extrap_kwargs is None: extrap_kwargs = dict() else: extrap_kwargs = extrap_kwargs.copy() if filter_kwargs is None: filter_kwargs = dict() print(""Computing ANVIL nowcast:"") print(""------------------------"") print("""") print(""Inputs:"") print(""-------"") print(""input dimensions: %dx%d"" % (vil.shape[1], vil.shape[2])) print("""") print(""Methods:"") print(""--------"") print(""extrapolation: %s"" % extrap_method) print(""FFT: %s"" % fft_method) print("""") print(""Parameters:"") print(""-----------"") if isinstance(timesteps, int): print(""number of time steps: %d"" % timesteps) else: print(""time steps: %s"" % timesteps) print(""parallel threads: %d"" % num_workers) print(""number of cascade levels: %d"" % n_cascade_levels) print(""order of the ARI(p,1) model: %d"" % ar_order) if type(ar_window_radius) == int: print(""ARI(p,1) window radius: %d"" % ar_window_radius) else: print(""ARI(p,1) window radius: none"") print(""R(VIL) window radius: %d"" % r_vil_window_radius) if measure_time: starttime_init = time.time() m, n = vil.shape[1:] vil = vil.copy() if rainrate is None and apply_rainrate_mask: rainrate_mask = vil[-1, :] < 0.1 if rainrate is not None: # determine the coefficients fields of the relation R=a*VIL+b by # localized linear regression r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius) # transform the input fields to Lagrangian coordinates by extrapolation extrapolator = extrapolation.get_method(extrap_method) res = list() def worker(vil, i): return ( i, extrapolator( vil[i, :], velocity, vil.shape[0] - 1 - i, allow_nonfinite_values=True, **extrap_kwargs, )[-1], ) for i in range(vil.shape[0] - 1): if not DASK_IMPORTED or num_workers == 1: vil[i, :, :] = worker(vil, i)[1] else: res.append(dask.delayed(worker)(vil, i)) if DASK_IMPORTED and num_workers > 1: num_workers_ = len(res) if num_workers > len(res) else num_workers vil_e = dask.compute(*res, num_workers=num_workers_) for i in range(len(vil_e)): vil[vil_e[i][0], :] = vil_e[i][1] # compute the final mask as the intersection of the masks of the advected # fields mask = np.isfinite(vil[0, :]) for i in range(1, vil.shape[0]): mask = np.logical_and(mask, np.isfinite(vil[i, :])) if rainrate is None and apply_rainrate_mask: rainrate_mask = np.logical_and(rainrate_mask, mask) # apply cascade decomposition to the advected input fields bp_filter_method = cascade.get_method(""gaussian"") bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs) fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers) decomp_method, recomp_method = cascade.get_method(""fft"") vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n)) for i in range(vil.shape[0]): vil_ = vil[i, :].copy() vil_[~np.isfinite(vil_)] = 0.0 vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft) for j in range(n_cascade_levels): vil_dec[j, i, :] = vil_dec_i[""cascade_levels""][j, :] # 
compute time-lagged correlation coefficients for the cascade levels of # the advected and differenced input fields gamma = np.empty((n_cascade_levels, ar_order, m, n)) for i in range(n_cascade_levels): vil_diff = np.diff(vil_dec[i, :], axis=0) vil_diff[~np.isfinite(vil_diff)] = 0.0 for j in range(ar_order): gamma[i, j, :] = _moving_window_corrcoef( vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius ) if ar_order == 2: # if the order of the ARI model is 2, adjust the correlation coefficients # so that the resulting process is stationary for i in range(n_cascade_levels): gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2( gamma[i, 0, :], gamma[i, 1, :] ) # estimate the parameters of the ARI models phi = [] for i in range(n_cascade_levels): if ar_order > 2: phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1) elif ar_order == 2: phi_ = _estimate_ar2_params(gamma[i, :]) else: phi_ = _estimate_ar1_params(gamma[i, :]) phi.append(phi_) vil_dec = vil_dec[:, -(ar_order + 1) :, :] if measure_time: init_time = time.time() - starttime_init print(""Starting nowcast computation."") if measure_time: starttime_mainloop = time.time() r_f = [] if isinstance(timesteps, int): timesteps = range(timesteps + 1) timestep_type = ""int"" else: original_timesteps = [0] + list(timesteps) timesteps = nowcast_utils.binned_timesteps(original_timesteps) timestep_type = ""list"" if rainrate is not None: r_f_prev = r_vil_a * vil[-1, :] + r_vil_b else: r_f_prev = vil[-1, :] extrap_kwargs[""return_displacement""] = True dp = None t_nowcast = 0 t_prev = 0.0 for t in range(len(timesteps)): if timestep_type == ""list"": subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]] else: subtimesteps = [t] if len(subtimesteps) > 1 or t > 0: nowcast_time_step = True else: nowcast_time_step = False if nowcast_time_step: print( ""Computing nowcast for time step %d... 
"" % (t_nowcast + 1), end="""", flush=True, ) t_nowcast += 1 if measure_time: starttime = time.time() # iterate the ARI models for each cascade level for i in range(n_cascade_levels): vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i]) # recompose the cascade to obtain the forecast field vil_dec_dict = {} vil_dec_dict[""cascade_levels""] = vil_dec[:, -1, :] vil_dec_dict[""domain""] = ""spatial"" vil_dec_dict[""normalized""] = False vil_f = recomp_method(vil_dec_dict) vil_f[~mask] = np.nan if rainrate is not None: # convert VIL to rain rate r_f_new = r_vil_a * vil_f + r_vil_b else: r_f_new = vil_f if apply_rainrate_mask: r_f_new[rainrate_mask] = 0.0 r_f_new[r_f_new < 0.0] = 0.0 # advect the recomposed field to obtain the forecast for the current # time step (or subtimesteps if non-integer time steps are given) for t_sub in subtimesteps: if t_sub > 0: t_diff_prev_int = t_sub - int(t_sub) if t_diff_prev_int > 0.0: r_f_ip = ( 1.0 - t_diff_prev_int ) * r_f_prev + t_diff_prev_int * r_f_new else: r_f_ip = r_f_prev t_diff_prev = t_sub - t_prev extrap_kwargs[""displacement_prev""] = dp r_f_ep, dp = extrapolator( r_f_ip, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) r_f.append(r_f_ep[0]) t_prev = t_sub # advect the forecast field by one time step if no subtimesteps in the # current interval were found if not subtimesteps: t_diff_prev = t + 1 - t_prev extrap_kwargs[""displacement_prev""] = dp _, dp = extrapolator( None, velocity, [t_diff_prev], allow_nonfinite_values=True, **extrap_kwargs, ) t_prev = t + 1 r_f_prev = r_f_new if nowcast_time_step: if measure_time: print(""%.2f seconds."" % (time.time() - starttime)) else: print(""done."") if measure_time: mainloop_time = time.time() - starttime_mainloop if measure_time: return np.stack(r_f), init_time, mainloop_time else: return np.stack(r_f) " 11842,"def APP(self, marker): # # Application marker. Store these in the APP dictionary. # Also look for well-known application markers. n = i16(self.fp.read(2)) - 2 s = ImageFile._safe_read(self.fp, n) app = ""APP%d"" % (marker & 15) self.app[app] = s # compatibility self.applist.append((app, s)) if marker == 0xFFE0 and s[:4] == b""JFIF"": # extract JFIF information self.info[""jfif""] = version = i16(s, 5) # version self.info[""jfif_version""] = divmod(version, 256) # extract JFIF properties try: jfif_unit = i8(s[7]) jfif_density = i16(s, 8), i16(s, 10) except Exception: pass else: if jfif_unit == 1: self.info[""dpi""] = jfif_density self.info[""jfif_unit""] = jfif_unit self.info[""jfif_density""] = jfif_density elif marker == 0xFFE1 and s[:5] == b""Exif\0"": if ""exif"" not in self.info: # extract EXIF information (incomplete) self.info[""exif""] = s # FIXME: value will change elif marker == 0xFFE2 and s[:5] == b""FPXR\0"": # extract FlashPix information (incomplete) self.info[""flashpix""] = s # FIXME: value will change elif marker == 0xFFE2 and s[:12] == b""ICC_PROFILE\0"": # Since an ICC profile can be larger than the maximum size of # a JPEG marker (64K), we need provisions to split it into # multiple markers. The format defined by the ICC specifies # one or more APP2 markers containing the following data: # Identifying string ASCII ""ICC_PROFILE\0"" (12 bytes) # Marker sequence number 1, 2, etc (1 byte) # Number of markers Total of APP2's used (1 byte) # Profile data (remainder of APP2 data) # Decoders should use the marker sequence numbers to # reassemble the profile, rather than assuming that the APP2 # markers appear in the correct sequence. 
self.icclist.append(s) elif marker == 0xFFED and s[:14] == b""Photoshop 3.0\x00"": # parse the image resource block offset = 14 photoshop = self.info.setdefault(""photoshop"", {}) while s[offset : offset + 4] == b""8BIM"": try: offset += 4 # resource code code = i16(s, offset) offset += 2 # resource name (usually empty) name_len = i8(s[offset]) # name = s[offset+1:offset+1+name_len] offset += 1 + name_len offset += offset & 1 # align # resource data block size = i32(s, offset) offset += 4 data = s[offset : offset + size] if code == 0x03ED: # ResolutionInfo data = { ""XResolution"": i32(data[:4]) / 65536, ""DisplayedUnitsX"": i16(data[4:8]), ""YResolution"": i32(data[8:12]) / 65536, ""DisplayedUnitsY"": i16(data[12:]), } photoshop[code] = data offset += size offset += offset & 1 # align except struct.error: break # no sufficient data elif marker == 0xFFEE and s[:5] == b""Adobe"": self.info[""adobe""] = i16(s, 5) # extract Adobe custom properties try: adobe_transform = i8(s[1]) except Exception: pass else: self.info[""adobe_transform""] = adobe_transform elif marker == 0xFFE2 and s[:4] == b""MPF\0"": # extract MPO information self.info[""mp""] = s[4:] # offset is current location minus buffer size # plus constant header size self.info[""mpoffset""] = self.fp.tell() - n + 4 # If DPI isn't in JPEG header, fetch from EXIF if ""dpi"" not in self.info and ""exif"" in self.info: try: exif = self.getexif() resolution_unit = exif[0x0128] x_resolution = exif[0x011A] try: dpi = float(x_resolution[0]) / x_resolution[1] except TypeError: dpi = x_resolution if resolution_unit == 3: # cm # 1 dpcm = 2.54 dpi dpi *= 2.54 self.info[""dpi""] = int(dpi + 0.5), int(dpi + 0.5) except (KeyError, SyntaxError, ValueError, ZeroDivisionError): # SyntaxError for invalid/unreadable EXIF # KeyError for dpi not included # ZeroDivisionError for invalid dpi rational value # ValueError for x_resolution[0] being an invalid float self.info[""dpi""] = 72, 72 ","def APP(self, marker): # # Application marker. Store these in the APP dictionary. # Also look for well-known application markers. n = i16(self.fp.read(2)) - 2 s = ImageFile._safe_read(self.fp, n) app = ""APP%d"" % (marker & 15) self.app[app] = s # compatibility self.applist.append((app, s)) if marker == 0xFFE0 and s[:4] == b""JFIF"": # extract JFIF information self.info[""jfif""] = version = i16(s, 5) # version self.info[""jfif_version""] = divmod(version, 256) # extract JFIF properties try: jfif_unit = i8(s[7]) jfif_density = i16(s, 8), i16(s, 10) except Exception: pass else: if jfif_unit == 1: self.info[""dpi""] = jfif_density self.info[""jfif_unit""] = jfif_unit self.info[""jfif_density""] = jfif_density elif marker == 0xFFE1 and s[:5] == b""Exif\0"": if ""exif"" not in self.info: # extract EXIF information (incomplete) self.info[""exif""] = s # FIXME: value will change elif marker == 0xFFE2 and s[:5] == b""FPXR\0"": # extract FlashPix information (incomplete) self.info[""flashpix""] = s # FIXME: value will change elif marker == 0xFFE2 and s[:12] == b""ICC_PROFILE\0"": # Since an ICC profile can be larger than the maximum size of # a JPEG marker (64K), we need provisions to split it into # multiple markers. 
The format defined by the ICC specifies # one or more APP2 markers containing the following data: # Identifying string ASCII ""ICC_PROFILE\0"" (12 bytes) # Marker sequence number 1, 2, etc (1 byte) # Number of markers Total of APP2's used (1 byte) # Profile data (remainder of APP2 data) # Decoders should use the marker sequence numbers to # reassemble the profile, rather than assuming that the APP2 # markers appear in the correct sequence. self.icclist.append(s) elif marker == 0xFFED and s[:14] == b""Photoshop 3.0\x00"": # parse the image resource block offset = 14 photoshop = self.info.setdefault(""photoshop"", {}) while s[offset : offset + 4] == b""8BIM"": try: offset += 4 # resource code code = i16(s, offset) offset += 2 # resource name (usually empty) name_len = i8(s[offset]) # name = s[offset+1:offset+1+name_len] offset += 1 + name_len offset += offset & 1 # align # resource data block size = i32(s, offset) offset += 4 data = s[offset : offset + size] if code == 0x03ED: # ResolutionInfo data = { ""XResolution"": i32(data[:4]) / 65536, ""DisplayedUnitsX"": i16(data[4:8]), ""YResolution"": i32(data[8:12]) / 65536, ""DisplayedUnitsY"": i16(data[12:]), } photoshop[code] = data offset += size offset += offset & 1 # align except struct.error: break # insufficient data elif marker == 0xFFEE and s[:5] == b""Adobe"": self.info[""adobe""] = i16(s, 5) # extract Adobe custom properties try: adobe_transform = i8(s[1]) except Exception: pass else: self.info[""adobe_transform""] = adobe_transform elif marker == 0xFFE2 and s[:4] == b""MPF\0"": # extract MPO information self.info[""mp""] = s[4:] # offset is current location minus buffer size # plus constant header size self.info[""mpoffset""] = self.fp.tell() - n + 4 # If DPI isn't in JPEG header, fetch from EXIF if ""dpi"" not in self.info and ""exif"" in self.info: try: exif = self.getexif() resolution_unit = exif[0x0128] x_resolution = exif[0x011A] try: dpi = float(x_resolution[0]) / x_resolution[1] except TypeError: dpi = x_resolution if resolution_unit == 3: # cm # 1 dpcm = 2.54 dpi dpi *= 2.54 self.info[""dpi""] = int(dpi + 0.5), int(dpi + 0.5) except (KeyError, SyntaxError, ValueError, ZeroDivisionError): # SyntaxError for invalid/unreadable EXIF # KeyError for dpi not included # ZeroDivisionError for invalid dpi rational value # ValueError for x_resolution[0] being an invalid float self.info[""dpi""] = 72, 72 " 24305,"def _walk(top, follow_symlinks=True): """"""Modified version of https://docs.python.org/3/library/os.html#os.scandir that returns https://docs.python.org/3/library/os.html#os.DirEntry for files directly to take advantage of possible cached os.stat calls. """""" dirs = [] nondirs = [] try: scandir_iter = scandir(top) except OSError: return # Avoid repeated global lookups. get_next = next while True: try: entry = get_next(scandir_iter) except StopIteration: break except OSError: return try: is_dir = entry.is_dir(follow_symlinks=follow_symlinks) except OSError: is_dir = False if is_dir: dirs.append(entry) else: nondirs.append(entry) yield top, dirs, nondirs for dir_entry in dirs: for entry in walk(dir_entry.path, follow_symlinks=follow_symlinks): yield entry ","def _walk(top, follow_symlinks): """"""Modified version of https://docs.python.org/3/library/os.html#os.scandir that returns https://docs.python.org/3/library/os.html#os.DirEntry for files directly to take advantage of possible cached os.stat calls. """""" dirs = [] nondirs = [] try: scandir_iter = scandir(top) except OSError: return # Avoid repeated global lookups. 
get_next = next while True: try: entry = get_next(scandir_iter) except StopIteration: break except OSError: return try: is_dir = entry.is_dir(follow_symlinks=follow_symlinks) except OSError: is_dir = False if is_dir: dirs.append(entry) else: nondirs.append(entry) yield top, dirs, nondirs for dir_entry in dirs: for entry in walk(dir_entry.path, follow_symlinks=follow_symlinks): yield entry " 48365,"def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']), update_cache=dict(type='bool', aliases=['update-cache']), cache_valid_time=dict(type='int', default=0), purge=dict(type='bool', default=False), package=dict(type='list', aliases=['pkg', 'name']), deb=dict(type='path'), default_release=dict(type='str', aliases=['default-release']), install_recommends=dict(type='bool', aliases=['install-recommends']), force=dict(type='bool', default=False), upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']), dpkg_options=dict(type='str', default=DPKG_OPTIONS), autoremove=dict(type='bool', default=False), autoclean=dict(type='bool', default=False), policy_rc_d=dict(type='int', default=None), only_upgrade=dict(type='bool', default=False), force_apt_get=dict(type='bool', default=False), allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']), ), mutually_exclusive=[['deb', 'package', 'upgrade']], required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']], supports_check_mode=True, ) module.run_command_environ_update = APT_ENV_VARS if not HAS_PYTHON_APT: if module.check_mode: module.fail_json(msg=""%s must be installed to use check mode. "" ""If run normally this module can auto-install it."" % PYTHON_APT) try: module.warn(""Updating cache and auto-installing missing dependency: %s"" % PYTHON_APT) module.run_command(['apt-get', 'update'], check_rc=True) module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True) global apt, apt_pkg import apt import apt.debfile import apt_pkg except ImportError: module.fail_json(msg=""Could not import python modules: apt, apt_pkg. "" ""Please install %s package."" % PYTHON_APT) global APTITUDE_CMD APTITUDE_CMD = module.get_bin_path(""aptitude"", False) global APT_GET_CMD APT_GET_CMD = module.get_bin_path(""apt-get"") p = module.params if p['upgrade'] == 'no': p['upgrade'] = None use_apt_get = p['force_apt_get'] if not use_apt_get and not APTITUDE_CMD: module.warn(""Could not find aptitude. 
Using apt-get instead"") use_apt_get = True updated_cache = False updated_cache_time = 0 install_recommends = p['install_recommends'] allow_unauthenticated = p['allow_unauthenticated'] dpkg_options = expand_dpkg_options(p['dpkg_options']) autoremove = p['autoremove'] autoclean = p['autoclean'] # Get the cache object cache = get_cache(module) try: if p['default_release']: try: apt_pkg.config['APT::Default-Release'] = p['default_release'] except AttributeError: apt_pkg.Config['APT::Default-Release'] = p['default_release'] # reopen cache w/ modified config cache.open(progress=None) mtimestamp, updated_cache_time = get_updated_cache_time() # Cache valid time is default 0, which will update the cache if # needed and `update_cache` was set to true updated_cache = False if p['update_cache'] or p['cache_valid_time']: now = datetime.datetime.now() tdelta = datetime.timedelta(seconds=p['cache_valid_time']) if not mtimestamp + tdelta >= now: # Retry to update the cache up to 5 times with exponential backoff err = '' max_fail_count = 5 max_fail_sleep = 12 randint = random.randint(0, 1000) / 1000 for retry in range(max_fail_count): try: cache.update() break except apt.cache.FetchFailedException as e: err = to_native(e) # Use exponential backoff plus a little bit of randomness fail_sleep = 2 ** retry + randint if fail_sleep > max_fail_sleep: fail_sleep = max_fail_sleep + randint time.sleep(fail_sleep) else: module.fail_json(msg='Failed to update apt cache: %s' % err) cache.open(progress=None) mtimestamp, post_cache_update_time = get_updated_cache_time() if updated_cache_time != post_cache_update_time: updated_cache = True updated_cache_time = post_cache_update_time # If there is nothing else to do exit. This will set state as # changed based on if the cache was updated. 
if not p['package'] and not p['upgrade'] and not p['deb']: module.exit_json( changed=updated_cache, cache_updated=updated_cache, cache_update_time=updated_cache_time ) force_yes = p['force'] if p['upgrade']: upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated) if p['deb']: if p['state'] != 'present': module.fail_json(msg=""deb only supports state=present"") if '://' in p['deb']: p['deb'] = fetch_file(module, p['deb']) install_deb(module, p['deb'], cache, install_recommends=install_recommends, allow_unauthenticated=allow_unauthenticated, force=force_yes, dpkg_options=p['dpkg_options']) unfiltered_packages = p['package'] or () packages = [package.strip() for package in unfiltered_packages if package != '*'] all_installed = '*' in unfiltered_packages latest = p['state'] == 'latest' if latest and all_installed: if packages: module.fail_json(msg='unable to install additional packages when upgrading all installed packages') upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated) if packages: for package in packages: if package.count('=') > 1: module.fail_json(msg=""invalid package spec: %s"" % package) if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if not packages: if autoclean: cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options) if autoremove: cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options) if p['state'] in ('latest', 'present', 'build-dep', 'fixed'): state_upgrade = False state_builddep = False state_fixed = False if p['state'] == 'latest': state_upgrade = True if p['state'] == 'build-dep': state_builddep = True if p['state'] == 'fixed': state_fixed = True success, retvals = install( module, packages, cache, upgrade=state_upgrade, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options, build_dep=state_builddep, fixed=state_fixed, autoremove=autoremove, only_upgrade=p['only_upgrade'], allow_unauthenticated=allow_unauthenticated ) # Store if the cache has been updated retvals['cache_updated'] = updated_cache # Store when the update time was last retvals['cache_update_time'] = updated_cache_time if success: module.exit_json(**retvals) else: module.fail_json(**retvals) elif p['state'] == 'absent': remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove) except apt.cache.LockFailedException: module.fail_json(msg=""Failed to lock apt for exclusive operation"") except apt.cache.FetchFailedException: module.fail_json(msg=""Could not fetch updated apt files"") ","def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']), update_cache=dict(type='bool', aliases=['update-cache']), cache_valid_time=dict(type='int', default=0), purge=dict(type='bool', default=False), package=dict(type='list', aliases=['pkg', 'name']), deb=dict(type='path'), default_release=dict(type='str', aliases=['default-release']), install_recommends=dict(type='bool', aliases=['install-recommends']), force=dict(type='bool', default=False), upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']), dpkg_options=dict(type='str', default=DPKG_OPTIONS), autoremove=dict(type='bool', default=False), 
autoclean=dict(type='bool', default=False), policy_rc_d=dict(type='int', default=None), only_upgrade=dict(type='bool', default=False), force_apt_get=dict(type='bool', default=False), allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']), ), mutually_exclusive=[['deb', 'package', 'upgrade']], required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']], supports_check_mode=True, ) module.run_command_environ_update = APT_ENV_VARS if not HAS_PYTHON_APT: if module.check_mode: module.fail_json(msg=""%s must be installed to use check mode. "" ""If run normally this module can auto-install it."" % PYTHON_APT) try: module.warn(""Updating cache and auto-installing missing dependency: %s"" % PYTHON_APT) module.run_command(['apt-get', 'update'], check_rc=True) module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True) global apt, apt_pkg import apt import apt.debfile import apt_pkg except ImportError: module.fail_json(msg=""Could not import python modules: apt, apt_pkg. "" ""Please install %s package."" % PYTHON_APT) global APTITUDE_CMD APTITUDE_CMD = module.get_bin_path(""aptitude"", False) global APT_GET_CMD APT_GET_CMD = module.get_bin_path(""apt-get"") p = module.params if p['upgrade'] == 'no': p['upgrade'] = None use_apt_get = p['force_apt_get'] if not use_apt_get and not APTITUDE_CMD: module.warn(""Could not find aptitude. Using apt-get instead"") use_apt_get = True updated_cache = False updated_cache_time = 0 install_recommends = p['install_recommends'] allow_unauthenticated = p['allow_unauthenticated'] dpkg_options = expand_dpkg_options(p['dpkg_options']) autoremove = p['autoremove'] autoclean = p['autoclean'] # Get the cache object cache = get_cache(module) try: if p['default_release']: try: apt_pkg.config['APT::Default-Release'] = p['default_release'] except AttributeError: apt_pkg.Config['APT::Default-Release'] = p['default_release'] # reopen cache w/ modified config cache.open(progress=None) mtimestamp, updated_cache_time = get_updated_cache_time() # Cache valid time is default 0, which will update the cache if # needed and `update_cache` was set to true updated_cache = False if p['update_cache'] or p['cache_valid_time']: now = datetime.datetime.now() tdelta = datetime.timedelta(seconds=p['cache_valid_time']) if not mtimestamp + tdelta >= now: # Retry to update the cache up to 5 times with exponential backoff err = '' max_fail_count = 5 max_fail_sleep = 12 randint = random.randint(0, 1000) / 1000 for retry in range(max_fail_count): try: cache.update() break except apt.cache.FetchFailedException as e: err = to_native(e) # Use exponential backoff plus a little bit of randomness fail_sleep = 2 ** retry + randint if fail_sleep > max_fail_sleep: fail_sleep = max_fail_sleep + randint time.sleep(fail_sleep) else: if err: module.fail_json(msg='Failed to update apt cache: %s' % err) else: module.fail_json(msg='Failed to update apt cache: no errors found.') cache.open(progress=None) mtimestamp, post_cache_update_time = get_updated_cache_time() if updated_cache_time != post_cache_update_time: updated_cache = True updated_cache_time = post_cache_update_time # If there is nothing else to do exit. This will set state as # changed based on if the cache was updated. 
if not p['package'] and not p['upgrade'] and not p['deb']: module.exit_json( changed=updated_cache, cache_updated=updated_cache, cache_update_time=updated_cache_time ) force_yes = p['force'] if p['upgrade']: upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated) if p['deb']: if p['state'] != 'present': module.fail_json(msg=""deb only supports state=present"") if '://' in p['deb']: p['deb'] = fetch_file(module, p['deb']) install_deb(module, p['deb'], cache, install_recommends=install_recommends, allow_unauthenticated=allow_unauthenticated, force=force_yes, dpkg_options=p['dpkg_options']) unfiltered_packages = p['package'] or () packages = [package.strip() for package in unfiltered_packages if package != '*'] all_installed = '*' in unfiltered_packages latest = p['state'] == 'latest' if latest and all_installed: if packages: module.fail_json(msg='unable to install additional packages when upgrading all installed packages') upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated) if packages: for package in packages: if package.count('=') > 1: module.fail_json(msg=""invalid package spec: %s"" % package) if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if not packages: if autoclean: cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options) if autoremove: cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options) if p['state'] in ('latest', 'present', 'build-dep', 'fixed'): state_upgrade = False state_builddep = False state_fixed = False if p['state'] == 'latest': state_upgrade = True if p['state'] == 'build-dep': state_builddep = True if p['state'] == 'fixed': state_fixed = True success, retvals = install( module, packages, cache, upgrade=state_upgrade, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options, build_dep=state_builddep, fixed=state_fixed, autoremove=autoremove, only_upgrade=p['only_upgrade'], allow_unauthenticated=allow_unauthenticated ) # Store if the cache has been updated retvals['cache_updated'] = updated_cache # Store when the update time was last retvals['cache_update_time'] = updated_cache_time if success: module.exit_json(**retvals) else: module.fail_json(**retvals) elif p['state'] == 'absent': remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove) except apt.cache.LockFailedException: module.fail_json(msg=""Failed to lock apt for exclusive operation"") except apt.cache.FetchFailedException: module.fail_json(msg=""Could not fetch updated apt files"") " 29523,"def _check_tcpdump(): """""" Return True if the tcpdump command can be started """""" try: proc = subprocess.Popen( [conf.prog.tcpdump, ""--version""], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output = proc.communicate()[0] except OSError: return False # tcpdump acts strangely on some OSes and returns 1 # therefore we also checks the output return b""tcpdump"" in output or proc.returncode == 0 ","def _check_tcpdump(): """""" Return True if the tcpdump command can be started """""" try: proc = subprocess.Popen( [conf.prog.tcpdump, ""--version""], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output = proc.communicate()[0] except OSError: return False # tcpdump acts strangely on some OSes and returns 1 # On some systems, 
--version does not exist on tcpdump return b""tcpdump"" in output or proc.returncode == 0 " 32250,"def main(): # Args is always stronger. Get last run even stronger demisto_params = demisto.params() #| demisto.args() events_to_add_per_request = demisto_params.get('events_to_add_per_request', 2000) try: events_to_add_per_request = int(events_to_add_per_request) except ValueError: events_to_add_per_request = 2000 after = demisto_params['after'] api_key = demisto_params['api_key'] demisto_params['headers'] = {""Accept"": ""application/json"", ""Content-Type"": ""application/json"", ""Authorization"": f""SSWS {api_key}""} last_run = demisto.getLastRun() last_object_ids = last_run.get('ids') # If we do not have an after in the last run than we calculate after according to now - after param . if 'after' not in last_run: delta = datetime.today() - timedelta(days=after) last_run = delta.isoformat() else: last_run = last_run['after'] demisto_params['params'] = ReqParams(**demisto_params, since=last_run) request = Request(**demisto_params) client = Client(request) get_events = GetEvents(client) command = demisto.command() if command == 'test-module': get_events.aggregated_results() demisto.results('ok') elif command == 'okta-get-events' or command == 'fetch-events': try: events = get_events.aggregated_results(last_object_ids=last_object_ids) except Exception as e: raise Exception(str(e)) events_number = len(events) if events: demisto.setLastRun(GetEvents.get_last_run(events)) if command == 'fetch-events': demisto.updateModuleHealth({'eventsPulled': len(events)}) while len(events) > 0: send_events_to_xsiam(events[:events_to_add_per_request], 'okta', 'okta') events = events[events_to_add_per_request:] elif command == 'okta-get-events': command_results = CommandResults( readable_output=tableToMarkdown('Okta Logs', events, headerTransform=pascalToSpace), outputs_prefix='Okta.Logs', outputs_key_field='published', outputs=events, raw_response=events, ) return_results(command_results) demisto.updateModuleHealth({'eventsPulled': events_number}) ","def main(): # Args is always stronger. Get last run even stronger demisto_params = demisto.params() #| demisto.args() events_to_add_per_request = demisto_params.get('events_to_add_per_request', 2000) try: events_to_add_per_request = int(events_to_add_per_request) except ValueError: events_to_add_per_request = 2000 after = demisto_params['after'] api_key = demisto_params['api_key'] demisto_params['headers'] = {""Accept"": ""application/json"", ""Content-Type"": ""application/json"", ""Authorization"": f""SSWS {api_key}""} last_run = demisto.getLastRun() last_object_ids = last_run.get('ids') # If we do not have an after in the last run than we calculate after according to now - after param . 
if 'after' not in last_run: delta = datetime.today() - timedelta(days=after) last_run = delta.isoformat() else: last_run = last_run['after'] demisto_params['params'] = ReqParams(**demisto_params, since=last_run) request = Request(**demisto_params) client = Client(request) get_events = GetEvents(client) command = demisto.command() if command == 'test-module': get_events.aggregated_results() demisto.results('ok') elif command == 'okta-get-events' or command == 'fetch-events': try: events = get_events.aggregated_results(last_object_ids=last_object_ids) except Exception as e: raise Exception(str(e)) events_number = len(events) if events: demisto.setLastRun(GetEvents.get_last_run(events)) if command == 'fetch-events': demisto.updateModuleHealth({'eventsPulled': len(events)}) while events: send_events_to_xsiam(events[:events_to_add_per_request], 'okta', 'okta') events = events[events_to_add_per_request:] elif command == 'okta-get-events': command_results = CommandResults( readable_output=tableToMarkdown('Okta Logs', events, headerTransform=pascalToSpace), outputs_prefix='Okta.Logs', outputs_key_field='published', outputs=events, raw_response=events, ) return_results(command_results) demisto.updateModuleHealth({'eventsPulled': events_number}) " 7648,"def get_event_from_url(url): data = urlparse(url) if not all([data.scheme, data.netloc, data.path]): raise ValueError(_('Invalid event URL')) event_path = re.search('^/event/([0-9]+)/*', data.path) if not event_path: raise ValueError(_('Invalid event URL')) event_id = event_path.group(1) event = Event.get(event_id) if not event: raise ValueError(_('Event with id: {} does not exist'.format(event_id))) return event ","def get_event_from_url(url): data = urlparse(url) if not all([data.scheme, data.netloc, data.path]): raise ValueError(_('Invalid event URL')) event_path = re.search('^/event/([0-9]+)/*', data.path) if not event_path: raise ValueError(_('Invalid event URL')) event_id = event_path.group(1) event = Event.get(event_id, is_deleted=False) if not event: raise ValueError(_('Event with id: {} does not exist'.format(event_id))) return event " 45266,"def train( params: Dict, dtrain: ModinDMatrix, *args, evals=(), nthread: Optional[int] = cpu_count(), evenly_data_distribution: Optional[bool] = True, **kwargs, ): """""" Train XGBoost model. Parameters ---------- params : dict Booster params. dtrain : ModinDMatrix Data to be trained. nthread: int Number of threads for using in each node. By default it is equal to number of threads on master node. evenly_data_distribution: boolean, default True Whether make evenly distribution of partitions between nodes or not. In case `False` minimal datatransfer between nodes will be provided but the data may not be evenly distributed. \\*\\*kwargs: Other parameters are the same as `xgboost.train` except for `evals_result`, which is returned as part of function return value instead of argument. Returns ------- dict A dictionary containing trained booster and evaluation history. `history` field is the same as `eval_result` from `xgboost.train`. .. 
code-block:: python {'booster': xgboost.Booster, 'history': {'train': {'logloss': ['0.48253', '0.35953']}, 'eval': {'logloss': ['0.480385', '0.357756']}}} """""" LOGGER.info(""Training started"") s = time.time() X, y = dtrain assert len(X) == len(y) X_row_parts = unwrap_row_partitions(X, bind_ip=not evenly_data_distribution) y_row_parts = unwrap_row_partitions(y, bind_ip=not evenly_data_distribution) assert len(X_row_parts) == len(y_row_parts), ""Unaligned train data"" # Create remote actors actors = create_actors(nthread=nthread) add_as_eval_method = None if len(evals): for (eval_data, method) in evals: if id(eval_data) == id(dtrain): add_as_eval_method = method evals.remove((eval_data, method)) evals_unwrapped = [ ( ( unwrap_row_partitions(eval_X, bind_ip=not evenly_data_distribution), unwrap_row_partitions(eval_y, bind_ip=not evenly_data_distribution), eval_method, ) ) for ((eval_X, eval_y), eval_method) in evals ] for ( eval_X_row_parts, eval_y_row_parts, eval_method, ) in evals_unwrapped: # Split data across workers _split_data_across_actors( actors, lambda actor, *Xy: actor.add_eval_data.remote( *Xy, eval_method=eval_method ), eval_X_row_parts, eval_y_row_parts, evenly_data_distribution=evenly_data_distribution, ) # Split data across workers _split_data_across_actors( actors, lambda actor, *Xy: actor.set_train_data.remote( *Xy, add_as_eval_method=add_as_eval_method ), X_row_parts, y_row_parts, evenly_data_distribution=evenly_data_distribution, ) LOGGER.info(f""Data preparation time: {time.time() - s} s"") s = time.time() # Start Rabit tracker env = _start_rabit_tracker(len(actors)) rabit_args = [(""%s=%s"" % item).encode() for item in env.items()] # Train fut = [actor.train.remote(rabit_args, params, *args, **kwargs) for actor in actors] # All results should be the same because of Rabit tracking. So we just # return the first one. result = ray.get(fut[0]) LOGGER.info(f""Training time: {time.time() - s} s"") LOGGER.info(""Training finished"") return result ","def train( params: Dict, dtrain: ModinDMatrix, *args, evals=(), nthread: Optional[int] = cpu_count(), evenly_data_distribution: Optional[bool] = True, **kwargs, ): """""" Train XGBoost model. Parameters ---------- params : dict Booster params. dtrain : ModinDMatrix Data to be trained against. nthread: int Number of threads for using in each node. By default it is equal to number of threads on master node. evenly_data_distribution: boolean, default True Whether make evenly distribution of partitions between nodes or not. In case `False` minimal datatransfer between nodes will be provided but the data may not be evenly distributed. \\*\\*kwargs: Other parameters are the same as `xgboost.train` except for `evals_result`, which is returned as part of function return value instead of argument. Returns ------- dict A dictionary containing trained booster and evaluation history. `history` field is the same as `eval_result` from `xgboost.train`. .. 
code-block:: python {'booster': xgboost.Booster, 'history': {'train': {'logloss': ['0.48253', '0.35953']}, 'eval': {'logloss': ['0.480385', '0.357756']}}} """""" LOGGER.info(""Training started"") s = time.time() X, y = dtrain assert len(X) == len(y) X_row_parts = unwrap_row_partitions(X, bind_ip=not evenly_data_distribution) y_row_parts = unwrap_row_partitions(y, bind_ip=not evenly_data_distribution) assert len(X_row_parts) == len(y_row_parts), ""Unaligned train data"" # Create remote actors actors = create_actors(nthread=nthread) add_as_eval_method = None if len(evals): for (eval_data, method) in evals: if id(eval_data) == id(dtrain): add_as_eval_method = method evals.remove((eval_data, method)) evals_unwrapped = [ ( ( unwrap_row_partitions(eval_X, bind_ip=not evenly_data_distribution), unwrap_row_partitions(eval_y, bind_ip=not evenly_data_distribution), eval_method, ) ) for ((eval_X, eval_y), eval_method) in evals ] for ( eval_X_row_parts, eval_y_row_parts, eval_method, ) in evals_unwrapped: # Split data across workers _split_data_across_actors( actors, lambda actor, *Xy: actor.add_eval_data.remote( *Xy, eval_method=eval_method ), eval_X_row_parts, eval_y_row_parts, evenly_data_distribution=evenly_data_distribution, ) # Split data across workers _split_data_across_actors( actors, lambda actor, *Xy: actor.set_train_data.remote( *Xy, add_as_eval_method=add_as_eval_method ), X_row_parts, y_row_parts, evenly_data_distribution=evenly_data_distribution, ) LOGGER.info(f""Data preparation time: {time.time() - s} s"") s = time.time() # Start Rabit tracker env = _start_rabit_tracker(len(actors)) rabit_args = [(""%s=%s"" % item).encode() for item in env.items()] # Train fut = [actor.train.remote(rabit_args, params, *args, **kwargs) for actor in actors] # All results should be the same because of Rabit tracking. So we just # return the first one. 
result = ray.get(fut[0]) LOGGER.info(f""Training time: {time.time() - s} s"") LOGGER.info(""Training finished"") return result " 47506,"def apply_tesseract(image: Image.Image, lang: Optional[str], tess_config: Optional[str]): """"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type=""dict"", config=tess_config) words, left, top, width, height = data[""text""], data[""left""], data[""top""], data[""width""], data[""height""] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) assert len(words) == len(normalized_boxes), ""Not as many words as there are bounding boxes"" return words, normalized_boxes ","def apply_tesseract(image: Image.Image, lang: Optional[str], tesseract_config: Optional[str]): """"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type=""dict"", config=tesseract_config) words, left, top, width, height = data[""text""], data[""left""], data[""top""], data[""width""], data[""height""] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) assert len(words) == len(normalized_boxes), ""Not as many words as there are bounding boxes"" return words, normalized_boxes " 8071,"def _get_user_configdir(): """""" Return the string representing the configuration dir. The default is ""HOME/.sunpy"". 
You can override this with the SUNPY_CONFIGDIR environment variable """""" configdir = os.environ.get('SUNPY_CONFIGDIR') if configdir is not None: if not _is_writable_dir(configdir): raise RuntimeError('Could not write to SUNPY_CONFIGDIR=""{0}""' .format(configdir)) return configdir return CONFIG_DIR ","def _get_user_configdir(): """""" Return the string representing the configuration dir. The default is ""$HOME/.sunpy"". You can override this with the SUNPY_CONFIGDIR environment variable """""" configdir = os.environ.get('SUNPY_CONFIGDIR') if configdir is not None: if not _is_writable_dir(configdir): raise RuntimeError('Could not write to SUNPY_CONFIGDIR=""{0}""' .format(configdir)) return configdir return CONFIG_DIR " 40552,"def load_arguments(self, _): with self.argument_context('spring-cloud') as c: c.argument('resource_group', arg_type=resource_group_name_type) c.argument('name', options_list=[ '--name', '-n'], help='Name of Azure Spring Cloud.') # A refactoring work item to move validators to command level to reduce the duplications. # https://dev.azure.com/msazure/AzureDMSS/_workitems/edit/11002857/ with self.argument_context('spring-cloud create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=validate_location) c.argument('sku', arg_type=sku_type, default='Standard') c.argument('reserved_cidr_range', help='Comma-separated list of IP address ranges in CIDR format. The IP ranges are reserved to host underlying Azure Spring Cloud infrastructure, which should be 3 at least /16 unused IP ranges, must not overlap with any Subnet IP ranges.', validator=validate_vnet_required_parameters) c.argument('vnet', help='The name or ID of an existing Virtual Network into which to deploy the Spring Cloud instance.', validator=validate_vnet_required_parameters) c.argument('app_subnet', help='The name or ID of an existing subnet in ""vnet"" into which to deploy the Spring Cloud app. Required when deploying into a Virtual Network. Smaller subnet sizes are supported, please refer: https://aka.ms/azure-spring-cloud-smaller-subnet-vnet-docs', validator=validate_vnet_required_parameters) c.argument('service_runtime_subnet', options_list=['--service-runtime-subnet', '--svc-subnet'], help='The name or ID of an existing subnet in ""vnet"" into which to deploy the Spring Cloud service runtime. Required when deploying into a Virtual Network.', validator=validate_vnet) c.argument('service_runtime_network_resource_group', options_list=['--service-runtime-network-resource-group', '--svc-nrg'], help='The resource group where all network resources for Azure Spring Cloud service runtime will be created in.', validator=validate_node_resource_group) c.argument('app_network_resource_group', options_list=['--app-network-resource-group', '--app-nrg'], help='The resource group where all network resources for apps will be created in.', validator=validate_node_resource_group) c.argument('enable_java_agent', arg_type=get_three_state_flag(), help=""Java in process agent is now GA-ed and used by default when Application Insights enabled. 
"" ""This parameter is no longer needed and will be removed in future release."", validator=validate_java_agent_parameters, deprecate_info=c.deprecate(target='--enable-java-agent', hide=True)) c.argument('app_insights_key', help=""Connection string (recommended) or Instrumentation key of the existing Application Insights."", validator=validate_tracing_parameters_asc_create) c.argument('app_insights', help=""Name of the existing Application Insights in the same Resource Group. "" ""Or Resource ID of the existing Application Insights in a different Resource Group."", validator=validate_tracing_parameters_asc_create) c.argument('sampling_rate', type=float, help=""Sampling Rate of application insights. Minimum is 0, maximum is 100."", validator=validate_tracing_parameters_asc_create) c.argument('disable_app_insights', arg_type=get_three_state_flag(), help=""Disable Application Insights, "" ""if not disabled and no existing Application Insights specified with "" ""--app-insights-key or --app-insights, "" ""will create a new Application Insights instance in the same resource group."", validator=validate_tracing_parameters_asc_create) c.argument('zone_redundant', arg_type=get_three_state_flag(), help=""Create your Azure Spring Cloud service in an Azure availability zone or not, "" ""this could only be supported in several regions at the moment "", default=False, is_preview=True) c.argument('build_pool_size', arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']), validator=validate_build_pool_size, default='S1', is_preview=True, help='Only support in enterprise tier now. Size of build agent pool. See Azure Spring Cloud Doc for size info.') with self.argument_context('spring-cloud update') as c: c.argument('sku', arg_type=sku_type) c.argument('app_insights_key', help=""Connection string (recommended) or Instrumentation key of the existing Application Insights."", validator=validate_tracing_parameters_asc_update, deprecate_info=c.deprecate(target='az spring-cloud update --app-insights-key', redirect='az spring-cloud app-insights update --app-insights-key', hide=True)) c.argument('app_insights', help=""Name of the existing Application Insights in the same Resource Group. "" ""Or Resource ID of the existing Application Insights in a different Resource Group."", validator=validate_tracing_parameters_asc_update, deprecate_info=c.deprecate(target='az spring-cloud update --app-insights', redirect='az spring-cloud app-insights update --app-insights', hide=True)) c.argument('disable_app_insights', arg_type=get_three_state_flag(), help=""Disable Application Insights, "" ""if not disabled and no existing Application Insights specified with "" ""--app-insights-key or --app-insights, "" ""will create a new Application Insights instance in the same resource group."", validator=validate_tracing_parameters_asc_update, deprecate_info=c.deprecate(target='az spring-cloud update --disable-app-insights', redirect='az spring-cloud app-insights update --disable', hide=True)) c.argument('build_pool_size', arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']), is_preview=True, help='Only support in enterprise tier now. Size of build agent pool. 
See Azure Spring Cloud Doc for size info.') for scope in ['spring-cloud create', 'spring-cloud update']: with self.argument_context(scope) as c: c.argument('tags', arg_type=tags_type) with self.argument_context('spring-cloud test-endpoint renew-key') as c: c.argument('type', type=str, arg_type=get_enum_type( TestKeyType), help='Type of test-endpoint key') with self.argument_context('spring-cloud app') as c: c.argument('service', service_name_type) c.argument('name', name_type, help='Name of app.') with self.argument_context('spring-cloud app create') as c: c.argument('assign_endpoint', arg_type=get_three_state_flag(), help='If true, assign endpoint URL for direct access.', default=False, options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)]) c.argument('assign_identity', arg_type=get_three_state_flag(), help='If true, assign managed service identity.') c.argument('cpu', arg_type=cpu_type, default=""1"") c.argument('memory', arg_type=memort_type, default=""1Gi"") c.argument('instance_count', type=int, default=1, help='Number of instance.', validator=validate_instance_count) c.argument('persistent_storage', type=str, help='A json file path for the persistent storages to be mounted to the app') c.argument('loaded_public_certificate_file', options_list=['--loaded-public-certificate-file', '-f'], type=str, help='A json file path indicates the certificates which would be loaded to app') with self.argument_context('spring-cloud app update') as c: c.argument('assign_endpoint', arg_type=get_three_state_flag(), help='If true, assign endpoint URL for direct access.', options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)]) c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access app via https', default=False) c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls') c.argument('persistent_storage', type=str, help='A json file path for the persistent storages to be mounted to the app') c.argument('loaded_public_certificate_file', type=str, options_list=['--loaded-public-certificate-file', '-f'], help='A json file path indicates the certificates which would be loaded to app') with self.argument_context('spring-cloud app append-persistent-storage') as c: c.argument('storage_name', type=str, help='Name of the storage resource you created in Azure Spring Cloud.') c.argument('persistent_storage_type', options_list=['--persistent-storage-type', '-t'], type=str, help='Type of the persistent storage volumed.') c.argument('share_name', type=str, help=""The name of the pre-created file share. 
"" ""ShareName should be provided only if the type of the persistent storage volume is AzureFileVolume."") c.argument('mount_path', type=str, help='The path for the persistent storage volume to be mounted.') c.argument('mount_options', nargs='+', help='[optional] The mount options for the persistent storage volume.', default=None) c.argument('read_only', arg_type=get_three_state_flag(), help='[optional] If true, the persistent storage volume will be read only.', default=False) for scope in ['spring-cloud app update', 'spring-cloud app start', 'spring-cloud app stop', 'spring-cloud app restart', 'spring-cloud app deploy', 'spring-cloud app scale', 'spring-cloud app set-deployment', 'spring-cloud app show-deploy-log']: with self.argument_context(scope) as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('main_entry', options_list=[ '--main-entry', '-m'], help=""The path to the .NET executable relative to zip root."") for scope in ['spring-cloud app identity', 'spring-cloud app unset-deployment']: with self.argument_context(scope) as c: c.argument('name', name_type, help='Name of app.', validator=active_deployment_exist) with self.argument_context('spring-cloud app identity assign') as c: c.argument('scope', help=""The scope the managed identity has access to"") c.argument('role', help=""Role name or id the managed identity will be assigned"") def prepare_logs_argument(c): '''`app log tail` is deprecated. `app logs` is the new choice. They share the same command processor.''' c.argument('instance', options_list=['--instance', '-i'], help='Name of an existing instance of the deployment.') c.argument('lines', type=int, help='Number of lines to show. Maximum is 10000', validator=validate_log_lines) c.argument('follow', options_list=['--follow ', '-f'], help='Specify if the logs should be streamed.', action='store_true') c.argument('since', help='Only return logs newer than a relative duration like 5s, 2m, or 1h. Maximum is 1h', validator=validate_log_since) c.argument('limit', type=int, help='Maximum kilobytes of logs to return. Ceiling number is 2048.', validator=validate_log_limit) c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. 
Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('format_json', nargs='?', const='{timestamp} {level:>5} [{thread:>15.15}] {logger{39}:<40.40}: {message}\n{stackTrace}', help='Format JSON logs if structured log is enabled') with self.argument_context('spring-cloud app logs') as c: prepare_logs_argument(c) with self.argument_context('spring-cloud app log tail') as c: prepare_logs_argument(c) with self.argument_context('spring-cloud app set-deployment') as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app.', validator=ensure_not_active_deployment) for scope in ['spring-cloud app create', 'spring-cloud app update']: with self.argument_context(scope) as c: c.argument('enable_persistent_storage', arg_type=get_three_state_flag(), help='If true, mount a 50G (Standard Pricing tier) or 1G (Basic Pricing tier) disk with default path.') for scope in ['spring-cloud app update', 'spring-cloud app deployment create', 'spring-cloud app deploy', 'spring-cloud app create']: with self.argument_context(scope) as c: c.argument('runtime_version', arg_type=get_enum_type(RuntimeVersion), help='Runtime version of used language') c.argument('jvm_options', type=str, validator=validate_jvm_options, help=""A string containing jvm options, use '=' instead of ' ' for this argument to avoid bash parse error, eg: --jvm-options='-Xms1024m -Xmx2048m'"") c.argument('env', env_type) c.argument('disable_probe', arg_type=get_three_state_flag(), help='If true, disable the liveness and readiness probe.') with self.argument_context('spring-cloud app scale') as c: c.argument('cpu', arg_type=cpu_type) c.argument('memory', arg_type=memort_type) c.argument('instance_count', type=int, help='Number of instance.', validator=validate_instance_count) for scope in ['spring-cloud app deploy', 'spring-cloud app deployment create']: with self.argument_context(scope) as c: c.argument( 'artifact_path', options_list=['--artifact-path', c.deprecate(target='--jar-path', redirect='--artifact-path', hide=True), c.deprecate(target='-p', redirect='--artifact-path', hide=True)], help='Deploy the specified pre-built artifact (jar or netcore zip).', validator=validate_jar) c.argument( 'disable_validation', arg_type=get_three_state_flag(), help='If true, disable jar validation.') c.argument('builder', help='(Enterprise Tier Only) Build service builder used to build the executable.', default='default', is_preview=True) c.argument( 'main_entry', options_list=[ '--main-entry', '-m'], help=""A string containing the path to the .NET executable relative to zip root."") c.argument( 'target_module', help='Child module to be deployed, required for multiple jar packages built from source code.', arg_group='Source Code deploy') c.argument( 'version', help='Deployment version, keep unchanged if not set.') c.argument( 'container_image', help='The container image tag.', arg_group='Custom Container') c.argument( 'container_registry', default='docker.io', help='The registry of the container image.', arg_group='Custom Container') c.argument( 'registry_username', help='The username of the container registry.', arg_group='Custom Container') c.argument( 'registry_password', help='The password of the container registry.', arg_group='Custom Container') c.argument( 'container_command', help='The command of the container image.', nargs='*', arg_group='Custom Container') c.argument( 'container_args', help='The arguments of the container image.', nargs='*', 
arg_group='Custom Container') with self.argument_context('spring-cloud app deploy') as c: c.argument('source_path', arg_type=source_path_type, validator=validate_deloy_path) with self.argument_context('spring-cloud app deployment create') as c: c.argument('source_path', arg_type=source_path_type, validator=validate_deloyment_create_path) with self.argument_context('spring-cloud app deployment create') as c: c.argument('skip_clone_settings', help='Create staging deployment will automatically copy settings from production deployment.', action='store_true') c.argument('cpu', arg_type=cpu_type) c.argument('memory', arg_type=memort_type) c.argument('instance_count', type=int, help='Number of instance.', validator=validate_instance_count) with self.argument_context('spring-cloud app deployment') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) c.argument('name', name_type, help='Name of deployment.') for scope in ['spring-cloud app deployment generate-heap-dump', 'spring-cloud app deployment generate-thread-dump']: with self.argument_context(scope) as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('app_instance', help='Target app instance you want to dump.') c.argument('file_path', help='The mount file path for your dump file.') with self.argument_context('spring-cloud app deployment start-jfr') as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('app_instance', help='Target app instance you want to dump.') c.argument('file_path', help='The mount file path for your dump file.') c.argument('duration', type=str, default=""60s"", help='Duration of JFR.') with self.argument_context('spring-cloud app binding') as c: c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app) c.argument('name', name_type, help='Name of service binding.') for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding mysql add', 'spring-cloud app binding redis add']: with self.argument_context(scope) as c: c.argument('resource_id', validator=validate_resource_id, help='Azure resource ID of the service to bind with.') for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding cosmos update']: with self.argument_context(scope) as c: c.argument( 'database_name', help='Name of database. Required for mongo, sql, gremlin') c.argument( 'key_space', help='Cassandra key space. Required for cassandra') c.argument('collection_name', help='Name of collection. 
Required for gremlin') with self.argument_context('spring-cloud app binding cosmos add') as c: c.argument('api_type', help='Type of API.', arg_type=get_enum_type( ApiType), validator=validate_cosmos_type) for scope in ['spring-cloud app binding mysql add', 'spring-cloud app binding mysql update']: with self.argument_context(scope) as c: c.argument('key', help='API key of the service.') c.argument('username', help='Username of the database') c.argument('database_name', help='Database name') for scope in ['spring-cloud app binding redis add', 'spring-cloud app binding redis update']: with self.argument_context(scope) as c: c.argument('key', help='Api key of the service.') c.argument('disable_ssl', arg_type=get_three_state_flag(), help='If true, disable SSL. If false, enable SSL.', default=False) with self.argument_context('spring-cloud app append-loaded-public-certificate') as c: c.argument('certificate_name', help='Name of the certificate to be appended') c.argument('load_trust_store', arg_type=get_three_state_flag(), help='If true, the certificate would be loaded into trust store for Java applications', default=False) with self.argument_context('spring-cloud config-server set') as c: c.argument('config_file', help='A yaml file path for the configuration of Spring Cloud config server') for scope in ['spring-cloud config-server git set', 'spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']: with self.argument_context(scope) as c: c.argument('uri', help='Uri of the added config.') c.argument('label', help='Label of the added config.') c.argument( 'search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.') c.argument('username', help='Username of the added config.') c.argument('password', help='Password of the added config.') c.argument('host_key', help='Host key of the added config.') c.argument('host_key_algorithm', help='Host key algorithm of the added config.') c.argument('private_key', help='Private_key of the added config.') c.argument('strict_host_key_checking', help='Strict_host_key_checking of the added config.') for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update', 'spring-cloud config-server git repo remove']: with self.argument_context(scope) as c: c.argument('repo_name', help='Name of the repo.') for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']: with self.argument_context(scope) as c: c.argument( 'pattern', help='Pattern of the repo, use , as delimiter for multiple patterns') with self.argument_context('spring-cloud test-endpoint list') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=validate_deployment_name) with self.argument_context('spring-cloud storage') as c: c.argument('service', service_name_type) c.argument('name', help='Name of storage.') with self.argument_context('spring-cloud storage add') as c: c.argument('storage_type', help='The type of the torage. e.g. StorageAccount') c.argument('account_name', help='The name of the storage account.') c.argument('account_key', help='The account key of the storage account.') with self.argument_context('spring-cloud storage update') as c: c.argument('storage_type', help='The type of the torage. e.g. 
StorageAccount') c.argument('account_name', help='The name of the storage account.') c.argument('account_key', help='The account key of the storage account.') with self.argument_context('spring-cloud certificate') as c: c.argument('service', service_name_type) c.argument('name', help='Name of certificate.') with self.argument_context('spring-cloud certificate add') as c: c.argument('vault_uri', help='The key vault uri where store the certificate') c.argument('vault_certificate_name', help='The certificate name in key vault') c.argument('only_public_cert', arg_type=get_three_state_flag(), help='If true, only import public certificate part from key vault.', default=False) c.argument('public_certificate_file', options_list=['--public-certificate-file', '-f'], help='A file path for the public certificate to be uploaded') with self.argument_context('spring-cloud certificate list') as c: c.argument('certificate_type', help='Type of uploaded certificate', arg_type=get_enum_type(['KeyVaultCertificate', 'ContentCertificate'])) with self.argument_context('spring-cloud app custom-domain') as c: c.argument('service', service_name_type) c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app) c.argument('domain_name', help='Name of custom domain.') with self.argument_context('spring-cloud app custom-domain bind') as c: c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.') c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls') with self.argument_context('spring-cloud app custom-domain update') as c: c.argument('certificate', help='Certificate name in Azure Spring Cloud.') c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls') with self.argument_context('spring-cloud app-insights update') as c: c.argument('app_insights_key', help=""Connection string (recommended) or Instrumentation key of the existing Application Insights."", validator=validate_app_insights_parameters) c.argument('app_insights', help=""Name of the existing Application Insights in the same Resource Group. "" ""Or Resource ID of the existing Application Insights in a different Resource Group."", validator=validate_app_insights_parameters) c.argument('sampling_rate', type=float, help=""Sampling Rate of application insights. 
Maximum is 100."", validator=validate_app_insights_parameters) c.argument('disable', arg_type=get_three_state_flag(), help=""Disable Application Insights."", validator=validate_app_insights_parameters) for scope in ['spring-cloud service-registry']: with self.argument_context(scope) as c: c.argument('service', service_name_type, validator=only_support_enterprise) with self.argument_context('spring-cloud service-registry bind') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) with self.argument_context('spring-cloud service-registry unbind') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) for scope in ['spring-cloud build-service builder create', 'spring-cloud build-service builder update']: with self.argument_context(scope) as c: c.argument('builder_json', type=str, help=""The JSON array of builder."", validator=validate_builder_resource) c.argument('builder_file', type=str, help=""The file path of JSON array of builder."", validator=validate_builder_resource) with self.argument_context('spring-cloud build-service builder create') as c: c.argument('name', type=str, help=""The builder name."", validator=validate_builder_create) with self.argument_context('spring-cloud build-service builder update') as c: c.argument('name', type=str, help=""The builder name."", validator=validate_builder_update) for scope in ['spring-cloud build-service builder show', 'spring-cloud build-service builder delete']: with self.argument_context(scope) as c: c.argument('name', type=str, help=""The builder name."") ","def load_arguments(self, _): with self.argument_context('spring-cloud') as c: c.argument('resource_group', arg_type=resource_group_name_type) c.argument('name', options_list=[ '--name', '-n'], help='Name of Azure Spring Cloud.') # A refactoring work item to move validators to command level to reduce the duplications. # https://dev.azure.com/msazure/AzureDMSS/_workitems/edit/11002857/ with self.argument_context('spring-cloud create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=validate_location) c.argument('sku', arg_type=sku_type, default='Standard') c.argument('reserved_cidr_range', help='Comma-separated list of IP address ranges in CIDR format. The IP ranges are reserved to host underlying Azure Spring Cloud infrastructure, which should be 3 at least /16 unused IP ranges, must not overlap with any Subnet IP ranges.', validator=validate_vnet_required_parameters) c.argument('vnet', help='The name or ID of an existing Virtual Network into which to deploy the Spring Cloud instance.', validator=validate_vnet_required_parameters) c.argument('app_subnet', help='The name or ID of an existing subnet in ""vnet"" into which to deploy the Spring Cloud app. Required when deploying into a Virtual Network. Smaller subnet sizes are supported, please refer: https://aka.ms/azure-spring-cloud-smaller-subnet-vnet-docs', validator=validate_vnet_required_parameters) c.argument('service_runtime_subnet', options_list=['--service-runtime-subnet', '--svc-subnet'], help='The name or ID of an existing subnet in ""vnet"" into which to deploy the Spring Cloud service runtime. 
Required when deploying into a Virtual Network.', validator=validate_vnet) c.argument('service_runtime_network_resource_group', options_list=['--service-runtime-network-resource-group', '--svc-nrg'], help='The resource group where all network resources for Azure Spring Cloud service runtime will be created in.', validator=validate_node_resource_group) c.argument('app_network_resource_group', options_list=['--app-network-resource-group', '--app-nrg'], help='The resource group where all network resources for apps will be created in.', validator=validate_node_resource_group) c.argument('enable_java_agent', arg_type=get_three_state_flag(), help=""Java in process agent is now GA-ed and used by default when Application Insights enabled. "" ""This parameter is no longer needed and will be removed in future release."", validator=validate_java_agent_parameters, deprecate_info=c.deprecate(target='--enable-java-agent', hide=True)) c.argument('app_insights_key', help=""Connection string (recommended) or Instrumentation key of the existing Application Insights."", validator=validate_tracing_parameters_asc_create) c.argument('app_insights', help=""Name of the existing Application Insights in the same Resource Group. "" ""Or Resource ID of the existing Application Insights in a different Resource Group."", validator=validate_tracing_parameters_asc_create) c.argument('sampling_rate', type=float, help=""Sampling Rate of application insights. Minimum is 0, maximum is 100."", validator=validate_tracing_parameters_asc_create) c.argument('disable_app_insights', arg_type=get_three_state_flag(), help=""Disable Application Insights, "" ""if not disabled and no existing Application Insights specified with "" ""--app-insights-key or --app-insights, "" ""will create a new Application Insights instance in the same resource group."", validator=validate_tracing_parameters_asc_create) c.argument('zone_redundant', arg_type=get_three_state_flag(), help=""Create your Azure Spring Cloud service in an Azure availability zone or not, "" ""this could only be supported in several regions at the moment "", default=False, is_preview=True) c.argument('build_pool_size', arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']), validator=validate_build_pool_size, default='S1', is_preview=True, help='Only support in enterprise tier now. Size of build agent pool. See Azure Spring Cloud Doc for size info.') with self.argument_context('spring-cloud update') as c: c.argument('sku', arg_type=sku_type) c.argument('app_insights_key', help=""Connection string (recommended) or Instrumentation key of the existing Application Insights."", validator=validate_tracing_parameters_asc_update, deprecate_info=c.deprecate(target='az spring-cloud update --app-insights-key', redirect='az spring-cloud app-insights update --app-insights-key', hide=True)) c.argument('app_insights', help=""Name of the existing Application Insights in the same Resource Group. 
"" ""Or Resource ID of the existing Application Insights in a different Resource Group."", validator=validate_tracing_parameters_asc_update, deprecate_info=c.deprecate(target='az spring-cloud update --app-insights', redirect='az spring-cloud app-insights update --app-insights', hide=True)) c.argument('disable_app_insights', arg_type=get_three_state_flag(), help=""Disable Application Insights, "" ""if not disabled and no existing Application Insights specified with "" ""--app-insights-key or --app-insights, "" ""will create a new Application Insights instance in the same resource group."", validator=validate_tracing_parameters_asc_update, deprecate_info=c.deprecate(target='az spring-cloud update --disable-app-insights', redirect='az spring-cloud app-insights update --disable', hide=True)) c.argument('build_pool_size', arg_type=get_enum_type(['S1', 'S2', 'S3', 'S4', 'S5']), is_preview=True, help='(Enterprise Tier Only) Size of build agent pool. See Azure Spring Cloud Doc for size info.') for scope in ['spring-cloud create', 'spring-cloud update']: with self.argument_context(scope) as c: c.argument('tags', arg_type=tags_type) with self.argument_context('spring-cloud test-endpoint renew-key') as c: c.argument('type', type=str, arg_type=get_enum_type( TestKeyType), help='Type of test-endpoint key') with self.argument_context('spring-cloud app') as c: c.argument('service', service_name_type) c.argument('name', name_type, help='Name of app.') with self.argument_context('spring-cloud app create') as c: c.argument('assign_endpoint', arg_type=get_three_state_flag(), help='If true, assign endpoint URL for direct access.', default=False, options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)]) c.argument('assign_identity', arg_type=get_three_state_flag(), help='If true, assign managed service identity.') c.argument('cpu', arg_type=cpu_type, default=""1"") c.argument('memory', arg_type=memort_type, default=""1Gi"") c.argument('instance_count', type=int, default=1, help='Number of instance.', validator=validate_instance_count) c.argument('persistent_storage', type=str, help='A json file path for the persistent storages to be mounted to the app') c.argument('loaded_public_certificate_file', options_list=['--loaded-public-certificate-file', '-f'], type=str, help='A json file path indicates the certificates which would be loaded to app') with self.argument_context('spring-cloud app update') as c: c.argument('assign_endpoint', arg_type=get_three_state_flag(), help='If true, assign endpoint URL for direct access.', options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)]) c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access app via https', default=False) c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls') c.argument('persistent_storage', type=str, help='A json file path for the persistent storages to be mounted to the app') c.argument('loaded_public_certificate_file', type=str, options_list=['--loaded-public-certificate-file', '-f'], help='A json file path indicates the certificates which would be loaded to app') with self.argument_context('spring-cloud app append-persistent-storage') as c: c.argument('storage_name', type=str, help='Name of the storage resource you created in Azure Spring Cloud.') c.argument('persistent_storage_type', options_list=['--persistent-storage-type', '-t'], type=str, help='Type of the 
persistent storage volumed.') c.argument('share_name', type=str, help=""The name of the pre-created file share. "" ""ShareName should be provided only if the type of the persistent storage volume is AzureFileVolume."") c.argument('mount_path', type=str, help='The path for the persistent storage volume to be mounted.') c.argument('mount_options', nargs='+', help='[optional] The mount options for the persistent storage volume.', default=None) c.argument('read_only', arg_type=get_three_state_flag(), help='[optional] If true, the persistent storage volume will be read only.', default=False) for scope in ['spring-cloud app update', 'spring-cloud app start', 'spring-cloud app stop', 'spring-cloud app restart', 'spring-cloud app deploy', 'spring-cloud app scale', 'spring-cloud app set-deployment', 'spring-cloud app show-deploy-log']: with self.argument_context(scope) as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('main_entry', options_list=[ '--main-entry', '-m'], help=""The path to the .NET executable relative to zip root."") for scope in ['spring-cloud app identity', 'spring-cloud app unset-deployment']: with self.argument_context(scope) as c: c.argument('name', name_type, help='Name of app.', validator=active_deployment_exist) with self.argument_context('spring-cloud app identity assign') as c: c.argument('scope', help=""The scope the managed identity has access to"") c.argument('role', help=""Role name or id the managed identity will be assigned"") def prepare_logs_argument(c): '''`app log tail` is deprecated. `app logs` is the new choice. They share the same command processor.''' c.argument('instance', options_list=['--instance', '-i'], help='Name of an existing instance of the deployment.') c.argument('lines', type=int, help='Number of lines to show. Maximum is 10000', validator=validate_log_lines) c.argument('follow', options_list=['--follow ', '-f'], help='Specify if the logs should be streamed.', action='store_true') c.argument('since', help='Only return logs newer than a relative duration like 5s, 2m, or 1h. Maximum is 1h', validator=validate_log_since) c.argument('limit', type=int, help='Maximum kilobytes of logs to return. Ceiling number is 2048.', validator=validate_log_limit) c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. 
Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('format_json', nargs='?', const='{timestamp} {level:>5} [{thread:>15.15}] {logger{39}:<40.40}: {message}\n{stackTrace}', help='Format JSON logs if structured log is enabled') with self.argument_context('spring-cloud app logs') as c: prepare_logs_argument(c) with self.argument_context('spring-cloud app log tail') as c: prepare_logs_argument(c) with self.argument_context('spring-cloud app set-deployment') as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app.', validator=ensure_not_active_deployment) for scope in ['spring-cloud app create', 'spring-cloud app update']: with self.argument_context(scope) as c: c.argument('enable_persistent_storage', arg_type=get_three_state_flag(), help='If true, mount a 50G (Standard Pricing tier) or 1G (Basic Pricing tier) disk with default path.') for scope in ['spring-cloud app update', 'spring-cloud app deployment create', 'spring-cloud app deploy', 'spring-cloud app create']: with self.argument_context(scope) as c: c.argument('runtime_version', arg_type=get_enum_type(RuntimeVersion), help='Runtime version of used language') c.argument('jvm_options', type=str, validator=validate_jvm_options, help=""A string containing jvm options, use '=' instead of ' ' for this argument to avoid bash parse error, eg: --jvm-options='-Xms1024m -Xmx2048m'"") c.argument('env', env_type) c.argument('disable_probe', arg_type=get_three_state_flag(), help='If true, disable the liveness and readiness probe.') with self.argument_context('spring-cloud app scale') as c: c.argument('cpu', arg_type=cpu_type) c.argument('memory', arg_type=memort_type) c.argument('instance_count', type=int, help='Number of instance.', validator=validate_instance_count) for scope in ['spring-cloud app deploy', 'spring-cloud app deployment create']: with self.argument_context(scope) as c: c.argument( 'artifact_path', options_list=['--artifact-path', c.deprecate(target='--jar-path', redirect='--artifact-path', hide=True), c.deprecate(target='-p', redirect='--artifact-path', hide=True)], help='Deploy the specified pre-built artifact (jar or netcore zip).', validator=validate_jar) c.argument( 'disable_validation', arg_type=get_three_state_flag(), help='If true, disable jar validation.') c.argument('builder', help='(Enterprise Tier Only) Build service builder used to build the executable.', default='default', is_preview=True) c.argument( 'main_entry', options_list=[ '--main-entry', '-m'], help=""A string containing the path to the .NET executable relative to zip root."") c.argument( 'target_module', help='Child module to be deployed, required for multiple jar packages built from source code.', arg_group='Source Code deploy') c.argument( 'version', help='Deployment version, keep unchanged if not set.') c.argument( 'container_image', help='The container image tag.', arg_group='Custom Container') c.argument( 'container_registry', default='docker.io', help='The registry of the container image.', arg_group='Custom Container') c.argument( 'registry_username', help='The username of the container registry.', arg_group='Custom Container') c.argument( 'registry_password', help='The password of the container registry.', arg_group='Custom Container') c.argument( 'container_command', help='The command of the container image.', nargs='*', arg_group='Custom Container') c.argument( 'container_args', help='The arguments of the container image.', nargs='*', 
arg_group='Custom Container') with self.argument_context('spring-cloud app deploy') as c: c.argument('source_path', arg_type=source_path_type, validator=validate_deloy_path) with self.argument_context('spring-cloud app deployment create') as c: c.argument('source_path', arg_type=source_path_type, validator=validate_deloyment_create_path) with self.argument_context('spring-cloud app deployment create') as c: c.argument('skip_clone_settings', help='Create staging deployment will automatically copy settings from production deployment.', action='store_true') c.argument('cpu', arg_type=cpu_type) c.argument('memory', arg_type=memort_type) c.argument('instance_count', type=int, help='Number of instance.', validator=validate_instance_count) with self.argument_context('spring-cloud app deployment') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) c.argument('name', name_type, help='Name of deployment.') for scope in ['spring-cloud app deployment generate-heap-dump', 'spring-cloud app deployment generate-thread-dump']: with self.argument_context(scope) as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('app_instance', help='Target app instance you want to dump.') c.argument('file_path', help='The mount file path for your dump file.') with self.argument_context('spring-cloud app deployment start-jfr') as c: c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param) c.argument('app_instance', help='Target app instance you want to dump.') c.argument('file_path', help='The mount file path for your dump file.') c.argument('duration', type=str, default=""60s"", help='Duration of JFR.') with self.argument_context('spring-cloud app binding') as c: c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app) c.argument('name', name_type, help='Name of service binding.') for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding mysql add', 'spring-cloud app binding redis add']: with self.argument_context(scope) as c: c.argument('resource_id', validator=validate_resource_id, help='Azure resource ID of the service to bind with.') for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding cosmos update']: with self.argument_context(scope) as c: c.argument( 'database_name', help='Name of database. Required for mongo, sql, gremlin') c.argument( 'key_space', help='Cassandra key space. Required for cassandra') c.argument('collection_name', help='Name of collection. 
Required for gremlin') with self.argument_context('spring-cloud app binding cosmos add') as c: c.argument('api_type', help='Type of API.', arg_type=get_enum_type( ApiType), validator=validate_cosmos_type) for scope in ['spring-cloud app binding mysql add', 'spring-cloud app binding mysql update']: with self.argument_context(scope) as c: c.argument('key', help='API key of the service.') c.argument('username', help='Username of the database') c.argument('database_name', help='Database name') for scope in ['spring-cloud app binding redis add', 'spring-cloud app binding redis update']: with self.argument_context(scope) as c: c.argument('key', help='Api key of the service.') c.argument('disable_ssl', arg_type=get_three_state_flag(), help='If true, disable SSL. If false, enable SSL.', default=False) with self.argument_context('spring-cloud app append-loaded-public-certificate') as c: c.argument('certificate_name', help='Name of the certificate to be appended') c.argument('load_trust_store', arg_type=get_three_state_flag(), help='If true, the certificate would be loaded into trust store for Java applications', default=False) with self.argument_context('spring-cloud config-server set') as c: c.argument('config_file', help='A yaml file path for the configuration of Spring Cloud config server') for scope in ['spring-cloud config-server git set', 'spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']: with self.argument_context(scope) as c: c.argument('uri', help='Uri of the added config.') c.argument('label', help='Label of the added config.') c.argument( 'search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.') c.argument('username', help='Username of the added config.') c.argument('password', help='Password of the added config.') c.argument('host_key', help='Host key of the added config.') c.argument('host_key_algorithm', help='Host key algorithm of the added config.') c.argument('private_key', help='Private_key of the added config.') c.argument('strict_host_key_checking', help='Strict_host_key_checking of the added config.') for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update', 'spring-cloud config-server git repo remove']: with self.argument_context(scope) as c: c.argument('repo_name', help='Name of the repo.') for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']: with self.argument_context(scope) as c: c.argument( 'pattern', help='Pattern of the repo, use , as delimiter for multiple patterns') with self.argument_context('spring-cloud test-endpoint list') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) c.argument('deployment', options_list=[ '--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=validate_deployment_name) with self.argument_context('spring-cloud storage') as c: c.argument('service', service_name_type) c.argument('name', help='Name of storage.') with self.argument_context('spring-cloud storage add') as c: c.argument('storage_type', help='The type of the torage. e.g. StorageAccount') c.argument('account_name', help='The name of the storage account.') c.argument('account_key', help='The account key of the storage account.') with self.argument_context('spring-cloud storage update') as c: c.argument('storage_type', help='The type of the torage. e.g. 
StorageAccount') c.argument('account_name', help='The name of the storage account.') c.argument('account_key', help='The account key of the storage account.') with self.argument_context('spring-cloud certificate') as c: c.argument('service', service_name_type) c.argument('name', help='Name of certificate.') with self.argument_context('spring-cloud certificate add') as c: c.argument('vault_uri', help='The key vault uri where store the certificate') c.argument('vault_certificate_name', help='The certificate name in key vault') c.argument('only_public_cert', arg_type=get_three_state_flag(), help='If true, only import public certificate part from key vault.', default=False) c.argument('public_certificate_file', options_list=['--public-certificate-file', '-f'], help='A file path for the public certificate to be uploaded') with self.argument_context('spring-cloud certificate list') as c: c.argument('certificate_type', help='Type of uploaded certificate', arg_type=get_enum_type(['KeyVaultCertificate', 'ContentCertificate'])) with self.argument_context('spring-cloud app custom-domain') as c: c.argument('service', service_name_type) c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app) c.argument('domain_name', help='Name of custom domain.') with self.argument_context('spring-cloud app custom-domain bind') as c: c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.') c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls') with self.argument_context('spring-cloud app custom-domain update') as c: c.argument('certificate', help='Certificate name in Azure Spring Cloud.') c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls') with self.argument_context('spring-cloud app-insights update') as c: c.argument('app_insights_key', help=""Connection string (recommended) or Instrumentation key of the existing Application Insights."", validator=validate_app_insights_parameters) c.argument('app_insights', help=""Name of the existing Application Insights in the same Resource Group. "" ""Or Resource ID of the existing Application Insights in a different Resource Group."", validator=validate_app_insights_parameters) c.argument('sampling_rate', type=float, help=""Sampling Rate of application insights. 
Maximum is 100."", validator=validate_app_insights_parameters) c.argument('disable', arg_type=get_three_state_flag(), help=""Disable Application Insights."", validator=validate_app_insights_parameters) for scope in ['spring-cloud service-registry']: with self.argument_context(scope) as c: c.argument('service', service_name_type, validator=only_support_enterprise) with self.argument_context('spring-cloud service-registry bind') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) with self.argument_context('spring-cloud service-registry unbind') as c: c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name) for scope in ['spring-cloud build-service builder create', 'spring-cloud build-service builder update']: with self.argument_context(scope) as c: c.argument('builder_json', type=str, help=""The JSON array of builder."", validator=validate_builder_resource) c.argument('builder_file', type=str, help=""The file path of JSON array of builder."", validator=validate_builder_resource) with self.argument_context('spring-cloud build-service builder create') as c: c.argument('name', type=str, help=""The builder name."", validator=validate_builder_create) with self.argument_context('spring-cloud build-service builder update') as c: c.argument('name', type=str, help=""The builder name."", validator=validate_builder_update) for scope in ['spring-cloud build-service builder show', 'spring-cloud build-service builder delete']: with self.argument_context(scope) as c: c.argument('name', type=str, help=""The builder name."") " 1595,"def _incremental_weighted_mean_and_var(X, sample_weight, last_weighted_mean, last_weighted_variance, last_weight_sum): """"""Calculate weighted mean and variance batch update last_weighted_mean and last_weighted_variance are statistics computed at the last step by the function. Both must be initialized to 0.0. In case no scaling is required last_weighted_variance can be None. The weighted_mean is always required and returned because necessary for the calculation of the weighted_variance. last_weight sum is the sum of weights encountered until now. Derived from the paper ""Incremental calculation of weighted mean and variance"", by Tony Finch. Parameters ---------- X : array-like, shape (n_samples, n_features) Data to use for statistics update sample_weight : array-like, shape (n_samples,) last_weighted_mean : array-like, shape: (n_features,) last_weighted_variance : array-like, shape: (n_features,) last_weight_sum : array-like, shape (n_features,) Returns ------- updated_weighted_mean : array, shape (n_features,) updated_weighted_variance : array, shape (n_features,) If None, only weighted_mean is computed updated_weight_sum : array, shape (n_features,) Notes ----- NaNs in X are ignored. 
References ---------- Tony Finch ""Incremental calculation of weighted mean and variance"" University of Cambridge Computing Service, February 2009 """""" # last = stats until now # new = the current increment # updated = the aggregated stats M = np.isnan(X) sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1))) new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel() total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0) X_0 = np.where(np.isnan(X), 0, X) new_weighted_mean = \ _safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0) new_weighted_mean *= total_weight_sum / new_weight_sum updated_weight_sum = last_weight_sum + new_weight_sum updated_weighted_mean = ( (last_weight_sum * last_weighted_mean + new_weight_sum * new_weighted_mean) / updated_weight_sum) if last_weighted_variance is None: updated_weighted_variance = None else: X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2) new_weighted_variance = \ _safe_accumulator_op( np.average, X_0, weights=sample_weight, axis=0) new_weighted_variance *= total_weight_sum / new_weight_sum new_element = ( new_weight_sum * (new_weighted_variance + (new_weighted_mean - updated_weighted_mean) ** 2)) last_element = ( last_weight_sum * (last_weighted_variance + (last_weighted_mean - updated_weighted_mean) ** 2)) updated_weighted_variance = ( new_element + last_element) / updated_weight_sum return updated_weighted_mean, updated_weighted_variance, updated_weight_sum ","def _incremental_weighted_mean_and_var(X, sample_weight, last_weighted_mean, last_weighted_variance, last_weight_sum): """"""Calculate weighted mean and variance batch update last_weighted_mean and last_weighted_variance are statistics computed at the last step by the function. Both must be initialized to 0.0. In case no scaling is required last_weighted_variance can be None. The weighted_mean is always required and returned because necessary for the calculation of the weighted_variance. last_weight sum is the sum of weights encountered until now. Derived from the paper ""Incremental calculation of weighted mean and variance"", by Tony Finch. Parameters ---------- X : array-like, shape (n_samples, n_features) Data to use for statistics update sample_weight : array-like, shape (n_samples,) last_weighted_mean : array-like, shape: (n_features,) last_weighted_variance : array-like, shape: (n_features,) last_weight_sum : array-like, shape (n_features,) Returns ------- updated_weighted_mean : array, shape (n_features,) updated_weighted_variance : array, shape (n_features,) If None, only weighted_mean is computed updated_weight_sum : array, shape (n_features,) Notes ----- NaNs in X are ignored. 
References ---------- Tony Finch ""Incremental calculation of weighted mean and variance"" University of Cambridge Computing Service, February 2009 """""" # last = stats until now # new = the current increment # updated = the aggregated stats M = np.isnan(X) sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1))) new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel() total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0) X_0 = np.where(np.isnan(X), 0, X) new_weighted_mean = \ _safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0) new_weighted_mean *= total_weight_sum / new_weight_sum updated_weight_sum = last_weight_sum + new_weight_sum updated_weighted_mean = ( (last_weight_sum * last_weighted_mean + new_weight_sum * new_weighted_mean) / updated_weight_sum) if last_weighted_variance is None: updated_weighted_variance = None else: X_0 = np.where(nan_mask, 0, (X-new_weighted_mean)**2) new_weighted_variance = \ _safe_accumulator_op( np.average, X_0, weights=sample_weight, axis=0) new_weighted_variance *= total_weight_sum / new_weight_sum new_element = ( new_weight_sum * (new_weighted_variance + (new_weighted_mean - updated_weighted_mean) ** 2)) last_element = ( last_weight_sum * (last_weighted_variance + (last_weighted_mean - updated_weighted_mean) ** 2)) updated_weighted_variance = ( new_element + last_element) / updated_weight_sum return updated_weighted_mean, updated_weighted_variance, updated_weight_sum " 56398,"def add_arguments_to_parser(parser): """""" Add the subcommand's arguments to the given argparse.ArgumentParser. """""" parser.add_argument('-o', '--output', type=str, dest=""output_dir"", required=False, default=argparse.SUPPRESS, help=""Store the analysis output in the given folder. "" ""If it is not given then the results go into a "" ""temporary directory which will be removed after "" ""the analysis."") parser.add_argument('-t', '--type', '--output-format', dest=""output_format"", required=False, choices=['plist'], default='plist', help=""Specify the format the analysis results "" ""should use."") parser.add_argument('-q', '--quiet', dest=""quiet"", action='store_true', required=False, default=argparse.SUPPRESS, help=""If specified, the build tool's and the "" ""analyzers' output will not be printed to the "" ""standard output."") parser.add_argument('--keep-gcc-include-fixed', dest=""keep_gcc_include_fixed"", required=False, action='store_true', default=False, help=""There are some implicit include paths which are "" ""only used by GCC (include-fixed). This flag "" ""determines whether these should be kept among "" ""the implicit include paths."") parser.add_argument('--keep-gcc-intrin', dest=""keep_gcc_intrin"", required=False, action='store_true', default=False, help=""There are some implicit include paths which "" ""contain GCC-specific header files (those "" ""which end with intrin.h). This flag determines "" ""whether these should be kept among the implicit "" ""include paths. Use this flag if Clang analysis "" ""fails with error message related to __builtin "" ""symbols."") log_args = parser.add_argument_group( ""log arguments"", """""" Specify how the build information database should be obtained. 
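[Editor's illustrative aside, not part of any dataset row.] The `_incremental_weighted_mean_and_var` pair above describes a pooled-moments update for weighted mean and variance (after Finch, "Incremental calculation of weighted mean and variance"). A minimal, self-contained sketch of that update rule follows; the helper names are hypothetical and this is not the scikit-learn implementation itself, just the combine step the docstring describes, checked against a single-pass computation.

import numpy as np

def combine_weighted_stats(mean_a, var_a, wsum_a, mean_b, var_b, wsum_b):
    # Pooled weight sum and pooled weighted mean.
    wsum = wsum_a + wsum_b
    mean = (wsum_a * mean_a + wsum_b * mean_b) / wsum
    # Pooled variance: each block contributes its own spread plus the squared
    # offset of its mean from the pooled mean (the cross terms cancel because
    # weighted deviations from each block's own mean sum to zero).
    var = (wsum_a * (var_a + (mean_a - mean) ** 2)
           + wsum_b * (var_b + (mean_b - mean) ** 2)) / wsum
    return mean, var, wsum

def weighted_stats(values, weights):
    # Frequency-weighted mean and (population) variance of one batch.
    mean = np.average(values, weights=weights)
    var = np.average((values - mean) ** 2, weights=weights)
    return mean, var, weights.sum()

rng = np.random.default_rng(0)
x = rng.normal(size=100)
w = rng.uniform(0.5, 2.0, size=100)

# Fold the second batch into the stats of the first batch incrementally...
m_inc, v_inc, _ = combine_weighted_stats(*weighted_stats(x[:60], w[:60]),
                                         *weighted_stats(x[60:], w[60:]))
# ...and compare against a direct single pass over all samples.
m_all, v_all, _ = weighted_stats(x, w)
assert np.allclose([m_inc, v_inc], [m_all, v_all])

The same combine step applied repeatedly, one batch at a time, is what the function above performs per feature column, with NaN handling layered on top.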
You need to specify either an already existing log file, or a build command which will be used to generate a log file on the fly."""""") log_args = log_args.add_mutually_exclusive_group(required=True) log_args.add_argument('-b', '--build', type=str, dest=""command"", default=argparse.SUPPRESS, help=""Execute and record a build command. Build "" ""commands can be simple calls to 'g++' or "" ""'clang++' or 'make', but a more complex "" ""command, or the call of a custom script file "" ""is also supported."") log_args.add_argument('-l', '--logfile', type=str, dest=""logfile"", default=argparse.SUPPRESS, help=""Use an already existing JSON compilation "" ""command database file specified at this path."") analyzer_opts = parser.add_argument_group(""analyzer arguments"") analyzer_opts.add_argument('-j', '--jobs', type=int, dest=""jobs"", required=False, default=1, help=""Number of threads to use in analysis. "" ""More threads mean faster analysis at "" ""the cost of using more memory."") analyzer_opts.add_argument('-c', '--clean', dest=""clean"", required=False, action='store_true', default=argparse.SUPPRESS, help=""Delete analysis reports stored in the "" ""output directory. (By default, "" ""CodeChecker would keep reports and "" ""overwrites only those files that were "" ""update by the current build command)."") parser.add_argument('--compile-uniqueing', type=str, dest=""compile_uniqueing"", default=""none"", required=False, help=""Specify the method the compilation "" ""actions in the compilation database are "" ""uniqued before analysis. "" ""CTU analysis works properly only if "" ""there is exactly one "" ""compilation action per source file. "" ""none(default in non CTU mode): "" ""no uniqueing is done. "" ""strict: no uniqueing is done, "" ""and an error is given if "" ""there is more than one compilation "" ""action for a source file. "" ""alpha(default in CTU mode): If there is more "" ""than one compilation action for a source "" ""file, only the one is kept that belongs to the "" ""alphabetically first "" ""compilation target. "" ""If none of the above given, "" ""this parameter should "" ""be a python regular expression."" ""If there is more than one compilation action "" ""for a source, "" ""only the one is kept which matches the "" ""given python regex. If more than one "" ""matches an error is given. "" ""The whole compilation "" ""action text is searched for match."") analyzer_opts.add_argument('--report-hash', dest=""report_hash"", default=argparse.SUPPRESS, required=False, choices=['context-free', 'context-free-v2'], help=""R|Specify the hash calculation method "" ""for reports. By default the calculation "" ""method for Clang Static Analyzer is "" ""context sensitive and for Clang Tidy it "" ""is context insensitive.\nYou can use the "" ""following calculation methods:\n"" ""- context-free: there was a bug and for "" ""Clang Tidy not the context free hash "" ""was generated (kept for backward "" ""compatibility).\n"" ""- context-free-v2: context free hash is "" ""used for ClangSA and Clang Tidy.\n"" ""See the 'issue hashes' section of the "" ""help message of this command below for "" ""more information.\n"" ""USE WISELY AND AT YOUR OWN RISK!"") skip_mode = analyzer_opts.add_mutually_exclusive_group() skip_mode.add_argument('-i', '--ignore', '--skip', dest=""skipfile"", required=False, default=argparse.SUPPRESS, help=""Path to the Skipfile dictating which project "" ""files should be omitted from analysis. 
"" ""Please consult the User guide on how a "" ""Skipfile should be laid out."") skip_mode.add_argument('--file', nargs='+', dest=""files"", metavar='FILE', required=False, default=argparse.SUPPRESS, help=""Analyze only the given file(s) not the whole "" ""compilation database. Absolute directory "" ""paths should start with '/', relative "" ""directory paths should start with '*' and "" ""it can contain path glob pattern. "" ""Example: '/path/to/main.cpp', 'lib/*.cpp', "" ""*/test*'."") analyzer_opts.add_argument('--analyzers', nargs='+', dest='analyzers', metavar='ANALYZER', required=False, choices=analyzer_types.supported_analyzers, default=argparse.SUPPRESS, help=""Run analysis only with the analyzers "" ""specified. Currently supported analyzers "" ""are: "" + ', '.join(analyzer_types. supported_analyzers) + ""."") analyzer_opts.add_argument('--capture-analysis-output', dest='capture_analysis_output', action='store_true', default=argparse.SUPPRESS, required=False, help=""Store standard output and standard error "" ""of successful analyzer invocations "" ""into the '/success' "" ""directory."") analyzer_opts.add_argument('--config', dest='config_file', required=False, help=""R|Allow the configuration from an "" ""explicit JSON based configuration file. "" ""The value of the 'analyzer' key in the "" ""config file will be emplaced as command "" ""line arguments. The format of "" ""configuration file is:\n"" ""{\n"" "" \""analyzer\"": [\n"" "" \""--enable=core.DivideZero\"",\n"" "" \""--enable=core.CallAndMessage\"",\n"" "" \""--report-hash=context-free-v2\"",\n"" "" \""--verbose=debug\"",\n"" "" \""--skip=$HOME/project/skip.txt\"",\n"" "" \""--clean\""\n"" "" ]\n"" ""}.\n"" ""You can use any environment variable "" ""inside this file and it will be "" ""expaneded."") # TODO: One day, get rid of these. See Issue #36, #427. analyzer_opts.add_argument('--saargs', dest=""clangsa_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for the Clang Static "" ""analyzer."") analyzer_opts.add_argument('--tidyargs', dest=""tidy_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for the Clang-Tidy "" ""analyzer."") analyzer_opts.add_argument('--tidy-config', dest='tidy_config', required=False, default=argparse.SUPPRESS, help=""A file in YAML format containing the "" ""configuration of clang-tidy checkers. "" ""The file can be dumped by "" ""'CodeChecker analyzers --dump-config "" ""clang-tidy' command."") analyzer_opts.add_argument('--analyzer-config', dest='analyzer_config', nargs='*', default=[""clang-tidy:HeaderFilterRegex=.*""], help=""Analyzer configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker analyzers "" ""--analyzer-config'. To disable the "" ""default behaviour of this option you can "" ""use the "" ""'clang-tidy:take-config-from-directory="" ""true' option."") analyzer_opts.add_argument('--checker-config', dest='checker_config', nargs='*', default=argparse.SUPPRESS, help=""Checker configuration options in the "" ""following format: analyzer:key=value. 
"" ""The collection of the options can be "" ""printed with "" ""'CodeChecker checkers --checker-config'."") analyzer_opts.add_argument('--timeout', type=int, dest='timeout', required=False, default=argparse.SUPPRESS, help=""The amount of time (in seconds) that "" ""each analyzer can spend, individually, "" ""to analyze the project. If the analysis "" ""of a particular file takes longer than "" ""this time, the analyzer is killed and "" ""the analysis is considered as a failed "" ""one."") context = analyzer_context.get_context() clang_has_z3 = analyzer_types.is_z3_capable(context) if clang_has_z3: analyzer_opts.add_argument('--z3', dest='enable_z3', choices=['on', 'off'], default='off', help=""Enable the z3 solver backend. This "" ""allows reasoning over more complex "" ""queries, but performance is worse "" ""than the default range-based "" ""constraint solver."") clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context) if clang_has_z3_refutation: analyzer_opts.add_argument('--z3-refutation', dest='enable_z3_refutation', choices=['on', 'off'], default='on' if clang_has_z3_refutation else 'off', help=""Switch on/off the Z3 SMT Solver "" ""backend to "" ""reduce false positives. The results "" ""of the ranged based constraint "" ""solver in the Clang Static Analyzer "" ""will be cross checked with the Z3 "" ""SMT solver. This should not cause "" ""that much of a slowdown compared to "" ""using the Z3 solver only."") if analyzer_types.is_ctu_capable(context): ctu_opts = parser.add_argument_group( ""cross translation unit analysis arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Cross-TU analysis. By default, no CTU analysis is run when 'CodeChecker check' is called."""""") ctu_modes = ctu_opts.add_mutually_exclusive_group() ctu_modes.add_argument('--ctu', '--ctu-all', action='store_const', const=[True, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform Cross Translation Unit (CTU) "" ""analysis, both 'collect' and 'analyze' "" ""phases. In this mode, the extra files "" ""created by 'collect' are cleaned up "" ""after the analysis."") ctu_modes.add_argument('--ctu-collect', action='store_const', const=[True, False], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the first, 'collect' phase of "" ""Cross-TU analysis. This phase generates "" ""extra files needed by CTU analysis, and "" ""puts them into '/ctu-dir'. "" ""NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") ctu_modes.add_argument('--ctu-analyze', action='store_const', const=[False, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the second, 'analyze' phase of "" ""Cross-TU analysis, using already "" ""available extra files in "" ""'/ctu-dir'. (These files "" ""will not be cleaned up in this mode.)"") ctu_opts.add_argument('--ctu-reanalyze-on-failure', action='store_true', dest='ctu_reanalyze_on_failure', default=argparse.SUPPRESS, help=""DEPRECATED. The flag will be removed. "" ""If Cross-TU analysis is enabled and "" ""fails for some reason, try to re analyze "" ""the same translation unit without "" ""Cross-TU enabled."") # Only check for AST loading modes if CTU is available. if analyzer_types.is_ctu_on_demand_available(context): ctu_opts.add_argument('--ctu-ast-mode', action='store', dest='ctu_ast_mode', choices=['load-from-pch', 'parse-on-demand'], default='parse-on-demand', help=""Choose the way ASTs are loaded during "" ""CTU analysis. 
Mode 'load-from-pch' "" ""generates PCH format serialized ASTs "" ""during the 'collect' phase. Mode "" ""'parse-on-demand' only generates the "" ""invocations needed to parse the ASTs. "" ""Mode 'load-from-pch' can use "" ""significant disk-space for the "" ""serialized ASTs, while mode "" ""'parse-on-demand' can incur some "" ""runtime CPU overhead in the second "" ""phase of the analysis."") if analyzer_types.is_statistics_capable(context): stat_opts = parser.add_argument_group( ""Statistics analysis feature arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck, statisticsCollector.SpecialReturnValue checkers are available)."""""") stat_opts.add_argument('--stats-collect', '--stats-collect', action='store', default=argparse.SUPPRESS, dest='stats_output', help=""Perform the first, 'collect' phase of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis, and "" ""puts them into "" ""''."" "" NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") stat_opts.add_argument('--stats-use', '--stats-use', action='store', default=argparse.SUPPRESS, dest='stats_dir', help=""Use the previously generated statistics "" ""results for the analysis from the given "" ""''."") stat_opts.add_argument('--stats', action='store_true', default=argparse.SUPPRESS, dest='stats_enabled', help=""Perform both phases of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis and enables "" ""the statistical checkers. "" ""No need to enable them explicitly."") stat_opts.add_argument('--stats-min-sample-count', action='store', default=""10"", type=int, dest='stats_min_sample_count', help=""Minimum number of samples (function call"" "" occurrences) to be collected"" "" for a statistics to be relevant."") stat_opts.add_argument('--stats-relevance-threshold', action='store', default=""0.85"", type=float, dest='stats_relevance_threshold', help=""The minimum ratio of calls of function "" ""f that must have a certain property "" ""property to consider it true for that "" ""function (calculated as calls "" ""with a property/all calls)."" "" CodeChecker will warn for"" "" calls of f do not have that property."") checkers_opts = parser.add_argument_group( ""checker configuration"", """""" Checkers ------------------------------------------------ The analyzer performs checks that are categorized into families or ""checkers"". See 'CodeChecker checkers' for the list of available checkers. You can fine-tune which checkers to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every 'core' checker, but only 'core.uninitialized.Assign' from the 'core.uninitialized' group. Please consult the manual for details. Disabling certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang community, and thus discouraged. Compiler warnings and errors ------------------------------------------------ Compiler warnings are diagnostic messages that report constructions that are not inherently erroneous but that are risky or suggest there may have been an error. Compiler warnings are named 'clang-diagnostic-', e.g. Clang warning controlled by '-Wliteral-conversion' will be reported with check name 'clang-diagnostic-literal-conversion'. 
You can fine-tune which warnings to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will enable every 'unused' warnings except 'unused-parameter'. These flags should start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.: '-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and '-Wextra' warnings are enabled. For more information see: https://clang.llvm.org/docs/DiagnosticsReference.html. Sometimes GCC is more permissive than Clang, so it is possible that a specific construction doesn't compile with Clang but compiles with GCC. These compiler errors are also collected as CodeChecker reports as 'clang-diagnostic-error'. Note that compiler errors and warnings are captured by CodeChecker only if it was emitted by clang-tidy. Profiles ------------------------------------------------ In CodeCheckers there is a manual grouping of checkers. These groups are called profiles. The collection of profiles is found in config/checker_profile_map.json file. The goal of these profile is that you can enable or disable checkers by these profiles. See the output of ""CodeChecker checkers --profile list"" command. Guidelines ------------------------------------------------ There are several coding guidelines like CppCoreGuideline, SEI-CERT, etc. These are collections of best programming practices to avoid common programming errors. Some checkers cover the rules of these guidelines. In CodeChecker there is a mapping between guidelines and checkers. This way you can list and enable those checkers which check the fulfillment of certain guideline rules. See the output of ""CodeChecker checkers --guideline"" command."""""") checkers_opts.add_argument('-e', '--enable', dest=""enable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group), "" ""profile or guideline "" ""to BE USED in the analysis. In case of "" ""ambiguity the priority order is profile, "" ""guideline, checker name (e.g. security "" ""means the profile, not the checker "" ""group). Profiles and guidelines can be "" ""labeled: 'profile:security' or "" ""'guideline:sei-cert'."") checkers_opts.add_argument('-d', '--disable', dest=""disable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group), "" ""profile or guideline "" ""to BE PROHIBITED from use in the "" ""analysis. In case of "" ""ambiguity the priority order is profile, "" ""guideline, checker name (e.g. security "" ""means the profile, not the checker "" ""group). Profiles and guidelines can be "" ""labeled: 'profile:security' or "" ""'guideline:sei-cert'."") checkers_opts.add_argument('--enable-all', dest=""enable_all"", action='store_true', required=False, default=argparse.SUPPRESS, help=""Force the running analyzers to use "" ""almost every checker available. The "" ""checker groups 'alpha.', 'debug.'"" ""'osx.', 'abseil-', 'android-',"" ""'darwin-', 'objc-', 'cppcoreguidelines-',"" ""'fuchsia.', 'fuchsia-', 'hicpp-', 'llvm-',"" ""'llvmlibc-', 'google-', 'zircon-'"" ""'osx.' (on Linux) are NOT enabled "" ""automatically and must be EXPLICITLY "" ""specified. WARNING! Enabling all "" ""checkers might result in the analysis "" ""losing precision and stability, and "" ""could even result in a total failure of "" ""the analysis. 
USE WISELY AND AT YOUR "" ""OWN RISK!"") output_opts = parser.add_argument_group(""output arguments"") output_opts.add_argument('--print-steps', dest=""print_steps"", action=""store_true"", required=False, default=argparse.SUPPRESS, help=""Print the steps the analyzers took in "" ""finding the reported defect."") parser.add_argument('--review-status', nargs='*', dest=""review_status"", metavar='REVIEW_STATUS', choices=REVIEW_STATUS_VALUES, default=[""confirmed"", ""unreviewed""], help=""Filter results by review statuses. Valid "" ""values are: {0}"".format( ', '.join(REVIEW_STATUS_VALUES))) logger.add_verbose_arguments(parser) parser.set_defaults(func=main) ","def add_arguments_to_parser(parser): """""" Add the subcommand's arguments to the given argparse.ArgumentParser. """""" parser.add_argument('-o', '--output', type=str, dest=""output_dir"", required=False, default=argparse.SUPPRESS, help=""Store the analysis output in the given folder. "" ""If it is not given then the results go into a "" ""temporary directory which will be removed after "" ""the analysis."") parser.add_argument('-t', '--type', '--output-format', dest=""output_format"", required=False, choices=['plist'], default='plist', help=""Specify the format the analysis results "" ""should use."") parser.add_argument('-q', '--quiet', dest=""quiet"", action='store_true', required=False, default=argparse.SUPPRESS, help=""If specified, the build tool's and the "" ""analyzers' output will not be printed to the "" ""standard output."") parser.add_argument('--keep-gcc-include-fixed', dest=""keep_gcc_include_fixed"", required=False, action='store_true', default=False, help=""There are some implicit include paths which are "" ""only used by GCC (include-fixed). This flag "" ""determines whether these should be kept among "" ""the implicit include paths."") parser.add_argument('--keep-gcc-intrin', dest=""keep_gcc_intrin"", required=False, action='store_true', default=False, help=""There are some implicit include paths which "" ""contain GCC-specific header files (those "" ""which end with intrin.h). This flag determines "" ""whether these should be kept among the implicit "" ""include paths. Use this flag if Clang analysis "" ""fails with error message related to __builtin "" ""symbols."") log_args = parser.add_argument_group( ""log arguments"", """""" Specify how the build information database should be obtained. You need to specify either an already existing log file, or a build command which will be used to generate a log file on the fly."""""") log_args = log_args.add_mutually_exclusive_group(required=True) log_args.add_argument('-b', '--build', type=str, dest=""command"", default=argparse.SUPPRESS, help=""Execute and record a build command. Build "" ""commands can be simple calls to 'g++' or "" ""'clang++' or 'make', but a more complex "" ""command, or the call of a custom script file "" ""is also supported."") log_args.add_argument('-l', '--logfile', type=str, dest=""logfile"", default=argparse.SUPPRESS, help=""Use an already existing JSON compilation "" ""command database file specified at this path."") analyzer_opts = parser.add_argument_group(""analyzer arguments"") analyzer_opts.add_argument('-j', '--jobs', type=int, dest=""jobs"", required=False, default=1, help=""Number of threads to use in analysis. 
"" ""More threads mean faster analysis at "" ""the cost of using more memory."") analyzer_opts.add_argument('-c', '--clean', dest=""clean"", required=False, action='store_true', default=argparse.SUPPRESS, help=""Delete analysis reports stored in the "" ""output directory. (By default, "" ""CodeChecker would keep reports and "" ""overwrites only those files that were "" ""update by the current build command)."") parser.add_argument('--compile-uniqueing', type=str, dest=""compile_uniqueing"", default=""none"", required=False, help=""Specify the method the compilation "" ""actions in the compilation database are "" ""uniqued before analysis. "" ""CTU analysis works properly only if "" ""there is exactly one "" ""compilation action per source file. "" ""none(default in non CTU mode): "" ""no uniqueing is done. "" ""strict: no uniqueing is done, "" ""and an error is given if "" ""there is more than one compilation "" ""action for a source file. "" ""alpha(default in CTU mode): If there is more "" ""than one compilation action for a source "" ""file, only the one is kept that belongs to the "" ""alphabetically first "" ""compilation target. "" ""If none of the above given, "" ""this parameter should "" ""be a python regular expression."" ""If there is more than one compilation action "" ""for a source, "" ""only the one is kept which matches the "" ""given python regex. If more than one "" ""matches an error is given. "" ""The whole compilation "" ""action text is searched for match."") analyzer_opts.add_argument('--report-hash', dest=""report_hash"", default=argparse.SUPPRESS, required=False, choices=['context-free', 'context-free-v2'], help=""R|Specify the hash calculation method "" ""for reports. By default the calculation "" ""method for Clang Static Analyzer is "" ""context sensitive and for Clang Tidy it "" ""is context insensitive.\nYou can use the "" ""following calculation methods:\n"" ""- context-free: there was a bug and for "" ""Clang Tidy not the context free hash "" ""was generated (kept for backward "" ""compatibility).\n"" ""- context-free-v2: context free hash is "" ""used for ClangSA and Clang Tidy.\n"" ""See the 'issue hashes' section of the "" ""help message of this command below for "" ""more information.\n"" ""USE WISELY AND AT YOUR OWN RISK!"") skip_mode = analyzer_opts.add_mutually_exclusive_group() skip_mode.add_argument('-i', '--ignore', '--skip', dest=""skipfile"", required=False, default=argparse.SUPPRESS, help=""Path to the Skipfile dictating which project "" ""files should be omitted from analysis. "" ""Please consult the User guide on how a "" ""Skipfile should be laid out."") skip_mode.add_argument('--file', nargs='+', dest=""files"", metavar='FILE', required=False, default=argparse.SUPPRESS, help=""Analyze only the given file(s) not the whole "" ""compilation database. Absolute directory "" ""paths should start with '/', relative "" ""directory paths should start with '*' and "" ""it can contain path glob pattern. "" ""Example: '/path/to/main.cpp', 'lib/*.cpp', "" ""*/test*'."") analyzer_opts.add_argument('--analyzers', nargs='+', dest='analyzers', metavar='ANALYZER', required=False, choices=analyzer_types.supported_analyzers, default=argparse.SUPPRESS, help=""Run analysis only with the analyzers "" ""specified. Currently supported analyzers "" ""are: "" + ', '.join(analyzer_types. 
supported_analyzers) + ""."") analyzer_opts.add_argument('--capture-analysis-output', dest='capture_analysis_output', action='store_true', default=argparse.SUPPRESS, required=False, help=""Store standard output and standard error "" ""of successful analyzer invocations "" ""into the '/success' "" ""directory."") analyzer_opts.add_argument('--config', dest='config_file', required=False, help=""R|Allow the configuration from an "" ""explicit JSON based configuration file. "" ""The value of the 'analyzer' key in the "" ""config file will be emplaced as command "" ""line arguments. The format of "" ""configuration file is:\n"" ""{\n"" "" \""analyzer\"": [\n"" "" \""--enable=core.DivideZero\"",\n"" "" \""--enable=core.CallAndMessage\"",\n"" "" \""--report-hash=context-free-v2\"",\n"" "" \""--verbose=debug\"",\n"" "" \""--skip=$HOME/project/skip.txt\"",\n"" "" \""--clean\""\n"" "" ]\n"" ""}.\n"" ""You can use any environment variable "" ""inside this file and it will be "" ""expaneded."") # TODO: One day, get rid of these. See Issue #36, #427. analyzer_opts.add_argument('--saargs', dest=""clangsa_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for the Clang Static "" ""analyzer."") analyzer_opts.add_argument('--tidyargs', dest=""tidy_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for the Clang-Tidy "" ""analyzer."") analyzer_opts.add_argument('--tidy-config', dest='tidy_config', required=False, default=argparse.SUPPRESS, help=""A file in YAML format containing the "" ""configuration of clang-tidy checkers. "" ""The file can be dumped by "" ""'CodeChecker analyzers --dump-config "" ""clang-tidy' command."") analyzer_opts.add_argument('--analyzer-config', dest='analyzer_config', nargs='*', default=[""clang-tidy:HeaderFilterRegex=.*""], help=""Analyzer configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker analyzers "" ""--analyzer-config'. To disable the "" ""default behaviour of this option you can "" ""use the "" ""'clang-tidy:take-config-from-directory="" ""true' option."") analyzer_opts.add_argument('--checker-config', dest='checker_config', nargs='*', default=argparse.SUPPRESS, help=""Checker configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker checkers --checker-config'."") analyzer_opts.add_argument('--timeout', type=int, dest='timeout', required=False, default=argparse.SUPPRESS, help=""The amount of time (in seconds) that "" ""each analyzer can spend, individually, "" ""to analyze the project. If the analysis "" ""of a particular file takes longer than "" ""this time, the analyzer is killed and "" ""the analysis is considered as a failed "" ""one."") context = analyzer_context.get_context() clang_has_z3 = analyzer_types.is_z3_capable(context) if clang_has_z3: analyzer_opts.add_argument('--z3', dest='enable_z3', choices=['on', 'off'], default='off', help=""Enable the z3 solver backend. 
This "" ""allows reasoning over more complex "" ""queries, but performance is worse "" ""than the default range-based "" ""constraint solver."") clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context) if clang_has_z3_refutation: analyzer_opts.add_argument('--z3-refutation', dest='enable_z3_refutation', choices=['on', 'off'], default='on' if clang_has_z3_refutation else 'off', help=""Switch on/off the Z3 SMT Solver "" ""backend to "" ""reduce false positives. The results "" ""of the ranged based constraint "" ""solver in the Clang Static Analyzer "" ""will be cross checked with the Z3 "" ""SMT solver. This should not cause "" ""that much of a slowdown compared to "" ""using the Z3 solver only."") if analyzer_types.is_ctu_capable(context): ctu_opts = parser.add_argument_group( ""cross translation unit analysis arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Cross-TU analysis. By default, no CTU analysis is run when 'CodeChecker check' is called."""""") ctu_modes = ctu_opts.add_mutually_exclusive_group() ctu_modes.add_argument('--ctu', '--ctu-all', action='store_const', const=[True, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform Cross Translation Unit (CTU) "" ""analysis, both 'collect' and 'analyze' "" ""phases. In this mode, the extra files "" ""created by 'collect' are cleaned up "" ""after the analysis."") ctu_modes.add_argument('--ctu-collect', action='store_const', const=[True, False], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the first, 'collect' phase of "" ""Cross-TU analysis. This phase generates "" ""extra files needed by CTU analysis, and "" ""puts them into '/ctu-dir'. "" ""NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") ctu_modes.add_argument('--ctu-analyze', action='store_const', const=[False, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the second, 'analyze' phase of "" ""Cross-TU analysis, using already "" ""available extra files in "" ""'/ctu-dir'. (These files "" ""will not be cleaned up in this mode.)"") ctu_opts.add_argument('--ctu-reanalyze-on-failure', action='store_true', dest='ctu_reanalyze_on_failure', default=argparse.SUPPRESS, help=""DEPRECATED. The flag will be removed. "" ""If Cross-TU analysis is enabled and "" ""fails for some reason, try to re analyze "" ""the same translation unit without "" ""Cross-TU enabled."") # Only check for AST loading modes if CTU is available. if analyzer_types.is_ctu_on_demand_available(context): ctu_opts.add_argument('--ctu-ast-mode', action='store', dest='ctu_ast_mode', choices=['load-from-pch', 'parse-on-demand'], default='parse-on-demand', help=""Choose the way ASTs are loaded during "" ""CTU analysis. Mode 'load-from-pch' "" ""generates PCH format serialized ASTs "" ""during the 'collect' phase. Mode "" ""'parse-on-demand' only generates the "" ""invocations needed to parse the ASTs. "" ""Mode 'load-from-pch' can use "" ""significant disk-space for the "" ""serialized ASTs, while mode "" ""'parse-on-demand' can incur some "" ""runtime CPU overhead in the second "" ""phase of the analysis."") if analyzer_types.is_statistics_capable(context): stat_opts = parser.add_argument_group( ""Statistics analysis feature arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Statistics-based analysis (e.g. 
statisticsCollector.ReturnValueCheck, statisticsCollector.SpecialReturnValue checkers are available)."""""") stat_opts.add_argument('--stats-collect', '--stats-collect', action='store', default=argparse.SUPPRESS, dest='stats_output', help=""Perform the first, 'collect' phase of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis, and "" ""puts them into "" ""''."" "" NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") stat_opts.add_argument('--stats-use', '--stats-use', action='store', default=argparse.SUPPRESS, dest='stats_dir', help=""Use the previously generated statistics "" ""results for the analysis from the given "" ""''."") stat_opts.add_argument('--stats', action='store_true', default=argparse.SUPPRESS, dest='stats_enabled', help=""Perform both phases of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis and enables "" ""the statistical checkers. "" ""No need to enable them explicitly."") stat_opts.add_argument('--stats-min-sample-count', action='store', default=""10"", type=int, dest='stats_min_sample_count', help=""Minimum number of samples (function call"" "" occurrences) to be collected"" "" for a statistics to be relevant."") stat_opts.add_argument('--stats-relevance-threshold', action='store', default=""0.85"", type=float, dest='stats_relevance_threshold', help=""The minimum ratio of calls of function "" ""f that must have a certain property "" ""property to consider it true for that "" ""function (calculated as calls "" ""with a property/all calls)."" "" CodeChecker will warn for"" "" calls of f do not have that property."") checkers_opts = parser.add_argument_group( ""checker configuration"", """""" Checkers ------------------------------------------------ The analyzer performs checks that are categorized into families or ""checkers"". See 'CodeChecker checkers' for the list of available checkers. You can fine-tune which checkers to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every 'core' checker, but only 'core.uninitialized.Assign' from the 'core.uninitialized' group. Please consult the manual for details. Disabling certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang community, and thus discouraged. Compiler warnings and errors ------------------------------------------------ Compiler warnings are diagnostic messages that report constructions that are not inherently erroneous but that are risky or suggest there may have been an error. Compiler warnings are named 'clang-diagnostic-', e.g. Clang warning controlled by '-Wliteral-conversion' will be reported with check name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will enable every 'unused' warnings except 'unused-parameter'. These flags should start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.: '-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and '-Wextra' warnings are enabled. For more information see: https://clang.llvm.org/docs/DiagnosticsReference.html. 
Sometimes GCC is more permissive than Clang, so it is possible that a specific construction doesn't compile with Clang but compiles with GCC. These compiler errors are also collected as CodeChecker reports as 'clang-diagnostic-error'. Note that compiler errors and warnings are captured by CodeChecker only if it was emitted by clang-tidy. Profiles ------------------------------------------------ In CodeCheckers there is a manual grouping of checkers. These groups are called profiles. The collection of profiles is found in config/checker_profile_map.json file. The goal of these profile is that you can enable or disable checkers by these profiles. See the output of ""CodeChecker checkers --profile list"" command. Guidelines ------------------------------------------------ There are several coding guidelines like CppCoreGuideline, SEI-CERT, etc. These are collections of best programming practices to avoid common programming errors. Some checkers cover the rules of these guidelines. In CodeChecker there is a mapping between guidelines and checkers. This way you can list and enable those checkers which check the fulfillment of certain guideline rules. See the output of ""CodeChecker checkers --guideline"" command."""""") checkers_opts.add_argument('-e', '--enable', dest=""enable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group), "" ""profile or guideline "" ""to BE USED in the analysis. In case of "" ""ambiguity the priority order is profile, "" ""guideline, checker name (e.g. security "" ""means the profile, not the checker "" ""group). Profiles and guidelines can be "" ""labeled: 'profile:security' or "" ""'guideline:sei-cert'."") checkers_opts.add_argument('-d', '--disable', dest=""disable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group), "" ""profile or guideline "" ""to BE PROHIBITED from use in the "" ""analysis. In case of "" ""ambiguity the priority order is profile, "" ""guideline, checker name (e.g. security "" ""means the profile, not the checker "" ""group). Profiles and guidelines can be "" ""labeled: 'profile:security' or "" ""'guideline:sei-cert'."") checkers_opts.add_argument('--enable-all', dest=""enable_all"", action='store_true', required=False, default=argparse.SUPPRESS, help=""Force the running analyzers to use "" ""almost every checker available. The "" ""checker groups 'alpha.', 'debug.', "" ""'osx.', 'abseil-', 'android-', "" ""'darwin-', 'objc-', 'cppcoreguidelines-', "" ""'fuchsia.', 'fuchsia-', 'hicpp-', 'llvm-', "" ""'llvmlibc-', 'google-', 'zircon-'"" ""'osx.' (on Linux) are NOT enabled "" ""automatically and must be EXPLICITLY "" ""specified. WARNING! Enabling all "" ""checkers might result in the analysis "" ""losing precision and stability, and "" ""could even result in a total failure of "" ""the analysis. USE WISELY AND AT YOUR "" ""OWN RISK!"") output_opts = parser.add_argument_group(""output arguments"") output_opts.add_argument('--print-steps', dest=""print_steps"", action=""store_true"", required=False, default=argparse.SUPPRESS, help=""Print the steps the analyzers took in "" ""finding the reported defect."") parser.add_argument('--review-status', nargs='*', dest=""review_status"", metavar='REVIEW_STATUS', choices=REVIEW_STATUS_VALUES, default=[""confirmed"", ""unreviewed""], help=""Filter results by review statuses. 
Valid "" ""values are: {0}"".format( ', '.join(REVIEW_STATUS_VALUES))) logger.add_verbose_arguments(parser) parser.set_defaults(func=main) " 58220,"def verify_url(url): """"""Verify that a URL can be used to communicate with the Datadog Agent."""""" parsed = compat.parse.urlparse(url) if parsed.scheme not in [""http"", ""https"", ""unix""]: raise ValueError( ""Unsupported protocol '%s' in Agent URL '%s'. Must be: 'http', 'https' or 'unix'."" % (parsed.scheme, url) ) elif parsed.scheme in [""http"", ""https""] and not parsed.hostname: raise ValueError(""Invalid hostname in Agent URL '%s'."" % url) elif parsed.scheme == ""unix"" and not parsed.path: raise ValueError(""Invalid file path in Agent URL '%s'."" % url) return parsed ","def verify_url(url): """"""Verify that a URL can be used to communicate with the Datadog Agent."""""" parsed = compat.parse.urlparse(url) valid_schemes = (""http"", ""https"", ""unix"") if parsed.scheme not in valid_schemes: raise ValueError( ""Unsupported protocol '%s' in Agent URL '%s'. Must be one of: %s."" % (parsed.scheme, url, valid_schemes) ) elif parsed.scheme in [""http"", ""https""] and not parsed.hostname: raise ValueError(""Invalid hostname in Agent URL '%s'."" % url) elif parsed.scheme == ""unix"" and not parsed.path: raise ValueError(""Invalid file path in Agent URL '%s'."" % url) return parsed " 13584,"def interpolate_function(function, parameter_sample, evaluation_points, atol=None, rtol=None, max_interpolation_dofs=None): """"""Parameter separable approximation of a |Function| using Empiricial Interpolation. This method computes a parameter separated |LincombFunction| approximating the input |Function| using Empirical Interpolation :cite`BMNP04`. The actual EI Greedy algorithm is contained in :func:`ei_greedy`. This function acts as a convenience wrapper, which computes the training data and constructs an :class:`~pymor.analyticalproblems.functions.EmpiricalInterpolatedFunction` from the data returned by :func:`ei_greedy`. .. note:: If possible, choose `evaluation_points` identical to the coordinates at which the interpolated function is going to be evaluated. Otherwise `function` will have to be re-evaluated at all new evaluation points for all |parameter values| given by `parameter_sample`. Parameters ---------- function The function to interpolate. parameter_sample A list of |Parameters| for which `function` is evaluate to generate the training data. evaluation_points |NumPy array| of coordinates at which `function` should be evaluated to generate the training data. atol See :func:`ei_greedy`. rtol See :func:`ei_greedy`. max_interpolation_dofs See :func:`ei_greedy`. Returns ------- ei_function The :class:`~pymor.analyticalproblems.functions.EmpiricalInterpolatedFunction` giving the parameter separable approximation of `function`. data `dict` of additional data as returned by :func:`ei_greedy`. 
"""""" assert isinstance(function, Function) assert isinstance(evaluation_points, np.ndarray) and evaluation_points.ndim == 2 and \ evaluation_points.shape[1] == function.dim_domain snapshot_data = NumpyVectorSpace.from_numpy( np.array([function(evaluation_points, mu=mu) for mu in parameter_sample]) ) dofs, basis, ei_data = ei_greedy(snapshot_data, error_norm='sup', atol=atol, rtol=rtol, max_interpolation_dofs=max_interpolation_dofs) ei_function = EmpiricalInterpolatedFunction( function, evaluation_points[dofs], ei_data['interpolation_matrix'], True, parameter_sample, ei_data['coefficients'], evaluation_points=evaluation_points, basis_evaluations=basis.to_numpy() ) return ei_function, ei_data ","def interpolate_function(function, parameter_sample, evaluation_points, atol=None, rtol=None, max_interpolation_dofs=None): """"""Parameter separable approximation of a |Function| using Empiricial Interpolation. This method computes a parameter separated |LincombFunction| approximating the input |Function| using Empirical Interpolation :cite`BMNP04`. The actual EI Greedy algorithm is contained in :func:`ei_greedy`. This function acts as a convenience wrapper, which computes the training data and constructs an :class:`~pymor.analyticalproblems.functions.EmpiricalInterpolatedFunction` from the data returned by :func:`ei_greedy`. .. note:: If possible, choose `evaluation_points` identical to the coordinates at which the interpolated function is going to be evaluated. Otherwise `function` will have to be re-evaluated at all new evaluation points for all |parameter values| given by `parameter_sample`. Parameters ---------- function The function to interpolate. parameter_sample A list of |Parameters| for which `function` is evaluated to generate the training data. evaluation_points |NumPy array| of coordinates at which `function` should be evaluated to generate the training data. atol See :func:`ei_greedy`. rtol See :func:`ei_greedy`. max_interpolation_dofs See :func:`ei_greedy`. Returns ------- ei_function The :class:`~pymor.analyticalproblems.functions.EmpiricalInterpolatedFunction` giving the parameter separable approximation of `function`. data `dict` of additional data as returned by :func:`ei_greedy`. 
"""""" assert isinstance(function, Function) assert isinstance(evaluation_points, np.ndarray) and evaluation_points.ndim == 2 and \ evaluation_points.shape[1] == function.dim_domain snapshot_data = NumpyVectorSpace.from_numpy( np.array([function(evaluation_points, mu=mu) for mu in parameter_sample]) ) dofs, basis, ei_data = ei_greedy(snapshot_data, error_norm='sup', atol=atol, rtol=rtol, max_interpolation_dofs=max_interpolation_dofs) ei_function = EmpiricalInterpolatedFunction( function, evaluation_points[dofs], ei_data['interpolation_matrix'], True, parameter_sample, ei_data['coefficients'], evaluation_points=evaluation_points, basis_evaluations=basis.to_numpy() ) return ei_function, ei_data " 32367,"def main(): SCOs: dict[str, str] = { ""file md5"": ""[file:hashes.md5 ='{}']"", ""file sha1"": ""[file:hashes.sha1 = '{}']"", ""file sha256"": ""[file:hashes.sha256 = '{}']"", ""ssdeep"": ""[file:hashes.ssdeep = '']"", ""ip"": ""[ipv4-addr:value = '{}']"", ""cidr"": ""[ipv4-addr:value = '{}']"", ""ipv6"": ""[ipv6-addr:value = '{}']"", ""ipv6cidr"": ""[ipv6-addr:value = '{}']"", ""url"": ""[url:value = '{}']"", ""email"": ""[email-message:sender_ref.value = '{}']"", ""username"": ""[user-account:account_login = '{}']"", ""domain"": ""[domain-name:value = '{}']"", ""hostname"": ""[domain-name:value = '{}']"", ""registry key"": ""[windows-registry-key:key = '{}']"" } SDOs: dict[str, Callable] = { ""malware"": Malware, ""attack pattern"": AttackPattern, ""campaign"": Campaign, ""infrastructure"": Infrastructure, ""tool"": Tool, ""intrusion set"": IntrusionSet, ""report"": Report, ""threat actor"": ThreatActor, ""cve"": Vulnerability, ""course of action"": CourseOfAction } user_args = demisto.args().get('indicators', 'Unknown') doubleBackslash = demisto.args().get('doubleBackslash', True) all_args = {} if isinstance(user_args, dict): all_args = json.loads(json.dumps(user_args)) else: try: all_args = json.loads(demisto.args().get('indicators', 'Unknown')) except: # noqa: E722 return_error('indicators argument is invalid json object') indicators = [] for indicator_fields in all_args: kwargs: dict[str, Any] = {""allow_custom"": True} demisto_indicator_type = all_args[indicator_fields].get('indicator_type', 'Unknown') if doubleBackslash: value = all_args[indicator_fields].get('value', '').replace('\\', r'\\') else: value = all_args[indicator_fields].get('value', '') demisto_score = all_args[indicator_fields].get('score', '').lower() if demisto_score in [""bad"", ""malicious""]: kwargs[""score""] = ""High"" elif demisto_score == ""suspicious"": kwargs[""score""] = ""Medium"" elif demisto_score in [""good"", ""benign""]: kwargs[""score""] = ""None"" else: kwargs[""score""] = ""Not Specified"" kwargs[""created""] = dateparser.parse(all_args[indicator_fields].get('timestamp', '')) kwargs[""modified""] = dateparser.parse(all_args[indicator_fields].get('lastSeen', f'{kwargs[""created""]}')) kwargs[""id""] = all_args[indicator_fields].get('stixid', '') kwargs[""labels""] = [demisto_indicator_type.lower()] kwargs[""description""] = all_args[indicator_fields].get('description', '') kwargs = {k: v for k, v in kwargs.items() if v} # Removing keys with empty strings try: indicator_type = demisto_indicator_type.lower().replace(""-"", """") indicator = Indicator(pattern=SCOs[indicator_type].format(value), pattern_type='stix', **kwargs) indicators.append(indicator) except KeyError: try: indicator_type = demisto_indicator_type.lower() if indicator_type == 'cve': kwargs[""external_references""] = 
[ExternalReference(source_name=""cve"", external_id=value)] elif indicator_type == ""attack pattern"": try: mitreid = all_args[indicator_fields].get('mitreid', '') if mitreid: kwargs[""external_references""] = [ExternalReference(source_name=""mitre"", external_id=mitreid)] except KeyError: pass indicator = SDOs[indicator_type]( name=value, **kwargs ) indicators.append(indicator) except (KeyError, TypeError) as e: demisto.info( ""Indicator type: {}, with the value: {} is not STIX compatible"".format(demisto_indicator_type, value)) demisto.info(""Export failure excpetion: {}"".format(e)) continue if len(indicators) > 1: bundle = Bundle(indicators) context = { 'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(bundle)) } res = (CommandResults(readable_output="""", outputs=context, raw_response=str(bundle))) elif len(indicators) == 1: context = { 'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(indicators[0])) } res = (CommandResults(readable_output="""", outputs=context, raw_response=str(indicators[0]))) else: context = { 'StixExportedIndicators': {} } res = CommandResults(readable_output="""", outputs=context, raw_response={}) return_results(res) ","def main(): SCOs: dict[str, str] = { 'file md5': 'file:hashes.md5', ""file sha1"": ""[file:hashes.sha1 = '{}']"", ""file sha256"": ""[file:hashes.sha256 = '{}']"", ""ssdeep"": ""[file:hashes.ssdeep = '']"", ""ip"": ""[ipv4-addr:value = '{}']"", ""cidr"": ""[ipv4-addr:value = '{}']"", ""ipv6"": ""[ipv6-addr:value = '{}']"", ""ipv6cidr"": ""[ipv6-addr:value = '{}']"", ""url"": ""[url:value = '{}']"", ""email"": ""[email-message:sender_ref.value = '{}']"", ""username"": ""[user-account:account_login = '{}']"", ""domain"": ""[domain-name:value = '{}']"", ""hostname"": ""[domain-name:value = '{}']"", ""registry key"": ""[windows-registry-key:key = '{}']"" } SDOs: dict[str, Callable] = { ""malware"": Malware, ""attack pattern"": AttackPattern, ""campaign"": Campaign, ""infrastructure"": Infrastructure, ""tool"": Tool, ""intrusion set"": IntrusionSet, ""report"": Report, ""threat actor"": ThreatActor, ""cve"": Vulnerability, ""course of action"": CourseOfAction } user_args = demisto.args().get('indicators', 'Unknown') doubleBackslash = demisto.args().get('doubleBackslash', True) all_args = {} if isinstance(user_args, dict): all_args = json.loads(json.dumps(user_args)) else: try: all_args = json.loads(demisto.args().get('indicators', 'Unknown')) except: # noqa: E722 return_error('indicators argument is invalid json object') indicators = [] for indicator_fields in all_args: kwargs: dict[str, Any] = {""allow_custom"": True} demisto_indicator_type = all_args[indicator_fields].get('indicator_type', 'Unknown') if doubleBackslash: value = all_args[indicator_fields].get('value', '').replace('\\', r'\\') else: value = all_args[indicator_fields].get('value', '') demisto_score = all_args[indicator_fields].get('score', '').lower() if demisto_score in [""bad"", ""malicious""]: kwargs[""score""] = ""High"" elif demisto_score == ""suspicious"": kwargs[""score""] = ""Medium"" elif demisto_score in [""good"", ""benign""]: kwargs[""score""] = ""None"" else: kwargs[""score""] = ""Not Specified"" kwargs[""created""] = dateparser.parse(all_args[indicator_fields].get('timestamp', '')) kwargs[""modified""] = dateparser.parse(all_args[indicator_fields].get('lastSeen', f'{kwargs[""created""]}')) kwargs[""id""] = all_args[indicator_fields].get('stixid', '') kwargs[""labels""] = [demisto_indicator_type.lower()] 
kwargs[""description""] = all_args[indicator_fields].get('description', '') kwargs = {k: v for k, v in kwargs.items() if v} # Removing keys with empty strings try: indicator_type = demisto_indicator_type.lower().replace(""-"", """") indicator = Indicator(pattern=SCOs[indicator_type].format(value), pattern_type='stix', **kwargs) indicators.append(indicator) except KeyError: try: indicator_type = demisto_indicator_type.lower() if indicator_type == 'cve': kwargs[""external_references""] = [ExternalReference(source_name=""cve"", external_id=value)] elif indicator_type == ""attack pattern"": try: mitreid = all_args[indicator_fields].get('mitreid', '') if mitreid: kwargs[""external_references""] = [ExternalReference(source_name=""mitre"", external_id=mitreid)] except KeyError: pass indicator = SDOs[indicator_type]( name=value, **kwargs ) indicators.append(indicator) except (KeyError, TypeError) as e: demisto.info( ""Indicator type: {}, with the value: {} is not STIX compatible"".format(demisto_indicator_type, value)) demisto.info(""Export failure excpetion: {}"".format(e)) continue if len(indicators) > 1: bundle = Bundle(indicators) context = { 'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(bundle)) } res = (CommandResults(readable_output="""", outputs=context, raw_response=str(bundle))) elif len(indicators) == 1: context = { 'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(indicators[0])) } res = (CommandResults(readable_output="""", outputs=context, raw_response=str(indicators[0]))) else: context = { 'StixExportedIndicators': {} } res = CommandResults(readable_output="""", outputs=context, raw_response={}) return_results(res) " 32817,"def set_http_meta(config, span, method=None, url=None, status_code=None): if method: span.set_tag(http.METHOD, method) if url: span.set_tag(http.URL, url) if status_code: span.set_tag(http.STATUS_CODE, status_code) if 500 <= int(status_code) < 600: span.error = 1 ","def set_http_meta(config, span, method=None, url=None, status_code=None): if method: span.meta[http.METHOD] = method if url: span.set_tag(http.URL, url) if status_code: span.set_tag(http.STATUS_CODE, status_code) if 500 <= int(status_code) < 600: span.error = 1 " 36588,"def compilerCanOptimize(): """""" Return True iff the default Xcode version can use PGO """""" # The version check is pretty conservative, can be # adjusted after testing mac_ver = tuple(map(int, platform.mac_ver()[0].split('.'))) return mac_ver >= (10, 15) ","def compilerCanOptimize(): """""" Return True iff the default Xcode version can use PGO and LTO """""" # The version check is pretty conservative, can be # adjusted after testing mac_ver = tuple(map(int, platform.mac_ver()[0].split('.'))) return mac_ver >= (10, 15) " 34727,"def create_data_generators( model_data: RasaModelData, batch_sizes: Union[int, List[int]], epochs: int, batch_strategy: Text = SEQUENCE, eval_num_examples: int = 0, random_seed: Optional[int] = None, shuffle: bool = True, ) -> Tuple[RasaBatchDataGenerator, Optional[RasaBatchDataGenerator]]: """"""Create data generators for train and optional validation data. Args: model_data: The model data to use. batch_sizes: The batch size(s). epochs: The number of epochs to train. batch_strategy: The batch strategy to use. eval_num_examples: Number of examples to use for validation data. random_seed: The random seed. shuffle: Whether to shuffle data inside the data generator Returns: The training data generator and optional validation data generator. 
"""""" validation_data_generator = None if eval_num_examples > 0: model_data, evaluation_model_data = model_data.split( eval_num_examples, random_seed, ) validation_data_generator = RasaBatchDataGenerator( evaluation_model_data, batch_size=batch_sizes, epochs=epochs, batch_strategy=batch_strategy, shuffle=shuffle, ) data_generator = RasaBatchDataGenerator( model_data, batch_size=batch_sizes, epochs=epochs, batch_strategy=batch_strategy, shuffle=shuffle, ) return data_generator, validation_data_generator ","def create_data_generators( model_data: RasaModelData, batch_sizes: Union[int, List[int]], epochs: int, batch_strategy: Text = SEQUENCE, eval_num_examples: int = 0, random_seed: Optional[int] = None, shuffle: bool = True, ) -> Tuple[RasaBatchDataGenerator, Optional[RasaBatchDataGenerator]]: """"""Create data generators for train and optional validation data. Args: model_data: The model data to use. batch_sizes: The batch size(s). epochs: The number of epochs to train. batch_strategy: The batch strategy to use. eval_num_examples: Number of examples to use for validation data. random_seed: The random seed. shuffle: Whether to shuffle data inside the data generator. Returns: The training data generator and optional validation data generator. """""" validation_data_generator = None if eval_num_examples > 0: model_data, evaluation_model_data = model_data.split( eval_num_examples, random_seed, ) validation_data_generator = RasaBatchDataGenerator( evaluation_model_data, batch_size=batch_sizes, epochs=epochs, batch_strategy=batch_strategy, shuffle=shuffle, ) data_generator = RasaBatchDataGenerator( model_data, batch_size=batch_sizes, epochs=epochs, batch_strategy=batch_strategy, shuffle=shuffle, ) return data_generator, validation_data_generator " 50025,"def file_obj_to_premis(file_obj): """""" Converts an File model object to a PREMIS event object via metsrw. Returns: metsrw.plugins.premisrw.premis.PREMISObject """""" premis_digest_algorithm = convert_to_premis_hash_function(file_obj.checksumtype) premis_data = ( ""object"", metsrw.plugins.premisrw.PREMIS_2_2_META, ( ""object_identifier"", (""object_identifier_type"", ""UUID""), (""object_identifier_value"", file_obj.uuid), ), ( ""object_characteristics"", (""composition_level"", ""0""), ( ""fixity"", (""message_digest_algorithm"", premis_digest_algorithm), (""message_digest"", file_obj.checksum), ), (""size"", str(file_obj.size)), get_premis_format_data(file_obj.fileid_set.all()), ( ""creating_application"", ( ""date_created_by_application"", file_obj.modificationtime.strftime(""%Y-%m-%d""), ), ), ), (""original_name"", escape(file_obj.originallocation)), ) + get_premis_relationship_data( file_obj.related_is_source_of, file_obj.related_has_source ) return metsrw.plugins.premisrw.data_to_premis(premis_data) ","def file_obj_to_premis(file_obj): """""" Converts an File model object to a PREMIS event object via metsrw. 
Returns: metsrw.plugins.premisrw.premis.PREMISObject """""" premis_digest_algorithm = convert_to_premis_hash_function(file_obj.checksumtype) premis_data = ( ""object"", metsrw.plugins.premisrw.PREMIS_2_2_META, ( ""object_identifier"", (""object_identifier_type"", ""UUID""), (""object_identifier_value"", file_obj.uuid), ), ( ""object_characteristics"", (""composition_level"", ""0""), ( ""fixity"", (""message_digest_algorithm"", premis_digest_algorithm), (""message_digest"", file_obj.checksum), ), (""size"", str(file_obj.size)), get_premis_format_data(file_obj.fileid_set.all()), ( ""creating_application"", ( ""date_created_by_application"", file_obj.modificationtime.strftime(""%Y-%m-%d""), ), ), ), (""original_name"", escape(file_obj.originallocation)), ) + get_premis_relationship_data( file_obj.related_is_source_of, file_obj.related_has_source ) return metsrw.plugins.premisrw.data_to_premis( premis_data, metsrw.plugins.premisrw.PREMIS_3_0_VERSION) " 39584,"def compile_insert_unless_conflict( subject: irast.Set, stmt: irast.InsertStmt, insert_subject: qlast.Path, shape: List[qlast.ShapeElement], constraint_spec: qlast.Expr, else_branch: Optional[qlast.Expr], *, ctx: context.ContextLevel, ) -> Tuple[Optional[irast.ConstraintRef], Optional[Tuple[irast.Set, irast.Set]]]: with ctx.new() as constraint_ctx: constraint_ctx.partial_path_prefix = subject # We compile the name here so we can analyze it, but we don't do # anything else with it. cspec_res = setgen.ensure_set(dispatch.compile( constraint_spec, ctx=constraint_ctx), ctx=constraint_ctx) if not cspec_res.rptr: raise errors.QueryError( 'ON CONFLICT argument must be a property', context=constraint_spec.context, ) if cspec_res.rptr.source.path_id != subject.path_id: raise errors.QueryError( 'ON CONFLICT argument must be a property of the ' 'type being inserted', context=constraint_spec.context, ) schema = ctx.env.schema schema, ptr = ( typeutils.ptrcls_from_ptrref(cspec_res.rptr.ptrref, schema=schema)) if not isinstance(ptr, s_pointers.Pointer): raise errors.QueryError( 'ON CONFLICT property must be a property', context=constraint_spec.context, ) if ptr.get_cardinality(schema) != qltypes.SchemaCardinality.ONE: raise errors.QueryError( 'ON CONFLICT property must be a SINGLE property', context=constraint_spec.context, ) ptr = ptr.get_nearest_non_derived_parent(schema) exclusive_constr: s_constr.Constraint = schema.get('std::exclusive') ex_cnstrs = [c for c in ptr.get_constraints(schema).objects(schema) if c.issubclass(schema, exclusive_constr)] if len(ex_cnstrs) != 1: raise errors.QueryError( 'ON CONFLICT property must have a single exclusive constraint', context=constraint_spec.context, ) module_id = schema.get_global( s_mod.Module, ptr.get_name(schema).module).id # Find the source expression corresponding to our field # FIXME: Is there a better way to do this? field_name = cspec_res.rptr.ptrref.shortname.split('::')[-1] for i, elem in enumerate(shape): if ( isinstance(elem.expr, qlast.Path) and len(elem.expr.steps) == 1 and isinstance(elem.expr.steps[0], qlast.Ptr) and isinstance(elem.expr.steps[0].ptr, qlast.ObjectRef) and elem.expr.steps[0].ptr.name == field_name ): idx = i break else: raise errors.QueryError( 'INSERT ON CONFLICT property requires matching shape', context=constraint_spec.context, ) elem_fixed = copy.copy(elem) # Lift the index element out into an anchor. # FIXME: The goal here is to avoid duplicating the computation of the # index element if it is volatile but it doesn't actually work yet. 
new_set = setgen.ensure_set(dispatch.compile( elem.compexpr, ctx=ctx), ctx=ctx) ctx.anchors = ctx.anchors.copy() source_alias = ctx.aliases.get('a') ctx.anchors[source_alias] = new_set elem_fixed.compexpr = qlast.Path( steps=[qlast.ObjectRef(name=source_alias)]) shape[idx] = elem_fixed ctx.env.schema = schema # Compile an else branch else_info = None if else_branch: with ctx.subquery() as ectx: # Produce a query that finds the conflicting objects nobe = qlast.SelectQuery( result=insert_subject, where=qlast.BinOp( op='=', left=constraint_spec, right=elem_fixed.compexpr ), ) select_ir = dispatch.compile(nobe, ctx=ectx) select_ir = setgen.scoped_set( select_ir, force_reassign=True, ctx=ectx) assert isinstance(select_ir, irast.Set) # The ELSE needs to be able to reference the subject in an # UPDATE, even though that would normally be prohibited. ectx.path_scope.factoring_allowlist.add(subject.path_id) # Compile else else_ir = dispatch.compile(else_branch, ctx=ectx) assert isinstance(else_ir, irast.Set) else_info = select_ir, else_ir return ( irast.ConstraintRef(id=ex_cnstrs[0].id, module_id=module_id), else_info ) ","def compile_insert_unless_conflict( subject: irast.Set, stmt: irast.InsertStmt, insert_subject: qlast.Path, shape: List[qlast.ShapeElement], constraint_spec: qlast.Expr, else_branch: Optional[qlast.Expr], *, ctx: context.ContextLevel, ) -> Tuple[Optional[irast.ConstraintRef], Optional[Tuple[irast.Set, irast.Set]]]: with ctx.new() as constraint_ctx: constraint_ctx.partial_path_prefix = subject # We compile the name here so we can analyze it, but we don't do # anything else with it. cspec_res = setgen.ensure_set(dispatch.compile( constraint_spec, ctx=constraint_ctx), ctx=constraint_ctx) if not cspec_res.rptr: raise errors.QueryError( 'ON CONFLICT argument must be a property', context=constraint_spec.context, ) if cspec_res.rptr.source.path_id != subject.path_id: raise errors.QueryError( 'ON CONFLICT argument must be a property of the ' 'type being inserted', context=constraint_spec.context, ) schema = ctx.env.schema schema, ptr = ( typeutils.ptrcls_from_ptrref(cspec_res.rptr.ptrref, schema=schema)) if not isinstance(ptr, s_pointers.Pointer): raise errors.QueryError( 'ON CONFLICT property must be a property', context=constraint_spec.context, ) if ptr.get_cardinality(schema) != qltypes.SchemaCardinality.ONE: raise errors.QueryError( 'ON CONFLICT property must be a SINGLE property', context=constraint_spec.context, ) ptr = ptr.get_nearest_non_derived_parent(schema) exclusive_constr: s_constr.Constraint = schema.get('std::exclusive') ex_cnstrs = [c for c in ptr.get_constraints(schema).objects(schema) if c.issubclass(schema, exclusive_constr)] if len(ex_cnstrs) != 1: raise errors.QueryError( 'ON CONFLICT property must have a single exclusive constraint', context=constraint_spec.context, ) module_id = schema.get_global( s_mod.Module, ptr.get_name(schema).module).id # Find the source expression corresponding to our field # FIXME: Is there a better way to do this? field_name = cspec_res.rptr.ptrref.shortname.split('::')[-1] for i, elem in enumerate(shape): if ( isinstance(elem.expr, qlast.Path) and len(elem.expr.steps) == 1 and isinstance(elem.expr.steps[0], qlast.Ptr) and isinstance(elem.expr.steps[0].ptr, qlast.ObjectRef) and elem.expr.steps[0].ptr.name == field_name ): idx = i break else: raise errors.QueryError( 'INSERT ON CONFLICT property requires matching shape', context=constraint_spec.context, ) elem_fixed = copy.copy(elem) # Lift the index element out into an anchor. 
# FIXME: The goal here is to avoid duplicating the computation of the # index element if it is volatile but it doesn't actually work yet. new_set = setgen.ensure_set(dispatch.compile( elem.compexpr, ctx=ctx), ctx=ctx) ctx.anchors = ctx.anchors.copy() source_alias = ctx.aliases.get('a') ctx.anchors[source_alias] = new_set elem_fixed.compexpr = qlast.Path( steps=[qlast.ObjectRef(name=source_alias)]) shape[idx] = elem_fixed ctx.env.schema = schema # Compile an else branch else_info = None if else_branch: with ctx.subquery() as ectx: # Produce a query that finds the conflicting objects nobe = qlast.SelectQuery( result=insert_subject, where=qlast.BinOp( op='=', left=constraint_spec, right=elem_fixed.compexpr ), ) select_ir = dispatch.compile(nobe, ctx=ectx) select_ir = setgen.scoped_set( select_ir, force_reassign=True, ctx=ectx) assert isinstance(select_ir, irast.Set) # The ELSE needs to be able to reference the subject in an # UPDATE, even though that would normally be prohibited. ectx.path_scope.factoring_allowlist.add(subject.path_id) # Compile else else_ir = dispatch.compile(astutils.ensure_qlstmt(else_branch), ctx=ectx) assert isinstance(else_ir, irast.Set) else_info = select_ir, else_ir return ( irast.ConstraintRef(id=ex_cnstrs[0].id, module_id=module_id), else_info ) " 23056,"def test_read_inconsistent_schema_pyarrow(tmpdir): check_pyarrow() # Note: This is a proxy test for a cudf-related issue fix # (see cudf#5062 github issue). The cause of that issue is # schema inconsistencies that do not actually correspond to # different types, but whether or not the file/column contains # null values. df1 = pd.DataFrame({""id"": [0, 1], ""val"": [10, 20]}) df2 = pd.DataFrame({""id"": [2, 3], ""val"": [30, 40]}) desired_type = ""int64"" other_type = ""int32"" df1.val = df1.val.astype(desired_type) df2.val = df2.val.astype(other_type) df_expect = pd.concat([df1, df2], ignore_index=True) df_expect.val = df_expect.val.astype(desired_type) df1.to_parquet(os.path.join(tmpdir, ""0.parquet"")) df2.to_parquet(os.path.join(tmpdir, ""1.parquet"")) # Read Directory check = dd.read_parquet(str(tmpdir), dataset={""validate_schema"": False}) assert_eq(check.compute(), df_expect, check_index=False) # Read List check = dd.read_parquet( os.path.join(tmpdir, ""*.parquet""), dataset={""validate_schema"": False} ) assert_eq(check.compute(), df_expect, check_index=False) ","def test_read_inconsistent_schema_pyarrow(tmpdir): check_pyarrow() # Note: This is a proxy test for a cudf-related issue fix # (see cudf#5062 github issue). The cause of that issue is # schema inconsistencies that do not actually correspond to # different types, but whether or not the file/column contains # null values. 
df1 = pd.DataFrame({""id"": [0, 1], ""val"": [10, 20]}) df2 = pd.DataFrame({""id"": [2, 3], ""val"": [30, 40]}) desired_type = ""int64"" other_type = ""int32"" df1.val = df1.val.astype(desired_type) df2.val = df2.val.astype(other_type) df_expect = pd.concat([df1, df2], ignore_index=True) df_expect['val'] = df_expect.val.astype(desired_type) df1.to_parquet(os.path.join(tmpdir, ""0.parquet"")) df2.to_parquet(os.path.join(tmpdir, ""1.parquet"")) # Read Directory check = dd.read_parquet(str(tmpdir), dataset={""validate_schema"": False}) assert_eq(check.compute(), df_expect, check_index=False) # Read List check = dd.read_parquet( os.path.join(tmpdir, ""*.parquet""), dataset={""validate_schema"": False} ) assert_eq(check.compute(), df_expect, check_index=False) " 31996,"def create_time(given_time) -> str: """"""converts given argument time to iso format, if received None returns None"""""" if not given_time: return given_time datetime_time = arg_to_datetime(given_time) if not datetime_time: raise DemistoException(""Time parameter supplied is invalid, please supply a valid argument"") return datetime_time.strftime(""%Y-%m-%dT%H:%M:%S.%fZ"") ","def create_time(given_time) -> str: """"""converts given argument time to iso format, if received None returns None"""""" if not given_time: return None datetime_time = arg_to_datetime(given_time) if not datetime_time: raise DemistoException(""Time parameter supplied is invalid, please supply a valid argument"") return datetime_time.strftime(""%Y-%m-%dT%H:%M:%S.%fZ"") " 25764,"def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1, max_iterations=100, track_iterations=False, **kwargs): ''' Iterative linear optimization updating the line parameters for passive AC and DC lines. This is helpful when line expansion is enabled. After each successful solving, line impedances and line resistance are recalculated based on the optimization result. If warmstart is possible, it uses the result from the previous iteration to speed up the optimization. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots msq_threshold: float, default 0.05 Maximal mean square difference between optimized line capacity of the current and the previous iteration. As soon as this threshold is undercut, and the number of iterations is bigger than 'min_iterations' the iterative optimization stops min_iterations : integer, default 1 Minimal number of iterations to run regardless of whether the msq_threshold is already undercut max_iterations : integer, default 100 Maximal number of iterations to run regardless of whether msq_threshold is already undercut track_iterations: bool, default False If True, the intermediate branch capacity steps and values of the objective function are recorded for each iteration. The values of iteration 0 stand for the starting point.
**kwargs Keyword arguments of the lopf function which runs at each iteration ''' n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier) ext_i = get_extendable_i(n, 'Line') typed_i = n.lines.query('type != """"').index ext_untyped_i = ext_i.difference(typed_i) ext_typed_i = ext_i & typed_i base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * n.lines.bus0.map(n.buses.v_nom)) n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i] def update_line_params(n, s_nom_prev): factor = n.lines.s_nom_opt / s_nom_prev for attr, carrier in (('x', 'AC'), ('r', 'DC')): ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i) n.lines.loc[ln_i, attr] /= factor[ln_i] ln_i = ext_i & typed_i n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i] def msq_diff(n, s_nom_prev): lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \ n.lines['s_nom_opt'].mean() logger.info(f""Mean square difference after iteration {iteration} is "" f""{lines_err}"") return lines_err def save_optimal_capacities(n, iteration, status): for c, attr in pd.Series(nominal_attrs)[n.branch_components].items(): n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt'] setattr(n, f""status_{iteration}"", status) setattr(n, f""objective_{iteration}"", n.objective) n.iteration = iteration if track_iterations: for c, attr in pd.Series(nominal_attrs)[n.branch_components].items(): n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}'] iteration = 1 kwargs['store_basis'] = True diff = msq_threshold while diff >= msq_threshold or iteration < min_iterations: if iteration > max_iterations: logger.info(f'Iteration {iteration} beyond max_iterations ' f'{max_iterations}. Stopping ...') break s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__())) status, termination_condition = network_lopf(n, snapshots, **kwargs) assert status == 'ok', ('Optimization failed with status {status}' 'and termination {termination_condition}') if track_iterations: save_optimal_capacities(n, iteration, status) update_line_params(n, s_nom_prev) diff = msq_diff(n, s_nom_prev) iteration += 1 logger.info('Running last lopf with fixed branches, overwrite p_nom ' 'for links and s_nom for lines') ext_links_i = get_extendable_i(n, 'Link') n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False network_lopf(n, snapshots, **kwargs) n.lines.loc[ext_i, 's_nom_extendable'] = True n.links.loc[ext_links_i, 'p_nom_extendable'] = True ","def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1, max_iterations=100, track_iterations=False, **kwargs): ''' Iterative linear optimization updating the line parameters for passive AC and DC lines. This is helpful when line expansion is enabled. After each sucessful solving, line impedances and line resistance are recalculated based on the optimization result. If warmstart is possible, it uses the result from the previous iteration to fasten the optimization. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots msq_threshold: float, default 0.05 Maximal mean square difference between optimized line capacity of the current and the previous iteration. 
As soon as this threshold is undercut, and the number of iterations is bigger than 'min_iterations' the iterative optimization stops min_iterations : integer, default 1 Minimal number of iteration to run regardless whether the msq_threshold is already undercut max_iterations : integer, default 100 Maximal numbder of iterations to run regardless whether msq_threshold is already undercut track_iterations: bool, default False If True, the intermediate branch capacity steps and values of the objective function are recorded for each iteration. The values of iteration 0 stand for the starting point. **kwargs Keyword arguments of the lopf function which runs at each iteration ''' n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier) ext_i = get_extendable_i(n, 'Line') typed_i = n.lines.query('type != """"').index ext_untyped_i = ext_i.difference(typed_i) ext_typed_i = ext_i & typed_i base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * n.lines.bus0.map(n.buses.v_nom)) n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i] def update_line_params(n, s_nom_prev): factor = n.lines.s_nom_opt / s_nom_prev for attr, carrier in (('x', 'AC'), ('r', 'DC')): ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i) n.lines.loc[ln_i, attr] /= factor[ln_i] ln_i = ext_i & typed_i n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i] def msq_diff(n, s_nom_prev): lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \ n.lines['s_nom_opt'].mean() logger.info(f""Mean square difference after iteration {iteration} is "" f""{lines_err}"") return lines_err def save_optimal_capacities(n, iteration, status): for c, attr in pd.Series(nominal_attrs)[n.branch_components].items(): n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt'] setattr(n, f""status_{iteration}"", status) setattr(n, f""objective_{iteration}"", n.objective) n.iteration = iteration if track_iterations: for c, attr in pd.Series(nominal_attrs)[n.branch_components].items(): n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}'] iteration = 1 kwargs['store_basis'] = True diff = msq_threshold while diff >= msq_threshold or iteration < min_iterations: if iteration > max_iterations: logger.info(f'Iteration {iteration} beyond max_iterations ' f'{max_iterations}. Stopping ...') break s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__())) status, termination_condition = network_lopf(n, snapshots, **kwargs) assert status == 'ok', ('Optimization failed with status {status}' f'and termination {termination_condition}') if track_iterations: save_optimal_capacities(n, iteration, status) update_line_params(n, s_nom_prev) diff = msq_diff(n, s_nom_prev) iteration += 1 logger.info('Running last lopf with fixed branches, overwrite p_nom ' 'for links and s_nom for lines') ext_links_i = get_extendable_i(n, 'Link') n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False network_lopf(n, snapshots, **kwargs) n.lines.loc[ext_i, 's_nom_extendable'] = True n.links.loc[ext_links_i, 'p_nom_extendable'] = True " 34122,"def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=None, zmin=1, out=None): # pragma: no cover """"""Print and plot the confusion matrix for the intent classification. 
Normalization can be applied by setting `normalize=True`."""""" import matplotlib.pyplot as plt from matplotlib.colors import LogNorm zmax = cm.max() plt.clf() if not cmap: cmap = plt.cm.Blues plt.imshow(cm, interpolation='nearest', cmap=cmap, aspect='auto', norm=LogNorm(vmin=zmin, vmax=zmax)) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] logger.info(""Normalized confusion matrix: \n{}"".format(cm)) else: logger.info(""Confusion matrix, without normalization: \n{}"".format(cm)) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment=""center"", color=""white"" if cm[i, j] > thresh else ""black"") plt.ylabel('True label') plt.xlabel('Predicted label') # save confusion matrix to file before showing it if out: fig = plt.gcf() fig.set_size_inches(20, 20) fig.savefig(out, bbox_inches='tight') ","def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=None, zmin=1, out=None) -> None: # pragma: no cover """"""Print and plot the confusion matrix for the intent classification. Normalization can be applied by setting `normalize=True`."""""" import matplotlib.pyplot as plt from matplotlib.colors import LogNorm zmax = cm.max() plt.clf() if not cmap: cmap = plt.cm.Blues plt.imshow(cm, interpolation='nearest', cmap=cmap, aspect='auto', norm=LogNorm(vmin=zmin, vmax=zmax)) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] logger.info(""Normalized confusion matrix: \n{}"".format(cm)) else: logger.info(""Confusion matrix, without normalization: \n{}"".format(cm)) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment=""center"", color=""white"" if cm[i, j] > thresh else ""black"") plt.ylabel('True label') plt.xlabel('Predicted label') # save confusion matrix to file before showing it if out: fig = plt.gcf() fig.set_size_inches(20, 20) fig.savefig(out, bbox_inches='tight') " 8680,"def handle_list(options): """"""Display a list of configuration available from Sopel's homedir :param options: argument parser's parsed option This command display an unordered list of config's name from the default Sopel's homedir, without their extension:: $ sopel-config list default custom It is possible to filter by extension using the ``-e/--ext/--extension`` option, default to ``.cfg`` (the ``.`` prefix is not required). """""" display_path = getattr(options, 'display_path', False) extension = getattr(options, 'extension', '.cfg') if not extension.startswith('.'): extension = '.' 
+ extension configs = utils.enumerate_configs(config.DEFAULT_HOMEDIR, extension) for config_filename in configs: if display_path: print(os.path.join(config.DEFAULT_HOMEDIR, config_filename)) else: name, _ = os.path.splitext(config_filename) print(name) ","def handle_list(options): """"""Display a list of configuration available from Sopel's homedir :param options: argument parser's parsed option This command display an unordered list of config's name from the default default homedir, without their extensions:: $ sopel-config list default custom It is possible to filter by extension using the ``-e/--ext/--extension`` option, default to ``.cfg`` (the ``.`` prefix is not required). """""" display_path = getattr(options, 'display_path', False) extension = getattr(options, 'extension', '.cfg') if not extension.startswith('.'): extension = '.' + extension configs = utils.enumerate_configs(config.DEFAULT_HOMEDIR, extension) for config_filename in configs: if display_path: print(os.path.join(config.DEFAULT_HOMEDIR, config_filename)) else: name, _ = os.path.splitext(config_filename) print(name) " 7446,"def imsave(fname, arr, **kwargs): """"""Load a tiff image to file. Parameters ---------- fname : str or file File name or file-like-object. arr : ndarray The array to write kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imwrite`` function). Notes ----- Provided by the tifffile library [1]_, and supports many advanced image types including multi-page and floating point. This implementation will set `photomotric='RGB'` when writing if the first or last axis of arr has shape 3 or 4. To override this, explicitly specify the photometric kwarg. This implementation will set `planarconfig='SEPARATE'` when writing if the first axis of arr has shape 3 or 4. To override this, explicitly specify the planarconfig kwarg. References ---------- .. [1] https://pypi.org/project/tifffile/ """""" if arr.shape[0] in [3, 4]: if 'planarconfig' not in kwargs: kwargs['planarconfig'] = 'SEPARATE' rgb = True else: rgb = arr.shape[-1] in [3, 4] if rgb and 'photometric' not in kwargs: kwargs['photometric'] = 'RGB' return tifffile_imwrite(fname, arr, **kwargs) ","def imsave(fname, arr, **kwargs): """"""Load a tiff image to file. Parameters ---------- fname : str or file File name or file-like-object. arr : ndarray The array to write kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imwrite`` function). Notes ----- Provided by the tifffile library [1]_, and supports many advanced image types including multi-page and floating point. This implementation will set `photomotric='RGB'` when writing if the first or last axis of arr has shape 3 or 4. To override this, explicitly specify the photometric kwarg. This implementation will set `planarconfig='SEPARATE'` when writing if the first axis of arr has length 3 or 4. To override this, explicitly specify the planarconfig kwarg. References ---------- .. 
[1] https://pypi.org/project/tifffile/ """""" if arr.shape[0] in [3, 4]: if 'planarconfig' not in kwargs: kwargs['planarconfig'] = 'SEPARATE' rgb = True else: rgb = arr.shape[-1] in [3, 4] if rgb and 'photometric' not in kwargs: kwargs['photometric'] = 'RGB' return tifffile_imwrite(fname, arr, **kwargs) " 49877,"def get_cams_radiation(start_date, end_date, latitude, longitude, email, service='mcclear', altitude=None, time_step='1h', time_ref='UT', verbose=False, integrated=False, label=None, map_variables=True, server='www.soda-is.com'): """""" Retrieve time-series of radiation and/or clear-sky global, beam, and diffuse radiation from CAMS [1]_, [2]_ using the WGET service [3]_. Time coverage: 2004-01-01 to two days ago Access: free, but requires registration, see [1]_ Requests: max. 100 per day Geographical coverage: Worldwide for CAMS McClear and -66° to 66° in both latitude and longitude for CAMS Radiation Parameters ---------- start_date: datetime like First day of the requested period end_date: datetime like Last day of the requested period latitude: float in decimal degrees, between -90 and 90, north is positive (ISO 19115) longitude : float in decimal degrees, between -180 and 180, east is positive (ISO 19115) altitude: float, default: None Altitude in meters. If None, then the altitude is determined from the NASA SRTM database email: str Email address linked to a SoDa account service: {'mcclear', 'cams_radiation'} Specify whether to retrieve CAMS Radiation or McClear parameters time_step: str, {'1min', '15min', '1h', '1d', '1M'}, default: '1h' Time step of the time series, either 1 minute, 15 minute, hourly, daily, or monthly. time_reference: str, {'UT', 'TST'}, default: 'UT' 'UT' (universal time) or 'TST' (True Solar Time) verbose: boolean, default: False Verbose mode outputs additional parameters (aerosols). Only available for 1 minute and universal time. See [1] for parameter description. integrated: boolean, default False Whether to return radiation parameters as integrated values (Wh/m^2) or as average irradiance values (W/m^2) (pvlib preferred units) label: {‘right’, ‘left’}, default: None Which bin edge label to label time-step with. The default is ‘left’ for all time steps except for ‘1M’ which has a default of ‘right’. map_variables: bool, default: True When true, renames columns of the DataFrame to pvlib variable names where applicable. See variable CAMS_RADIATION_VARIABLE_MAP. server: str, default: 'www.soda-is.com' Main server (www.soda-is.com) or backup mirror server (pro.soda-is.com) Returns ------- data: pandas.DataFrame Timeseries data, see Notes for columns meta: dict Metadata for the requested time-series Notes ----- In order to use the CAMS services, users must register for a free SoDa account using an email address [1]_.
The returned data DataFrame includes the following fields: ======================= ====== ========================================== Key, mapped key Format Description ======================= ====== ========================================== **Mapped field names are returned when the map_variables argument is True** --------------------------------------------------------------------------- Observation period str Beginning/end of time period TOA, ghi_extra float Horizontal radiation at top of atmosphere Clear sky GHI, ghi_clear float Clear sky global radiation on horizontal Clear sky BHI, bhi float Clear sky beam radiation on horizontal Clear sky DHI, dhi_clear float Clear sky diffuse radiation on horizontal Clear sky BNI, dni_clear float Clear sky beam radiation normal to sun GHI, ghi* float Global horizontal radiation BHI, bhi* float Beam (direct) radiation on horizontal DHI, dhi* float Diffuse horizontal radiation BNI, dni* float Beam (direct) radiation normal to the sun Reliability* float Fraction of reliable data in summarization ======================= ====== ========================================== *Parameters only returned if service='cams_radiation'. For description of additional output parameters in verbose mode, see [1]_ and [2]_. Note that it is recommended to specify the latitude and longitude to at least the fourth decimal place. Variables corresponding to standard pvlib variables are renamed, e.g. `sza` becomes `solar_zenith`. See the `pvlib.iotools.cams.CAMS_RADIATION_VARIABLE_MAP` dict for the complete mapping. See Also -------- pvlib.iotools.read_cams_radiation, pvlib.iotools.parse_cams_radiation Raises ------ requests.HTTPError If the request is invalid, then an XML file is returned by the CAMS service and the error message will be raised as an expcetion. References ---------- .. [1] `CAMS Radiation Service Info `_ .. [2] `CAMS McClear Service Info `_ .. [3] `CAMS McClear Automatic Access `_ """""" if time_step in TIME_STEPS_MAP.keys(): time_step_str = TIME_STEPS_MAP[time_step] else: warnings.warn('Time step not recognized, 1 hour time step used!') time_step, time_step_str = '1h', 'PT01H' if (verbose is True) & ((time_step != '1min') | (time_ref != 'UT')): verbose = False warnings.warn(""Verbose mode only supports 1 min. 
UT time series!"") # Format verbose variable to the required format: {'true', 'false'} verbose = str(verbose).lower() if altitude is None: # Let SoDa get elevation from the NASA SRTM database altitude = -999 # Start and end date should be in the format: yyyy-mm-dd start_date = start_date.strftime('%Y-%m-%d') end_date = end_date.strftime('%Y-%m-%d') email = email.replace('@', '%2540') # Format email address service = 'get_{}'.format(service.lower()) # Format CAMS service string # Manual format the request url, due to uncommon usage of & and ; in url url = (""http://{}/service/wps?Service=WPS&Request=Execute&"" ""Identifier={}&version=1.0.0&RawDataOutput=irradiation&"" ""DataInputs=latitude={};longitude={};altitude={};"" ""date_begin={};date_end={};time_ref={};summarization={};"" ""username={};verbose={}"" ).format(server, service, latitude, longitude, altitude, start_date, end_date, time_ref, time_step_str, email, verbose) res = requests.get(url) # Invalid requests returns helpful XML error message if res.headers['Content-Type'] == 'application/xml': errors = res.text.split('ows:ExceptionText')[1][1:-2] raise requests.HTTPError(errors, response=res) # Check if returned file is a csv data file elif res.headers['Content-Type'] == 'application/csv': fbuf = io.StringIO(res.content.decode('utf-8')) data, meta = parse_cams_radiation(fbuf, integrated=integrated, label=label, map_variables=map_variables) return data, meta else: warnings.warn('File content type not recognized.') ","def get_cams_radiation(start_date, end_date, latitude, longitude, email, service='mcclear', altitude=None, time_step='1h', time_ref='UT', verbose=False, integrated=False, label=None, map_variables=True, server='www.soda-is.com'): """""" Retrieve time-series of radiation and/or clear-sky global, beam, and diffuse radiation from CAMS [1]_, [2]_ using the WGET service [3]_. Time coverage: 2004-01-01 to two days ago Access: free, but requires registration, see [1]_ Requests: max. 100 per day Geographical coverage: Wordwide for CAMS McClear and -66° to 66° in both latitude and longitude for CAMS Radiation Parameters ---------- start_date: datetime like First day of the requested period end_date: datetime like Last day of the requested period latitude: float in decimal degrees, between -90 and 90, north is positive (ISO 19115) longitude : float in decimal degrees, between -180 and 180, east is positive (ISO 19115) altitude: float, default: None Altitude in meters. If None, then the altitude is determined from the NASA SRTM database email: str Email address linked to a SoDa account service: {'mcclear', 'cams_radiation'} Specify whether to retrieve CAMS Radiation or McClear parameters time_step: str, {'1min', '15min', '1h', '1d', '1M'}, default: '1h' Time step of the time series, either 1 minute, 15 minute, hourly, daily, or monthly. time_reference: str, {'UT', 'TST'}, default: 'UT' 'UT' (universal time) or 'TST' (True Solar Time) verbose: boolean, default: False Verbose mode outputs additional parameters (aerosols). Only avaiable for 1 minute and universal time. See [1] for parameter description. integrated: boolean, default False Whether to return radiation parameters as integrated values (Wh/m^2) or as average irradiance values (W/m^2) (pvlib preferred units) label: {‘right’, ‘left’}, default: None Which bin edge label to label time-step with. The default is ‘left’ for all time steps except for ‘1M’ which has a default of ‘right’. 
map_variables: bool, default: True When true, renames columns of the DataFrame to pvlib variable names where applicable. See variable CAMS_RADIATION_VARIABLE_MAP. server: str, default: 'www.soda-is.com' Main server (www.soda-is.com) or backup mirror server (pro.soda-is.com) Returns ------- data: pandas.DataFrame Timeseries data, see Notes for columns meta: dict Metadata for the requested time-series Notes ----- In order to use the CAMS services, users must registre for a free SoDa account using an email addres [1]_. The returned data DataFrame includes the following fields: ======================= ====== ========================================== Key, mapped key Format Description ======================= ====== ========================================== **Mapped field names are returned when the map_variables argument is True** --------------------------------------------------------------------------- Observation period str Beginning/end of time period TOA, ghi_extra float Horizontal radiation at top of atmosphere Clear sky GHI, ghi_clear float Clear sky global radiation on horizontal Clear sky BHI, bhi float Clear sky beam radiation on horizontal Clear sky DHI, dhi_clear float Clear sky diffuse radiation on horizontal Clear sky BNI, dni_clear float Clear sky beam radiation normal to sun GHI, ghi* float Global horizontal radiation BHI, bhi* float Beam (direct) radiation on horizontal DHI, dhi* float Diffuse horizontal radiation BNI, dni* float Beam (direct) radiation normal to the sun Reliability* float Fraction of reliable data in summarization ======================= ====== ========================================== *Parameters only returned if service='cams_radiation'. For description of additional output parameters in verbose mode, see [1]_ and [2]_. Note that it is recommended to specify the latitude and longitude to at least the fourth decimal place. Variables corresponding to standard pvlib variables are renamed, e.g. `sza` becomes `solar_zenith`. See the `pvlib.iotools.cams.CAMS_RADIATION_VARIABLE_MAP` dict for the complete mapping. See Also -------- pvlib.iotools.read_cams_radiation, pvlib.iotools.parse_cams_radiation Raises ------ requests.HTTPError If the request is invalid, then an XML file is returned by the CAMS service and the error message will be raised as an expcetion. References ---------- .. [1] `CAMS Radiation Service Info `_ .. [2] `CAMS McClear Service Info `_ .. [3] `CAMS McClear Automatic Access `_ """""" try: time_step_str = TIME_STEPS_MAP[time_step] except KeyError: raise ValueError(f'Time step not recognized. Must be one of {list(TIME_STEPS_MAP.keys())}') if (verbose is True) & ((time_step != '1min') | (time_ref != 'UT')): verbose = False warnings.warn(""Verbose mode only supports 1 min. 
UT time series!"") # Format verbose variable to the required format: {'true', 'false'} verbose = str(verbose).lower() if altitude is None: # Let SoDa get elevation from the NASA SRTM database altitude = -999 # Start and end date should be in the format: yyyy-mm-dd start_date = start_date.strftime('%Y-%m-%d') end_date = end_date.strftime('%Y-%m-%d') email = email.replace('@', '%2540') # Format email address service = 'get_{}'.format(service.lower()) # Format CAMS service string # Manual format the request url, due to uncommon usage of & and ; in url url = (""http://{}/service/wps?Service=WPS&Request=Execute&"" ""Identifier={}&version=1.0.0&RawDataOutput=irradiation&"" ""DataInputs=latitude={};longitude={};altitude={};"" ""date_begin={};date_end={};time_ref={};summarization={};"" ""username={};verbose={}"" ).format(server, service, latitude, longitude, altitude, start_date, end_date, time_ref, time_step_str, email, verbose) res = requests.get(url) # Invalid requests returns helpful XML error message if res.headers['Content-Type'] == 'application/xml': errors = res.text.split('ows:ExceptionText')[1][1:-2] raise requests.HTTPError(errors, response=res) # Check if returned file is a csv data file elif res.headers['Content-Type'] == 'application/csv': fbuf = io.StringIO(res.content.decode('utf-8')) data, meta = parse_cams_radiation(fbuf, integrated=integrated, label=label, map_variables=map_variables) return data, meta else: warnings.warn('File content type not recognized.') " 32117,"def should_test_content_pack(pack_name: str, marketplace_version: str, id_set: dict) -> Tuple[bool, str]: """"""Checks if content pack should be tested in the build: - Content pack is not in skipped packs - Content pack is certified - Content pack is not deprecated - Content pack is not supported the marketplace_version Args: pack_name (str): The pack name to check if it should be tested marketplace_version (str): id_set (dict): Structure which holds all content entities to extract pack names from. Returns: bool: True if should be tested, False otherwise """""" if not pack_name: return False, 'Invalid pack name' pack_path = os.path.join(PACKS_DIR, pack_name) if pack_name in SKIPPED_PACKS: return False, 'Pack is either the ""NonSupported"" pack or the ""DeprecatedContent"" pack.' if not is_pack_xsoar_supported(pack_path): return False, 'Pack is not XSOAR supported' if is_pack_deprecated(pack_path): return False, 'Pack is Deprecated' if marketplace_version not in get_pack_supported_marketplace_version(pack_name, id_set): return False, 'Pack is not supported in this marketplace version' return True, '' ","def should_test_content_pack(pack_name: str, marketplace_version: str, id_set: dict) -> Tuple[bool, str]: """"""Checks if content pack should be tested in the build: - Content pack is not in skipped packs - Content pack is certified - Content pack is not deprecated - Content pack is supported in the marketplace_version Args: pack_name (str): The pack name to check if it should be tested marketplace_version (str): id_set (dict): Structure which holds all content entities to extract pack names from. Returns: bool: True if should be tested, False otherwise """""" if not pack_name: return False, 'Invalid pack name' pack_path = os.path.join(PACKS_DIR, pack_name) if pack_name in SKIPPED_PACKS: return False, 'Pack is either the ""NonSupported"" pack or the ""DeprecatedContent"" pack.' 
if not is_pack_xsoar_supported(pack_path): return False, 'Pack is not XSOAR supported' if is_pack_deprecated(pack_path): return False, 'Pack is Deprecated' if marketplace_version not in get_pack_supported_marketplace_version(pack_name, id_set): return False, 'Pack is not supported in this marketplace version' return True, '' " 28055,"def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('products', 'confidentiality') # ### end Alembic commands ### ","def downgrade(): op.drop_column('products', 'confidentiality') " 8305,"def find_targets(run_name, owner, job_id=None): lock_args = [ 'teuthology-lock', '--list-targets', '--desc-pattern', '/' + run_name + '/' + (job_id if job_id else ''), '--status', 'up', '--owner', owner ] proc = subprocess.Popen(lock_args, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() out_obj = yaml.safe_load(stdout) if not out_obj or 'targets' not in out_obj: return {} return out_obj ","def find_targets(run_name, owner, job_id=None): lock_args = [ 'teuthology-lock', '--list-targets', '--desc-pattern', '/' + run_name + '/' + job_id or '' '--status', 'up', '--owner', owner ] proc = subprocess.Popen(lock_args, stdout=subprocess.PIPE) stdout, stderr = proc.communicate() out_obj = yaml.safe_load(stdout) if not out_obj or 'targets' not in out_obj: return {} return out_obj " 55083,"def qnode( device, interface=""autograd"", diff_method=""best"", mutable=True, max_expansion=10, h=1e-7, order=1, shift=np.pi / 2, adjoint_cache=True, argnum=None, **kwargs, ): """"""Decorator for creating QNodes. This decorator is used to indicate to PennyLane that the decorated function contains a :ref:`quantum variational circuit ` that should be bound to a compatible device. The QNode calls the quantum function to construct a :class:`~.QuantumTape` instance representing the quantum circuit. Args: func (callable): a quantum function device (~.Device): a PennyLane-compatible device interface (str): The interface that will be used for classical backpropagation. This affects the types of objects that can be passed to/returned from the QNode: * ``""autograd""``: Allows autograd to backpropogate through the QNode. The QNode accepts default Python types (floats, ints, lists) as well as NumPy array arguments, and returns NumPy arrays. * ``""torch""``: Allows PyTorch to backpropogate through the QNode. The QNode accepts and returns Torch tensors. * ``""tf""``: Allows TensorFlow in eager mode to backpropogate through the QNode. The QNode accepts and returns TensorFlow ``tf.Variable`` and ``tf.tensor`` objects. * ``None``: The QNode accepts default Python types (floats, ints, lists) as well as NumPy array arguments, and returns NumPy arrays. It does not connect to any machine learning library automatically for backpropagation. diff_method (str): the method of differentiation to use in the created QNode. * ``""best""``: Best available method. Uses classical backpropagation or the device directly to compute the gradient if supported, otherwise will use the analytic parameter-shift rule where possible with finite-difference as a fallback. * ``""backprop""``: Use classical backpropagation. Only allowed on simulator devices that are classically end-to-end differentiable, for example :class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned QNode can only be used with the machine-learning framework supported by the device; a separate ``interface`` argument should not be passed. 
* ``""reversible""``: Uses a reversible method for computing the gradient. This method is similar to ``""backprop""``, but trades off increased runtime with significantly lower memory usage. Compared to the parameter-shift rule, the reversible method can be faster or slower, depending on the density and location of parametrized gates in a circuit. Only allowed on (simulator) devices with the ""reversible"" capability, for example :class:`default.qubit <~.DefaultQubit>`. * ``""adjoint""``: Uses an adjoint `method `__ that reverses through the circuit after a forward pass by iteratively applying the inverse (adjoint) gate. This method is similar to the reversible method, but has a lower time overhead and a similar memory overhead. Only allowed on simulator devices such as :class:`default.qubit <~.DefaultQubit>`. * ``""device""``: Queries the device directly for the gradient. Only allowed on devices that provide their own gradient rules. * ``""parameter-shift""``: Use the analytic parameter-shift rule for all supported quantum operation arguments, with finite-difference as a fallback. * ``""finite-diff""``: Uses numerical finite-differences for all quantum operation arguments. mutable (bool): If True, the underlying quantum circuit is re-constructed with every evaluation. This is the recommended approach, as it allows the underlying quantum structure to depend on (potentially trainable) QNode input arguments, however may add some overhead at evaluation time. If this is set to False, the quantum structure will only be constructed on the *first* evaluation of the QNode, and is stored and re-used for further quantum evaluations. Only set this to False if it is known that the underlying quantum structure is **independent of QNode input**. max_expansion (int): The number of times the internal circuit should be expanded when executed on a device. Expansion occurs when an operation or measurement is not supported, and results in a gate decomposition. If any operations in the decomposition remain unsupported by the device, another expansion occurs. h (float): step size for the finite difference method. order (int): The order of the finite difference method to use. ``1`` corresponds to forward finite differences, ``2`` to centered finite differences. shift (float): the size of the shift for two-term parameter-shift gradient computations. adjoint_cache (bool): for TensorFlow and PyTorch interfaces and adjoint differentiation, this indicates whether to save the device state after the forward pass. Doing so saves a forward execution. Device state automatically reused with autograd and JAX interfaces. argnum (int, list(int), None): Which argument(s) to compute the Jacobian with respect to. When there are fewer parameters specified than the total number of trainable parameters, the jacobian is being estimated. Note that this option is only applicable for the following differentiation methods: ``""parameter-shift""``, ``""finite-diff""`` and ``""reversible""``. **kwargs is used to catch all unrecognized keyword arguments and provide a user warning about them. 
**Example** >>> dev = qml.device(""default.qubit"", wires=1) >>> @qml.qnode(dev) >>> def circuit(x): >>> qml.RX(x, wires=0) >>> return expval(qml.PauliZ(0)) """""" @lru_cache() def qfunc_decorator(func): """"""The actual decorator"""""" qn = QNode( func, device, interface=interface, diff_method=diff_method, mutable=mutable, max_expansion=max_expansion, h=h, order=order, shift=shift, adjoint_cache=adjoint_cache, argnum=argnum, **kwargs, ) return update_wrapper(qn, func) return qfunc_decorator ","def qnode( device, interface=""autograd"", diff_method=""best"", mutable=True, max_expansion=10, h=1e-7, order=1, shift=np.pi / 2, adjoint_cache=True, argnum=None, **kwargs, ): """"""Decorator for creating QNodes. This decorator is used to indicate to PennyLane that the decorated function contains a :ref:`quantum variational circuit ` that should be bound to a compatible device. The QNode calls the quantum function to construct a :class:`~.QuantumTape` instance representing the quantum circuit. Args: func (callable): a quantum function device (~.Device): a PennyLane-compatible device interface (str): The interface that will be used for classical backpropagation. This affects the types of objects that can be passed to/returned from the QNode: * ``""autograd""``: Allows autograd to backpropagate through the QNode. The QNode accepts default Python types (floats, ints, lists) as well as NumPy array arguments, and returns NumPy arrays. * ``""torch""``: Allows PyTorch to backpropagate through the QNode. The QNode accepts and returns Torch tensors. * ``""tf""``: Allows TensorFlow in eager mode to backpropagate through the QNode. The QNode accepts and returns TensorFlow ``tf.Variable`` and ``tf.tensor`` objects. * ``None``: The QNode accepts default Python types (floats, ints, lists) as well as NumPy array arguments, and returns NumPy arrays. It does not connect to any machine learning library automatically for backpropagation. diff_method (str): the method of differentiation to use in the created QNode. * ``""best""``: Best available method. Uses classical backpropagation or the device directly to compute the gradient if supported, otherwise will use the analytic parameter-shift rule where possible with finite-difference as a fallback. * ``""backprop""``: Use classical backpropagation. Only allowed on simulator devices that are classically end-to-end differentiable, for example :class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned QNode can only be used with the machine-learning framework supported by the device; a separate ``interface`` argument should not be passed. * ``""reversible""``: Uses a reversible method for computing the gradient. This method is similar to ``""backprop""``, but trades off increased runtime with significantly lower memory usage. Compared to the parameter-shift rule, the reversible method can be faster or slower, depending on the density and location of parametrized gates in a circuit. Only allowed on (simulator) devices with the ""reversible"" capability, for example :class:`default.qubit <~.DefaultQubit>`. * ``""adjoint""``: Uses an adjoint `method `__ that reverses through the circuit after a forward pass by iteratively applying the inverse (adjoint) gate. This method is similar to the reversible method, but has a lower time overhead and a similar memory overhead. Only allowed on simulator devices such as :class:`default.qubit <~.DefaultQubit>`. * ``""device""``: Queries the device directly for the gradient.
Only allowed on devices that provide their own gradient rules. * ``""parameter-shift""``: Use the analytic parameter-shift rule for all supported quantum operation arguments, with finite-difference as a fallback. * ``""finite-diff""``: Uses numerical finite-differences for all quantum operation arguments. mutable (bool): If True, the underlying quantum circuit is re-constructed with every evaluation. This is the recommended approach, as it allows the underlying quantum structure to depend on (potentially trainable) QNode input arguments, however may add some overhead at evaluation time. If this is set to False, the quantum structure will only be constructed on the *first* evaluation of the QNode, and is stored and re-used for further quantum evaluations. Only set this to False if it is known that the underlying quantum structure is **independent of QNode input**. max_expansion (int): The number of times the internal circuit should be expanded when executed on a device. Expansion occurs when an operation or measurement is not supported, and results in a gate decomposition. If any operations in the decomposition remain unsupported by the device, another expansion occurs. h (float): step size for the finite difference method. order (int): The order of the finite difference method to use. ``1`` corresponds to forward finite differences, ``2`` to centered finite differences. shift (float): the size of the shift for two-term parameter-shift gradient computations. adjoint_cache (bool): for TensorFlow and PyTorch interfaces and adjoint differentiation, this indicates whether to save the device state after the forward pass. Doing so saves a forward execution. Device state automatically reused with autograd and JAX interfaces. argnum (int, list(int), None): Which argument(s) to compute the Jacobian with respect to. When there are fewer parameters specified than the total number of trainable parameters, the jacobian is being estimated. Note that this option is only applicable for the following differentiation methods: ``""parameter-shift""``, ``""finite-diff""`` and ``""reversible""``. **kwargs is used to catch all unrecognized keyword arguments and provide a user warning about them. 
**Example** >>> dev = qml.device(""default.qubit"", wires=1) >>> @qml.qnode(dev) >>> def circuit(x): >>> qml.RX(x, wires=0) >>> return expval(qml.PauliZ(0)) """""" @lru_cache() def qfunc_decorator(func): """"""The actual decorator"""""" qn = QNode( func, device, interface=interface, diff_method=diff_method, mutable=mutable, max_expansion=max_expansion, h=h, order=order, shift=shift, adjoint_cache=adjoint_cache, argnum=argnum, **kwargs, ) return update_wrapper(qn, func) return qfunc_decorator " 6436,"def execute(): frappe.reload_doctype('Call Log') frappe.db.sql(""UPDATE `tabCall Log` SET `type`='Incoming' where `type` is NULL"") frappe.db.sql(""UPDATE `tabCall Log` SET `status`='No Answer' where `status` is 'Missed'"") logs = frappe.get_all('Call Log', fields=['lead', 'contact', 'name', 'contact_name', 'lead_name']) for log in logs: links = [] if log.lead: links.append({ 'link_doctype': 'Lead', 'link_name': log.lead, 'link_title': log.lead_name, }) if log.contact: links.append({ 'link_doctype': 'Contact', 'link_name': log.contact, 'link_title': log.contact_name, }) if links: call_log = frappe.get_doc('Call Log', log.name) call_log.set('links', links) call_log.save() ","def execute(): frappe.reload_doctype('Call Log') frappe.db.sql(""UPDATE `tabCall Log` SET `type`='Incoming' where `type` is NULL"") frappe.db.sql(""UPDATE `tabCall Log` SET `status`='No Answer' where `status`='Missed'"") logs = frappe.get_all('Call Log', fields=['lead', 'contact', 'name', 'contact_name', 'lead_name']) for log in logs: links = [] if log.lead: links.append({ 'link_doctype': 'Lead', 'link_name': log.lead, 'link_title': log.lead_name, }) if log.contact: links.append({ 'link_doctype': 'Contact', 'link_name': log.contact, 'link_title': log.contact_name, }) if links: call_log = frappe.get_doc('Call Log', log.name) call_log.set('links', links) call_log.save() " 43996,"def max_weight_cycle(graph, constrained=True): r""""""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the maximum-weighted cycle problem, for a given graph. The maximum-weighted cycle problem is defined in the following way (see `here `__ for more details). The product of weights of a subset of edges in a graph is given by .. math:: P = \prod_{(i, j) \in E} [(c_{ij} - 1)x_{ij} + 1] where :math:`E` are the edges of the graph, :math:`x_{ij}` is a binary number that selects whether to include the edge :math:`(i, j)` and :math:`c_{ij}` is the corresponding edge weight. Our objective is to maximimize :math:`P`, subject to selecting the :math:`x_{ij}` so that our subset of edges composes a `cycle `__. Args: graph (nx.Graph or rx.Py(Di)Graph): the directed graph on which the Hamiltonians are defined constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained) Returns: (.Hamiltonian, .Hamiltonian, dict): The cost and mixer Hamiltonians, as well as a dictionary mapping from wires to the graph's edges .. UsageDetails:: There are two variations of QAOA for this problem, constrained and unconstrained: **Constrained** .. note:: This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas in `arXiv:1709.03489 `__. The maximum weighted cycle cost Hamiltonian for unconstrained QAOA is .. math:: H_C = H_{\rm loss}. Here, :math:`H_{\rm loss}` is a loss Hamiltonian: .. 
math:: H_{\rm loss} = \sum_{(i, j) \in E} Z_{ij}\log c_{ij} where :math:`E` are the edges of the graph and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting upon the wire specified by the edge :math:`(i, j)` (see :func:`~.loss_hamiltonian` for more details). The returned mixer Hamiltonian is :func:`~.cycle_mixer` given by .. math:: H_M = \frac{1}{4}\sum_{(i, j)\in E} \left(\sum_{k \in V, k\neq i, k\neq j, (i, k) \in E, (k, j) \in E} \left[X_{ij}X_{ik}X_{kj} +Y_{ij}Y_{ik}X_{kj} + Y_{ij}X_{ik}Y_{kj} - X_{ij}Y_{ik}Y_{kj}\right] \right). This mixer provides transitions between collections of cycles, i.e., any subset of edges in :math:`E` such that all the graph's nodes :math:`V` have zero net flow (see the :func:`~.net_flow_constraint` function). .. note:: **Recommended initialization circuit:** Your circuit must prepare a state that corresponds to a cycle (or a superposition of cycles). Follow the example code below to see how this is done. **Unconstrained** The maximum weighted cycle cost Hamiltonian for constrained QAOA is defined as: .. math:: H_C \ = H_{\rm loss} + 3 H_{\rm netflow} + 3 H_{\rm outflow}. The netflow constraint Hamiltonian :func:`~.net_flow_constraint` is given by .. math:: H_{\rm netflow} = \sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} - \sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2}, where :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are the outdegree and indegree, respectively, of node :math:`i`. It is minimized whenever a subset of edges in :math:`E` results in zero net flow from each node in :math:`V`. The outflow constraint Hamiltonian :func:`~.out_flow_constraint` is given by .. math:: H_{\rm outflow} = \sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I} - 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} + \left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right). It is minimized whenever a subset of edges in :math:`E` results in an outflow of at most one from each node in :math:`V`. The returned mixer Hamiltonian is :func:`~.x_mixer` applied to all wires. .. note:: **Recommended initialization circuit:** Even superposition over all basis states. **Example** First set up a simple graph: .. code-block:: python import pennylane as qml import numpy as np import networkx as nx a = np.random.random((4, 4)) np.fill_diagonal(a, 0) g = nx.DiGraph(a) The cost and mixer Hamiltonian as well as the mapping from wires to edges can be loaded using: >>> cost, mixer, mapping = qml.qaoa.max_weight_cycle(g, constrained=True) Since we are using ``constrained=True``, we must ensure that the input state to the QAOA algorithm corresponds to a cycle. Consider the mapping: >>> mapping {0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (1, 0), 4: (1, 2), 5: (1, 3), 6: (2, 0), 7: (2, 1), 8: (2, 3), 9: (3, 0), 10: (3, 1), 11: (3, 2)} A simple cycle is given by the edges ``(0, 1)`` and ``(1, 0)`` and corresponding wires ``0`` and ``3``. Hence, the state :math:`|100100000000\rangle` corresponds to a cycle and can be prepared using :class:`~.BasisState` or simple :class:`~.PauliX` rotations on the ``0`` and ``3`` wires. 
"""""" if not isinstance(graph, (nx.Graph, rx.PyGraph, rx.PyDiGraph)): raise ValueError( f""Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"" ) mapping = qaoa.cycle.wires_to_edges(graph) if constrained: cost_h = qaoa.cycle.loss_hamiltonian(graph) cost_h.grouping_indices = [list(range(len(cost_h.ops)))] return (cost_h, qaoa.cycle.cycle_mixer(graph), mapping) cost_h = qaoa.cycle.loss_hamiltonian(graph) + 3 * ( qaoa.cycle.net_flow_constraint(graph) + qaoa.cycle.out_flow_constraint(graph) ) mixer_h = qaoa.x_mixer(mapping.keys()) return (cost_h, mixer_h, mapping) ","def max_weight_cycle(graph, constrained=True): r""""""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the maximum-weighted cycle problem, for a given graph. The maximum-weighted cycle problem is defined in the following way (see `here `__ for more details). The product of weights of a subset of edges in a graph is given by .. math:: P = \prod_{(i, j) \in E} [(c_{ij} - 1)x_{ij} + 1] where :math:`E` are the edges of the graph, :math:`x_{ij}` is a binary number that selects whether to include the edge :math:`(i, j)` and :math:`c_{ij}` is the corresponding edge weight. Our objective is to maximimize :math:`P`, subject to selecting the :math:`x_{ij}` so that our subset of edges composes a `cycle `__. Args: graph (nx.Graph or rx.PyGraph or rx.PyDiGraph): the directed graph on which the Hamiltonians are defined constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained) Returns: (.Hamiltonian, .Hamiltonian, dict): The cost and mixer Hamiltonians, as well as a dictionary mapping from wires to the graph's edges .. UsageDetails:: There are two variations of QAOA for this problem, constrained and unconstrained: **Constrained** .. note:: This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas in `arXiv:1709.03489 `__. The maximum weighted cycle cost Hamiltonian for unconstrained QAOA is .. math:: H_C = H_{\rm loss}. Here, :math:`H_{\rm loss}` is a loss Hamiltonian: .. math:: H_{\rm loss} = \sum_{(i, j) \in E} Z_{ij}\log c_{ij} where :math:`E` are the edges of the graph and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting upon the wire specified by the edge :math:`(i, j)` (see :func:`~.loss_hamiltonian` for more details). The returned mixer Hamiltonian is :func:`~.cycle_mixer` given by .. math:: H_M = \frac{1}{4}\sum_{(i, j)\in E} \left(\sum_{k \in V, k\neq i, k\neq j, (i, k) \in E, (k, j) \in E} \left[X_{ij}X_{ik}X_{kj} +Y_{ij}Y_{ik}X_{kj} + Y_{ij}X_{ik}Y_{kj} - X_{ij}Y_{ik}Y_{kj}\right] \right). This mixer provides transitions between collections of cycles, i.e., any subset of edges in :math:`E` such that all the graph's nodes :math:`V` have zero net flow (see the :func:`~.net_flow_constraint` function). .. note:: **Recommended initialization circuit:** Your circuit must prepare a state that corresponds to a cycle (or a superposition of cycles). Follow the example code below to see how this is done. **Unconstrained** The maximum weighted cycle cost Hamiltonian for constrained QAOA is defined as: .. math:: H_C \ = H_{\rm loss} + 3 H_{\rm netflow} + 3 H_{\rm outflow}. The netflow constraint Hamiltonian :func:`~.net_flow_constraint` is given by .. 
math:: H_{\rm netflow} = \sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} - \sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2}, where :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are the outdegree and indegree, respectively, of node :math:`i`. It is minimized whenever a subset of edges in :math:`E` results in zero net flow from each node in :math:`V`. The outflow constraint Hamiltonian :func:`~.out_flow_constraint` is given by .. math:: H_{\rm outflow} = \sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I} - 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} + \left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right). It is minimized whenever a subset of edges in :math:`E` results in an outflow of at most one from each node in :math:`V`. The returned mixer Hamiltonian is :func:`~.x_mixer` applied to all wires. .. note:: **Recommended initialization circuit:** Even superposition over all basis states. **Example** First set up a simple graph: .. code-block:: python import pennylane as qml import numpy as np import networkx as nx a = np.random.random((4, 4)) np.fill_diagonal(a, 0) g = nx.DiGraph(a) The cost and mixer Hamiltonian as well as the mapping from wires to edges can be loaded using: >>> cost, mixer, mapping = qml.qaoa.max_weight_cycle(g, constrained=True) Since we are using ``constrained=True``, we must ensure that the input state to the QAOA algorithm corresponds to a cycle. Consider the mapping: >>> mapping {0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (1, 0), 4: (1, 2), 5: (1, 3), 6: (2, 0), 7: (2, 1), 8: (2, 3), 9: (3, 0), 10: (3, 1), 11: (3, 2)} A simple cycle is given by the edges ``(0, 1)`` and ``(1, 0)`` and corresponding wires ``0`` and ``3``. Hence, the state :math:`|100100000000\rangle` corresponds to a cycle and can be prepared using :class:`~.BasisState` or simple :class:`~.PauliX` rotations on the ``0`` and ``3`` wires. """""" if not isinstance(graph, (nx.Graph, rx.PyGraph, rx.PyDiGraph)): raise ValueError( f""Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"" ) mapping = qaoa.cycle.wires_to_edges(graph) if constrained: cost_h = qaoa.cycle.loss_hamiltonian(graph) cost_h.grouping_indices = [list(range(len(cost_h.ops)))] return (cost_h, qaoa.cycle.cycle_mixer(graph), mapping) cost_h = qaoa.cycle.loss_hamiltonian(graph) + 3 * ( qaoa.cycle.net_flow_constraint(graph) + qaoa.cycle.out_flow_constraint(graph) ) mixer_h = qaoa.x_mixer(mapping.keys()) return (cost_h, mixer_h, mapping) " 54490,"def test_switch_label_when_param_insignificant() -> None: def _objective(trial: Trial) -> int: x = trial.suggest_int(""x"", 0, 2) _ = trial.suggest_int(""y"", -1, 1) return x ** 2 study = create_study() study.optimize(_objective, n_trials=100) ax = plot_param_importances(study) # Test if label for `y` param has been switched to `<0.01`. labels = ax.figure.findobj(lambda obj: ""<0.01"" in str(obj)) assert len(labels) == 1 ","def test_switch_label_when_param_insignificant() -> None: def _objective(trial: Trial) -> int: x = trial.suggest_int(""x"", 0, 2) _ = trial.suggest_int(""y"", -1, 1) return x ** 2 study = create_study() for x in range(1, 3): study.enqueue_trial({""x"": x, ""y"": 0}) study.optimize(_objective, n_trials=2) ax = plot_param_importances(study) # Test if label for `y` param has been switched to `<0.01`. labels = ax.figure.findobj(lambda obj: ""<0.01"" in str(obj)) assert len(labels) == 1 " 29747,"def tasks_assigned_to_worker(worker): """""" Get all the tasks associated with `worker`. 
Args: worker (orchestra.models.Worker): The specified worker object. Returns: tasks_assigned (dict): A dict with information about the worker's tasks, used in displaying the Orchestra dashboard. """""" valid_task_assignments = TaskAssignment.objects.exclude( task__status=Task.Status.ABORTED) # get all active task assignments for a user active_task_assignments = ( valid_task_assignments .filter( worker=worker, status=TaskAssignment.Status.PROCESSING) .exclude( task__project__status__in=[Project.Status.PAUSED, Project.Status.COMPLETED]) .order_by('-task__project__priority', 'task__project__start_datetime')) inactive_task_assignments = ( valid_task_assignments .filter( worker=worker, status=TaskAssignment.Status.SUBMITTED ) .exclude( task__status=Task.Status.COMPLETE) .exclude( task__project__status__in=[Project.Status.PAUSED, Project.Status.COMPLETED]) .order_by('-task__project__priority', 'task__project__start_datetime')) inactive_processing_task_assignments = [] inactive_review_task_assignments = [] for task_assignment in inactive_task_assignments: if ( valid_task_assignments .filter( status=TaskAssignment.Status.PROCESSING, task__id=task_assignment.task.id, assignment_counter__lt=task_assignment.assignment_counter) .exists()): inactive_processing_task_assignments.append(task_assignment) else: inactive_review_task_assignments.append(task_assignment) # TODO(marcua): Do a better job of paginating than cutting off to the most # recent 200 tasks. complete_task_assignments = ( valid_task_assignments .filter(Q(worker=worker) & (Q(task__status=Task.Status.COMPLETE) | Q(task__project__status=Project.Status.COMPLETED))) .order_by('-task__project__priority', '-task__project__start_datetime')[:200]) paused_task_assignments = ( valid_task_assignments .filter( worker=worker, status=TaskAssignment.Status.PROCESSING, task__project__status=Project.Status.PAUSED) .order_by('-task__project__priority', 'task__project__start_datetime')) task_assignments_overview = { 'returned': ( active_task_assignments .filter(task__status=Task.Status.POST_REVIEW_PROCESSING)), 'in_progress': ( active_task_assignments .exclude(task__status=Task.Status.POST_REVIEW_PROCESSING)), 'pending_review': inactive_review_task_assignments, 'pending_processing': inactive_processing_task_assignments, 'paused': paused_task_assignments, 'complete': complete_task_assignments} tasks_assigned = [] time_now = timezone.now() pending_todos_filter = Q(status=Todo.Status.PENDING.value) non_template_todo_filter = Q(template=None) null_section_todo_filter = Q(section__isnull=True) | Q(section='') for state, task_assignments in iter(task_assignments_overview.items()): for task_assignment in task_assignments: step = task_assignment.task.step workflow_version = step.workflow_version next_todo_title = None next_todo_dict = {} should_be_active = False if state in ('returned', 'in_progress'): # TODO(aditya): Temporarily we are filtering out todos # with section values. Remove this comment once we # figure out a long term logic. 
next_todo = ( task_assignment.task.todos .filter( pending_todos_filter & non_template_todo_filter & null_section_todo_filter ).annotate( todo_order=Case( When( start_by_datetime__gt=time_now, then=Value(3)), When( due_datetime=None, then=Value(2)), default=Value(1), output_field=IntegerField() ) ) .order_by( 'todo_order', 'due_datetime', 'start_by_datetime', '-created_at' ) .first()) if next_todo: next_todo_title = next_todo.title start_str = ( next_todo.start_by_datetime.strftime( '%Y-%m-%dT%H:%M:%SZ' ) if next_todo.start_by_datetime else '' ) due_str = ( next_todo.due_datetime.strftime( '%Y-%m-%dT%H:%M:%SZ' ) if next_todo.due_datetime else '' ) next_todo_dict = { 'title': next_todo.title, 'start_by_datetime': start_str, 'due_datetime': due_str } # TODO(aditya): Temporarily we are filtering out todos # with section values. Remove this comment once we # figure out a long term logic. num_non_template_todos = ( task_assignment.task.todos .filter( non_template_todo_filter & null_section_todo_filter ).count()) # If a task has no todos (complete or incomplete) # assigned to it, then by default the task would be # marked as pending. When a task is first created and # picked up by a worker, it will thus be in pending # state, which is confusing behavior. We thus treat a # task with zero todos as active. After a task has one # or more todos assigned to it, its active/pending # state is determined by the presence of incomplete # todos. task_started = ( next_todo_title is not None and ( next_todo.start_by_datetime is None or next_todo.start_by_datetime <= time_now ) ) should_be_active = ( (num_non_template_todos == 0) or task_started) tasks_assigned.append({ 'id': task_assignment.task.id, 'assignment_id': task_assignment.id, 'step': step.name, 'project': workflow_version.name, 'detail': task_assignment.task.project.short_description, 'priority': task_assignment.task.project.priority, 'state': state, 'assignment_start_datetime': task_assignment.start_datetime, 'next_todo_dict': next_todo_dict, 'should_be_active': should_be_active, 'tags': task_assignment.task.tags.get('tags', []) }) return tasks_assigned ","def tasks_assigned_to_worker(worker): """""" Get all the tasks associated with `worker`. Args: worker (orchestra.models.Worker): The specified worker object. Returns: tasks_assigned (dict): A dict with information about the worker's tasks, used in displaying the Orchestra dashboard. 
"""""" valid_task_assignments = TaskAssignment.objects.exclude( task__status=Task.Status.ABORTED) # get all active task assignments for a user active_task_assignments = ( valid_task_assignments .filter( worker=worker, status=TaskAssignment.Status.PROCESSING) .exclude( task__project__status__in=[Project.Status.PAUSED, Project.Status.COMPLETED]) .order_by('-task__project__priority', 'task__project__start_datetime')) inactive_task_assignments = ( valid_task_assignments .filter( worker=worker, status=TaskAssignment.Status.SUBMITTED ) .exclude( task__status=Task.Status.COMPLETE) .exclude( task__project__status__in=[Project.Status.PAUSED, Project.Status.COMPLETED]) .order_by('-task__project__priority', 'task__project__start_datetime')) inactive_processing_task_assignments = [] inactive_review_task_assignments = [] for task_assignment in inactive_task_assignments: if ( valid_task_assignments .filter( status=TaskAssignment.Status.PROCESSING, task__id=task_assignment.task.id, assignment_counter__lt=task_assignment.assignment_counter) .exists()): inactive_processing_task_assignments.append(task_assignment) else: inactive_review_task_assignments.append(task_assignment) # TODO(marcua): Do a better job of paginating than cutting off to the most # recent 200 tasks. complete_task_assignments = ( valid_task_assignments .filter(Q(worker=worker) & (Q(task__status=Task.Status.COMPLETE) | Q(task__project__status=Project.Status.COMPLETED))) .order_by('-task__project__priority', '-task__project__start_datetime')[:200]) paused_task_assignments = ( valid_task_assignments .filter( worker=worker, status=TaskAssignment.Status.PROCESSING, task__project__status=Project.Status.PAUSED) .order_by('-task__project__priority', 'task__project__start_datetime')) task_assignments_overview = { 'returned': ( active_task_assignments .filter(task__status=Task.Status.POST_REVIEW_PROCESSING)), 'in_progress': ( active_task_assignments .exclude(task__status=Task.Status.POST_REVIEW_PROCESSING)), 'pending_review': inactive_review_task_assignments, 'pending_processing': inactive_processing_task_assignments, 'paused': paused_task_assignments, 'complete': complete_task_assignments} tasks_assigned = [] time_now = timezone.now() pending_todos_filter = Q(status=Todo.Status.PENDING.value) non_template_todo_filter = Q(template=None) no_section_todo_filter = Q(section__isnull=True) | Q(section='') for state, task_assignments in iter(task_assignments_overview.items()): for task_assignment in task_assignments: step = task_assignment.task.step workflow_version = step.workflow_version next_todo_title = None next_todo_dict = {} should_be_active = False if state in ('returned', 'in_progress'): # TODO(aditya): Temporarily we are filtering out todos # with section values. Remove this comment once we # figure out a long term logic. 
next_todo = ( task_assignment.task.todos .filter( pending_todos_filter & non_template_todo_filter & null_section_todo_filter ).annotate( todo_order=Case( When( start_by_datetime__gt=time_now, then=Value(3)), When( due_datetime=None, then=Value(2)), default=Value(1), output_field=IntegerField() ) ) .order_by( 'todo_order', 'due_datetime', 'start_by_datetime', '-created_at' ) .first()) if next_todo: next_todo_title = next_todo.title start_str = ( next_todo.start_by_datetime.strftime( '%Y-%m-%dT%H:%M:%SZ' ) if next_todo.start_by_datetime else '' ) due_str = ( next_todo.due_datetime.strftime( '%Y-%m-%dT%H:%M:%SZ' ) if next_todo.due_datetime else '' ) next_todo_dict = { 'title': next_todo.title, 'start_by_datetime': start_str, 'due_datetime': due_str } # TODO(aditya): Temporarily we are filtering out todos # with section values. Remove this comment once we # figure out a long term logic. num_non_template_todos = ( task_assignment.task.todos .filter( non_template_todo_filter & null_section_todo_filter ).count()) # If a task has no todos (complete or incomplete) # assigned to it, then by default the task would be # marked as pending. When a task is first created and # picked up by a worker, it will thus be in pending # state, which is confusing behavior. We thus treat a # task with zero todos as active. After a task has one # or more todos assigned to it, its active/pending # state is determined by the presence of incomplete # todos. task_started = ( next_todo_title is not None and ( next_todo.start_by_datetime is None or next_todo.start_by_datetime <= time_now ) ) should_be_active = ( (num_non_template_todos == 0) or task_started) tasks_assigned.append({ 'id': task_assignment.task.id, 'assignment_id': task_assignment.id, 'step': step.name, 'project': workflow_version.name, 'detail': task_assignment.task.project.short_description, 'priority': task_assignment.task.project.priority, 'state': state, 'assignment_start_datetime': task_assignment.start_datetime, 'next_todo_dict': next_todo_dict, 'should_be_active': should_be_active, 'tags': task_assignment.task.tags.get('tags', []) }) return tasks_assigned " 40001,"def data_loaders(data, label_key=None, batch_size=100, shuffle=False): """"""Generate `DataLoader` for each split from `AnnData`. Params ------ data : `AnnData`, `Mapping[AnnData]` Needs to contain an `.obs` field named `split` defining the split. label_key : str Key for `.obs` defining the label. batch_size : int Batch size. shuffle : bool `DataLoader` `shuffle` parameter. Returns ------- A `dict[DataLoader]` indexed by the names of splits. 
"""""" datasets = {} # torch data loaders = {} # loaders if isinstance(data, dict): splits = list(data.keys()) else: adata = data if 'split' not in adata.obs.columns: raise ValueError('Either pass dict with splits or an AnnData with obs column ""split"".') splits = list(adata.obs.split.unique()) # check that we have training and test split if not ('train' in splits and 'test' in splits): raise ValueError('Need to have ""train"" and ""test"" in split.') # ensure train comes first for encoder splits.remove(""train"") splits.insert(0, ""train"") label_encoder = None # is None for train for split in splits: if isinstance(data, dict): adata_split = data[split] else: if 'split' in adata.obs.columns: adata_split = adata[adata.obs.split == split].copy() # make copy to avoid slow lazy indexing datasets[split] = Dataset(adata_split, label_key=label_key, label_encoder=label_encoder) # need to set the fitted label encoder so that it's used in validation, # test, holdout or whatever might come after in the loop if split == 'train': # set once and never again label_encoder = datasets[split].label_encoder loaders[split] = DataLoader( dataset=datasets[split], batch_size=batch_size, shuffle=shuffle) return loaders ","def data_loaders(data, label_key=None, batch_size=100, shuffle=False): """"""Generate `DataLoader` for each split from `AnnData`. Params ------ data : `AnnData`, `Mapping[AnnData]` Needs to contain an `.obs` field named `split` defining the split. label_key : str Key for `.obs` defining the label. batch_size : int Batch size. shuffle : bool `DataLoader` `shuffle` parameter. Returns ------- A `dict[DataLoader]` indexed by the names of splits. """""" datasets = {} # torch data loaders = {} # loaders if isinstance(data, dict): splits = list(data.keys()) else: adata = data if 'split' not in adata.obs.columns: raise ValueError('Either pass dict with splits or an AnnData with obs column ""split"".') splits = list(adata.obs.split.unique()) # check that we have training and test split if not ('train' in splits and 'test' in splits): raise ValueError('Need to have ""train"" and ""test"" in split.') # ensure train comes first for encoder splits.remove(""train"") splits.insert(0, ""train"") label_encoder = None # is None for train for split in splits: if isinstance(data, cabc.Mapping): adata_split = data[split] else: if 'split' in adata.obs.columns: adata_split = adata[adata.obs.split == split].copy() # make copy to avoid slow lazy indexing datasets[split] = Dataset(adata_split, label_key=label_key, label_encoder=label_encoder) # need to set the fitted label encoder so that it's used in validation, # test, holdout or whatever might come after in the loop if split == 'train': # set once and never again label_encoder = datasets[split].label_encoder loaders[split] = DataLoader( dataset=datasets[split], batch_size=batch_size, shuffle=shuffle) return loaders " 56250,"def add_model_pages(output_root, parent_element, group, group_title): group_element = add_page(output_root, parent_element, title=group_title, id=f'omz_models_group_{group}', path=f'models/{group}/index.md') task_type_elements = {} device_support_path = OMZ_ROOT / 'models' / group / 'device_support.md' with device_support_path.open('r', encoding=""utf-8"") as device_support_file: raw_device_support = device_support_file.read() device_support_lines = re.findall(r'^\|\s\S+\s\|', raw_device_support, re.MULTILINE) device_support_lines = [device_support_line.strip(' |') for device_support_line in device_support_lines] for md_path in 
sorted(OMZ_ROOT.glob(f'models/{group}/*/**/*.md')): md_path_rel = md_path.relative_to(OMZ_ROOT) model_name = md_path_rel.parts[2] device_support_path_rel = device_support_path.relative_to(OMZ_ROOT) if model_name not in device_support_lines: if not (md_path.parent / 'composite-model.yml').exists(): raise RuntimeError(f'{device_support_path_rel}: ""{model_name}"" ' 'model reference is missing.') model_subdirs = (subdir.name for subdir in md_path.parent.glob('*/**')) for model_subdir in model_subdirs: if not (md_path.parent / model_subdir / 'model.yml').exists(): continue # non-model folder if model_subdir not in device_support_lines: raise RuntimeError(f'{device_support_path_rel}: ' f'""{model_subdir}"" part reference of ' f'""{model_name}"" composite model is missing.') expected_md_path = Path('models', group, model_name, 'README.md') if md_path_rel != expected_md_path: raise RuntimeError(f'{md_path_rel}: unexpected documentation file,' ' should be {expected_md_path}') # FIXME: use the info dumper to query model information instead of # parsing the configs. We're not doing that now, because the info # dumper doesn't support composite models yet. model_yml_path = OMZ_ROOT / 'models' / group / model_name / 'model.yml' composite_model_yml_path = model_yml_path.with_name('composite-model.yml') if model_yml_path.exists(): expected_title = model_name with open(model_yml_path, 'rb') as f: config = yaml.safe_load(f) task_type = config['task_type'] elif composite_model_yml_path.exists(): expected_title = f'{model_name} (composite)' with open(composite_model_yml_path, 'rb') as f: config = yaml.safe_load(f) task_type = config['task_type'] else: logging.warning( '{}: no corresponding model.yml or composite-model.yml found; skipping' .format(md_path_rel)) continue if task_type not in task_type_elements: human_readable_task_type = HUMAN_READABLE_TASK_TYPES.get(task_type, task_type.replace('_', ' ').title()) task_type_elements[task_type] = add_page(output_root, group_element, title=f'{human_readable_task_type} Models') # All model names are unique, so we don't need to include the group # in the page ID. However, we do prefix ""model_"", so that model pages # don't conflict with any other pages in the omz_models namespace that # might be added later. 
page_id = 'omz_models_model_' + re.sub(r'[^a-zA-Z0-9]', '_', model_name) model_element = add_page(output_root, task_type_elements[task_type], id=page_id, path=md_path_rel) if model_element.attrib['title'] != expected_title: raise RuntimeError(f'{md_path_rel}: should have title ""{expected_title}""') sort_titles(group_element) title = 'Intel\'s Pre-Trained Models Device Support' if group == 'intel' else 'Public Pre-Trained Models Device Support' add_page(output_root, group_element, id=f'omz_models_{group}_device_support', path=f'models/{group}/device_support.md', title=title, index=0) ","def add_model_pages(output_root, parent_element, group, group_title): group_element = add_page(output_root, parent_element, title=group_title, id=f'omz_models_group_{group}', path=f'models/{group}/index.md') task_type_elements = {} device_support_path = OMZ_ROOT / 'models' / group / 'device_support.md' with device_support_path.open('r', encoding=""utf-8"") as device_support_file: raw_device_support = device_support_file.read() device_support_lines = re.findall(r'^\|\s\S+\s\|', raw_device_support, re.MULTILINE) device_support_lines = [device_support_line.strip(' |') for device_support_line in device_support_lines] for md_path in sorted(OMZ_ROOT.glob(f'models/{group}/*/**/*.md')): md_path_rel = md_path.relative_to(OMZ_ROOT) model_name = md_path_rel.parts[2] device_support_path_rel = device_support_path.relative_to(OMZ_ROOT) if model_name not in device_support_lines: if not (md_path.parent / 'composite-model.yml').exists(): raise RuntimeError(f'{device_support_path_rel}: ""{model_name}"" ' 'model reference is missing.') model_subdirs = (subdir.name for subdir in md_path.parent.glob('*/**')) for model_subdir in model_subdirs: if not (md_path.parent / model_subdir / 'model.yml').exists(): continue # non-model folder if model_subdir not in device_support_lines: raise RuntimeError(f'{device_support_path_rel}: ' f'""{model_subdir}"" part reference of ' f'""{model_name}"" composite model is missing.') expected_md_path = Path('models', group, model_name, 'README.md') if md_path_rel != expected_md_path: raise RuntimeError(f'{md_path_rel}: unexpected documentation file,' ' should be {expected_md_path}') # FIXME: use the info dumper to query model information instead of # parsing the configs. We're not doing that now, because the info # dumper doesn't support composite models yet. model_yml_path = OMZ_ROOT / 'models' / group / model_name / 'model.yml' composite_model_yml_path = model_yml_path.with_name('composite-model.yml') if model_yml_path.exists(): expected_title = model_name with open(model_yml_path, 'rb') as f: config = yaml.safe_load(f) task_type = config['task_type'] elif composite_model_yml_path.exists(): expected_title = f'{model_name} (composite)' with open(composite_model_yml_path, 'rb') as f: config = yaml.safe_load(f) task_type = config['task_type'] else: logging.warning( '{}: no corresponding model.yml or composite-model.yml found; skipping' .format(md_path_rel)) continue if task_type not in task_type_elements: human_readable_task_type = HUMAN_READABLE_TASK_TYPES.get(task_type, task_type.replace('_', ' ').title()) task_type_elements[task_type] = add_page(output_root, group_element, title=f'{human_readable_task_type} Models') # All model names are unique, so we don't need to include the group # in the page ID. However, we do prefix ""model_"", so that model pages # don't conflict with any other pages in the omz_models namespace that # might be added later. 
page_id = 'omz_models_model_' + re.sub(r'[^a-zA-Z0-9]', '_', model_name) model_element = add_page(output_root, task_type_elements[task_type], id=page_id, path=md_path_rel) if model_element.attrib['title'] != expected_title: raise RuntimeError(f'{md_path_rel}: should have title ""{expected_title}""') sort_titles(group_element) device_support_title = 'Intel\'s Pre-Trained Models Device Support' if group == 'intel' \ else 'Public Pre-Trained Models Device Support' add_page(output_root, group_element, id=f'omz_models_{group}_device_support', path=f'models/{group}/device_support.md', title=title, index=0) " 54042,"def is_neg_samp_feasible(edge_index: Tensor, sample_method: str, num_nodes=None, bipartite: bool = False, contains_neg_self_loops: bool = True, force_undirected: bool = False) -> bool: r""""""Check feasibility of negative sampling. Args: edge_index (LongTensor): The edge indices. sample_method (string): *i.e.*, :obj:`""structure""` or :obj:`""common""`. Set to :obj:`""structure""` when utilizing structured_negative_sampling, and set to :obj:`""common""` for other situations. num_nodes (int or Tuple[int, int], optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. If given as a tuple, then :obj:`edge_index` is interpreted as a bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`. (default: :obj:`None`) contains_neg_self_loops (bool, optional): If set to :obj:`False`, sampled negative edges will not contain self loops. It is independent of :attr:`edge_index`. (default: :obj:`True`) force_undirected (bool, optional): If set to :obj:`True`, sampled negative edges will be undirected. (default: :obj:`False`) :rtype: bool """""" assert sample_method in ['common', 'structure'] if sample_method != 'structure': contains_neg_self_loops = False # remove duplicate edges for multi edges among two nodes if bipartite: force_undirected = False max_num_neighbor = num_nodes[1] num_nodes = num_nodes[0] else: num_nodes = maybe_num_nodes(edge_index, num_nodes) max_num_neighbor = num_nodes edge_index = torch.unique(edge_index.T, dim=0).T if not contains_neg_self_loops and not bipartite: edge_index, _ = remove_self_loops(edge_index) max_num_neighbor -= 1 if force_undirected: edge_index = to_undirected(edge_index) sender = edge_index[0] node_degree = degree(sender, num_nodes) if sample_method == 'structure': if torch.sub(node_degree, max_num_neighbor).nonzero().__len__() != num_nodes: warnings.warn('Cannot apply negative sampling', RuntimeWarning) return False elif sample_method == 'common': if torch.sub(node_degree, max_num_neighbor).nonzero().__len__() == 0: warnings.warn('Cannot apply negative sampling', RuntimeWarning) return False else: raise ValueError(""sample_method not in ['common', 'structure'] "") return True ","def is_neg_samp_feasible(edge_index: Tensor, sample_method: str, num_nodes=None, bipartite: bool = False, contains_neg_self_loops: bool = True, force_undirected: bool = False) -> bool: r""""""Check feasibility of negative sampling. Args: edge_index (LongTensor): The edge indices. sample_method (string): *i.e.*, :obj:`""structure""` or :obj:`""common""`. Set to :obj:`""structure""` when utilizing structured_negative_sampling, and set to :obj:`""common""` for other situations. num_nodes (int or Tuple[int, int], optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. If given as a tuple, then :obj:`edge_index` is interpreted as a bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`. 
(default: :obj:`None`) contains_neg_self_loops (bool, optional): If set to :obj:`False`, sampled negative edges will not contain self loops. It is independent of :attr:`edge_index`. (default: :obj:`True`) force_undirected (bool, optional): If set to :obj:`True`, sampled negative edges will be undirected. (default: :obj:`False`) :rtype: bool """""" assert sample_method in ['common', 'structure'] if sample_method != 'structure': contains_neg_self_loops = False # remove duplicate edges for multi edges among two nodes if bipartite: force_undirected = False max_num_neighbor = num_nodes[1] num_nodes = num_nodes[0] else: num_nodes = maybe_num_nodes(edge_index, num_nodes) max_num_neighbor = num_nodes edge_index = torch.unique(edge_index.T, dim=0).T if not contains_neg_self_loops and not bipartite: edge_index, _ = remove_self_loops(edge_index) max_num_neighbor -= 1 if force_undirected: edge_index = to_undirected(edge_index) sender = edge_index[0] node_degree = degree(sender, num_nodes) if sample_method == 'structure': if torch.sub(node_degree, max_num_neighbor).nonzero().__len__() != num_nodes: warnings.warn('Cannot apply negative sampling', RuntimeWarning) return False else: if torch.sub(node_degree, max_num_neighbor).nonzero().__len__() == 0: warnings.warn('Cannot apply negative sampling', RuntimeWarning) return False return True " 24904,"def _loop_exits_early(loop): """""" Returns true if a loop may end with a break or return statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may end with a break or return statement, False otherwise. """""" loop_nodes = (nodes.For, nodes.While) definition_nodes = (nodes.FunctionDef, nodes.ClassDef) break_nodes = (nodes.Break, nodes.Return) inner_loop_nodes = [ _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if _node != loop ] return any( _node for _node in loop.nodes_of_class(break_nodes, skip_klass=definition_nodes) if _get_break_loop_node(_node) not in inner_loop_nodes and not _node_in_orelse(loop, _node) ) ","def _loop_exits_early(loop: Union[nodes.While, nodes.For]) -> bool: """""" Returns true if a loop may end with a break or return statement. Args: loop (astroid.For, astroid.While): the loop node inspected. Returns: bool: True if the loop may end with a break or return statement, False otherwise. """""" loop_nodes = (nodes.For, nodes.While) definition_nodes = (nodes.FunctionDef, nodes.ClassDef) break_nodes = (nodes.Break, nodes.Return) inner_loop_nodes = [ _node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if _node != loop ] return any( _node for _node in loop.nodes_of_class(break_nodes, skip_klass=definition_nodes) if _get_break_loop_node(_node) not in inner_loop_nodes and not _node_in_orelse(loop, _node) ) " 57749,"def ip_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]: ips = argToList(args.get('ip')) if len(ips) == 0: raise ValueError('IP(s) not specified') command_results: List[CommandResults] = [] for ip in ips: ip_data = client.get_ip_reputation(ip, api_key) # remove the array indicator_ip = ip_data['indicator'] reputation = ip_data['risk'] score = convert_to_xsoar_severity(reputation) # Create the DBotScore structure first using the Common.DBotScore class. 
dbot_score = Common.DBotScore( indicator=indicator_ip, indicator_type=DBotScoreType.IP, integration_name='Pulsedive', score=score, malicious_description=f'Pulsedive returned reputation {reputation}' ) # Create the IP Standard Context structure using Common.IP and add # dbot_score to it. ip_standard_context = Common.IP( ip=indicator_ip, dbot_score=dbot_score ) ip_data.pop('objects') ip_data.pop('nir') command_results.append(CommandResults( readable_output=tableToMarkdown('IP List', ip_data), outputs_prefix='Pulsedive.IP', outputs_key_field='indicator', outputs=ip_data, indicator=ip_standard_context )) return command_results ","def ip_reputation_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]: ips = argToList(args.get('ip')) if len(ips) == 0: raise ValueError('IP(s) not specified') command_results: List[CommandResults] = [] for ip in ips: ip_data = client.get_ip_reputation(ip, api_key) # remove the array indicator_ip = ip_data['indicator'] reputation = ip_data['risk'] score = convert_to_xsoar_severity(reputation) # Create the DBotScore structure first using the Common.DBotScore class. dbot_score = Common.DBotScore( indicator=indicator_ip, indicator_type=DBotScoreType.IP, integration_name='Pulsedive', score=score, malicious_description=f'Pulsedive returned reputation {reputation}' ) # Create the IP Standard Context structure using Common.IP and add # dbot_score to it. ip_standard_context = Common.IP( ip=indicator_ip, dbot_score=dbot_score ) ip_data.pop('objects', None) ip_data.pop('nir', None) command_results.append(CommandResults( readable_output=tableToMarkdown('IP List', ip_data), outputs_prefix='Pulsedive.IP', outputs_key_field='indicator', outputs=ip_data, indicator=ip_standard_context )) return command_results " 42357,"def validate_tag_embed_author(author: Any) -> None: """"""Raises a ValidationError if the given author is invalid."""""" field_validators = { 'name': ( MinLengthValidator( limit_value=1, message=""Embed author name must not be empty."" ), MaxLengthValidator(limit_value=256) ), 'url': (), 'icon_url': (), 'proxy_icon_url': () } if not isinstance(author, Mapping): raise ValidationError(""Embed author must be a mapping."") for field_name, value in author.items(): if field_name not in field_validators: raise ValidationError(f""Unknown embed author field: {field_name!r}."") for validator in field_validators[field_name]: validator(value) ","def validate_tag_embed_author(author: Dict[str, str]) -> None: """"""Raises a ValidationError if the given author is invalid."""""" field_validators = { 'name': ( MinLengthValidator( limit_value=1, message=""Embed author name must not be empty."" ), MaxLengthValidator(limit_value=256) ), 'url': (), 'icon_url': (), 'proxy_icon_url': () } if not isinstance(author, Mapping): raise ValidationError(""Embed author must be a mapping."") for field_name, value in author.items(): if field_name not in field_validators: raise ValidationError(f""Unknown embed author field: {field_name!r}."") for validator in field_validators[field_name]: validator(value) " 59114,"def upgrade_config(config: ConfigType, target: int = CURRENT_CONFIG_VERSION) -> ConfigType: """"""Run the registered configuration migrations up to the target version. 
:param config: the configuration dictionary :return: the migrated configuration dictionary """""" current = get_current_version(config) used = [] while current < target: current = get_current_version(config) try: migrator = next(m for m in _MIGRATION_LOOKUPS if m.initial == current) except StopIteration: raise exceptions.ConfigurationError(f'No migration found to upgrade version {current}') if migrator in used: raise exceptions.ConfigurationError(f'Circular migration detected, upgrading to {target}') used.append(migrator) migrator().upgrade(config) config.setdefault('CONFIG_VERSION', {})['CURRENT'] = current = migrator.final return config ","def upgrade_config(config: ConfigType, target: int = CURRENT_CONFIG_VERSION) -> ConfigType: """"""Run the registered configuration migrations up to the target version. :param config: the configuration dictionary :return: the migrated configuration dictionary """""" current = get_current_version(config) used = [] while current < target: current = get_current_version(config) try: migrator = next(m for m in _MIGRATION_LOOKUPS if m.initial == current) except StopIteration: raise exceptions.ConfigurationError(f'No migration found to upgrade version {current}') if migrator in used: raise exceptions.ConfigurationError(f'Circular migration detected, upgrading to {target}') used.append(migrator) migrator().upgrade(config) current = migrator.final config.setdefault('CONFIG_VERSION', {})['CURRENT'] = current return config " 28314,"def test_atomic_creation(experiment): """""""" Test that dataset creation is atomic. Test for https://github.com/QCoDeS/Qcodes/issues/1444 """""" def just_throw(*args): raise RuntimeError(""This breaks adding metadata"") # first we patch add_meta_data to throw an exception # if create_data is not atomic this would create a partial # run in the db. Causing the next create_run to fail with patch( ""qcodes.dataset.sqlite.queries.add_data_to_dynamic_columns"", new=just_throw ): x = ParamSpec(""x"", ""numeric"") t = ParamSpec(""t"", ""numeric"") y = ParamSpec(""y"", ""numeric"", depends_on=[""x"", ""t""]) with pytest.raises( RuntimeError, match=""Rolling back due to unhandled exception"" ) as e: mut_queries.create_run( experiment.conn, experiment.exp_id, name=""testrun"", guid=generate_guid(), parameters=[x, t, y], metadata={""a"": 1}, ) assert error_caused_by(e, ""This breaks adding metadata"") # since we are starting from an empty database and the above transaction # should be rolled back there should be no runs in the run table runs = mut_conn.transaction(experiment.conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 0 with shadow_conn(experiment.path_to_db) as new_conn: runs = mut_conn.transaction(new_conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 0 # if the above was not correctly rolled back we # expect the next creation of a run to fail mut_queries.create_run(experiment.conn, experiment.exp_id, name='testrun', guid=generate_guid(), parameters=[x, t, y], metadata={'a': 1}) runs = mut_conn.transaction(experiment.conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 1 with shadow_conn(experiment.path_to_db) as new_conn: runs = mut_conn.transaction(new_conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 1 ","def test_atomic_creation(experiment): """""""" Test that dataset creation is atomic. 
Test for https://github.com/QCoDeS/Qcodes/issues/1444 """""" def just_throw(*args): raise RuntimeError(""This breaks adding metadata"") # first we patch add_data_to_dynamic_columns to throw an exception # if create_data is not atomic this would create a partial # run in the db. Causing the next create_run to fail with patch( ""qcodes.dataset.sqlite.queries.add_data_to_dynamic_columns"", new=just_throw ): x = ParamSpec(""x"", ""numeric"") t = ParamSpec(""t"", ""numeric"") y = ParamSpec(""y"", ""numeric"", depends_on=[""x"", ""t""]) with pytest.raises( RuntimeError, match=""Rolling back due to unhandled exception"" ) as e: mut_queries.create_run( experiment.conn, experiment.exp_id, name=""testrun"", guid=generate_guid(), parameters=[x, t, y], metadata={""a"": 1}, ) assert error_caused_by(e, ""This breaks adding metadata"") # since we are starting from an empty database and the above transaction # should be rolled back there should be no runs in the run table runs = mut_conn.transaction(experiment.conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 0 with shadow_conn(experiment.path_to_db) as new_conn: runs = mut_conn.transaction(new_conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 0 # if the above was not correctly rolled back we # expect the next creation of a run to fail mut_queries.create_run(experiment.conn, experiment.exp_id, name='testrun', guid=generate_guid(), parameters=[x, t, y], metadata={'a': 1}) runs = mut_conn.transaction(experiment.conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 1 with shadow_conn(experiment.path_to_db) as new_conn: runs = mut_conn.transaction(new_conn, 'SELECT run_id FROM runs').fetchall() assert len(runs) == 1 " 1426,"def maybe_cythonize_extensions(top_path, config): """"""Tweaks for building extensions between release and development mode."""""" openmp_status = check_openmp_support() if openmp_status == ""explicitly disabled"": # SKLEARN_NO_OPENMP is set with_openmp = False explicitly_disabled = True elif openmp_status in (""unrelated fail"", ""unsupported""): # either build fails even without OpenMP # or build fails with openmp and SKLEARN_NO_OPENMP is not set with_openmp = False explicitly_disabled = False os.environ[""SKLEARN_NO_OPENMP""] = ""True"" else: with_openmp = True explicitly_disabled = False is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO')) if is_release: build_from_c_and_cpp_files(config.ext_modules) else: message = ('Please install cython with a version >= {0} in order ' 'to build a scikit-learn development version.').format( CYTHON_MIN_VERSION) try: import Cython if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION: message += ' Your version of Cython was {0}.'.format( Cython.__version__) raise ValueError(message) from Cython.Build import cythonize except ImportError as exc: exc.args += (message,) raise n_jobs = 1 with contextlib.suppress(ImportError): import joblib if LooseVersion(joblib.__version__) > LooseVersion(""0.13.0""): # earlier joblib versions don't account for CPU affinity # constraints, and may over-estimate the number of available # CPU particularly in CI (cf loky#114) n_jobs = joblib.effective_n_jobs() config.ext_modules = cythonize( config.ext_modules, nthreads=n_jobs, compile_time_env={'SKLEARN_OPENMP_SUPPORTED': with_openmp, 'OPENMP_EXPLICIT_DISABLED': explicitly_disabled}, compiler_directives={'language_level': 3}) ","def maybe_cythonize_extensions(top_path, config): """"""Tweaks for building extensions between release and development mode."""""" openmp_status = 
check_openmp_support() if openmp_status == ""explicitly disabled"": # SKLEARN_NO_OPENMP is set with_openmp = False explicitly_disabled = True elif openmp_status in (""unrelated fail"", ""unsupported""): # either build fails even without OpenMP # or build fails with openmp and SKLEARN_NO_OPENMP is not set with_openmp = False explicitly_disabled = False os.environ[""SKLEARN_NO_OPENMP""] = ""True"" else: with_openmp = True explicitly_disabled = False is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO')) if is_release: build_from_c_and_cpp_files(config.ext_modules) else: message = ('Please install cython with a version >= {0} in order ' 'to build a scikit-learn development version.').format( CYTHON_MIN_VERSION) try: import Cython if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION: message += ' Your version of Cython was {0}.'.format( Cython.__version__) raise ValueError(message) from Cython.Build import cythonize except ImportError as exc: exc.args += (message,) raise n_jobs = 1 with contextlib.suppress(ImportError): import joblib if LooseVersion(joblib.__version__) > LooseVersion(""0.13.0""): # earlier joblib versions don't account for CPU affinity # constraints, and may over-estimate the number of available # CPU particularly in CI (cf loky#114) n_jobs = joblib.effective_n_jobs() config.ext_modules = cythonize( config.ext_modules, nthreads=n_jobs, compile_time_env={'SKLEARN_OPENMP_SUPPORTED': with_openmp, 'SKLEARN_OPENMP_EXPLICIT_DISABLED': explicitly_disabled}, compiler_directives={'language_level': 3}) " 44075,"def contract_tensors( tensors: Sequence, communication_graph: MultiDiGraph, prepare_nodes: Sequence[Sequence[PrepareNode]], measure_nodes: Sequence[Sequence[MeasureNode]], use_opt_einsum: bool = False, ): """"""Contract tensors according to the edges specified in the communication graph. This operation is differentiable. The ``prepare_nodes`` and ``measure_nodes`` arguments are both sequences of size ``len(communication_graph.nodes)`` that describe the order of indices in the ``tensors`` with respect to to the :class:`~.PrepareNode` and :class`~.MeasureNode` edges in the communication graph. Args: tensors (Sequence): the tensors to be contracted communication_graph (MultiDiGraph): the communication graph determining connectivity between the tensors prepare_nodes (Sequence[PrepareNode]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of preparation indices in each tensor measure_nodes (Sequence[MeasureNode]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of measurement indices in each tensor use_opt_einsum (bool): Determines whether to use the [opt_einsum](https://dgasmith.github.io/opt_einsum/) package. This package is useful for tensor contractions of large networks but must be installed separately using, e.g., ``pip install opt_einsum``. Returns: float or array-like: the result of contracting the tensor network **Example** .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :doc:`transforms ` page for more details. We first set up the tensors and their corresponding :class:`~.PrepareNode` and :class`~.MeasureNode` orderings: .. code-block:: python t = [np.arange(4), np.arange(4, 8)] p = [[], [qcut.PrepareNode(wires=0)]] m = [[qcut.MeasureNode(wires=0)], []] The communication graph describing edges in the tensor network must also be constructed: .. 
code-block:: python g = MultiDiGraph([(0, 1, {""pair"": (m[0][0], p[1][0])})]) The network can then be contracted using: >>> qcut.contract_tensors(t, g, p, m) 38 """""" # pylint: disable=import-outside-toplevel if use_opt_einsum: try: from opt_einsum import contract, get_symbol except ImportError as e: raise ImportError( ""The opt_einsum package is required when use_opt_einsum is set to "" ""True in the contract_tensors function. This package can be "" ""installed using:\npip install opt_einsum"" ) from e else: from string import ascii_letters as symbols from pennylane.math import einsum as contract def get_symbol(i): if i >= len(symbols): raise ValueError( ""Set the use_opt_einsum argument to True when applying more than "" f""{len(symbols)} wire cuts to a circuit"" ) return symbols[i] ctr = 0 tensor_indxs = [""""] * len(communication_graph.nodes) meas_map = {} for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)): predecessors = communication_graph.pred[node] for p in prep: for _, pred_edges in predecessors.items(): for pred_edge in pred_edges.values(): meas_op, prep_op = pred_edge[""pair""] if p is prep_op: symb = get_symbol(ctr) ctr += 1 tensor_indxs[i] += symb meas_map[meas_op] = symb for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)): successors = communication_graph.succ[node] for m in meas: for _, succ_edges in successors.items(): for succ_edge in succ_edges.values(): meas_op, _ = succ_edge[""pair""] if m is meas_op: symb = meas_map[meas_op] tensor_indxs[i] += symb eqn = "","".join(tensor_indxs) kwargs = {} if use_opt_einsum else {""like"": tensors[0]} return contract(eqn, *tensors, **kwargs) ","def contract_tensors( tensors: Sequence, communication_graph: MultiDiGraph, prepare_nodes: Sequence[Sequence[PrepareNode]], measure_nodes: Sequence[Sequence[MeasureNode]], use_opt_einsum: bool = False, ): """"""Contract tensors according to the edges specified in the communication graph. This operation is differentiable. The ``prepare_nodes`` and ``measure_nodes`` arguments are both sequences of size ``len(communication_graph.nodes)`` that describe the order of indices in the ``tensors`` with respect to to the :class:`~.PrepareNode` and :class`~.MeasureNode` edges in the communication graph. Args: tensors (Sequence): the tensors to be contracted communication_graph (nx.MultiDiGraph): the communication graph determining connectivity between the tensors prepare_nodes (Sequence[PrepareNode]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of preparation indices in each tensor measure_nodes (Sequence[MeasureNode]): a sequence of size ``len(communication_graph.nodes)`` that determines the order of measurement indices in each tensor use_opt_einsum (bool): Determines whether to use the [opt_einsum](https://dgasmith.github.io/opt_einsum/) package. This package is useful for tensor contractions of large networks but must be installed separately using, e.g., ``pip install opt_einsum``. Returns: float or array-like: the result of contracting the tensor network **Example** .. note:: This function is designed for use as part of the circuit cutting workflow. Check out the :doc:`transforms ` page for more details. We first set up the tensors and their corresponding :class:`~.PrepareNode` and :class`~.MeasureNode` orderings: .. 
code-block:: python t = [np.arange(4), np.arange(4, 8)] p = [[], [qcut.PrepareNode(wires=0)]] m = [[qcut.MeasureNode(wires=0)], []] The communication graph describing edges in the tensor network must also be constructed: .. code-block:: python g = MultiDiGraph([(0, 1, {""pair"": (m[0][0], p[1][0])})]) The network can then be contracted using: >>> qcut.contract_tensors(t, g, p, m) 38 """""" # pylint: disable=import-outside-toplevel if use_opt_einsum: try: from opt_einsum import contract, get_symbol except ImportError as e: raise ImportError( ""The opt_einsum package is required when use_opt_einsum is set to "" ""True in the contract_tensors function. This package can be "" ""installed using:\npip install opt_einsum"" ) from e else: from string import ascii_letters as symbols from pennylane.math import einsum as contract def get_symbol(i): if i >= len(symbols): raise ValueError( ""Set the use_opt_einsum argument to True when applying more than "" f""{len(symbols)} wire cuts to a circuit"" ) return symbols[i] ctr = 0 tensor_indxs = [""""] * len(communication_graph.nodes) meas_map = {} for i, (node, prep) in enumerate(zip(communication_graph.nodes, prepare_nodes)): predecessors = communication_graph.pred[node] for p in prep: for _, pred_edges in predecessors.items(): for pred_edge in pred_edges.values(): meas_op, prep_op = pred_edge[""pair""] if p is prep_op: symb = get_symbol(ctr) ctr += 1 tensor_indxs[i] += symb meas_map[meas_op] = symb for i, (node, meas) in enumerate(zip(communication_graph.nodes, measure_nodes)): successors = communication_graph.succ[node] for m in meas: for _, succ_edges in successors.items(): for succ_edge in succ_edges.values(): meas_op, _ = succ_edge[""pair""] if m is meas_op: symb = meas_map[meas_op] tensor_indxs[i] += symb eqn = "","".join(tensor_indxs) kwargs = {} if use_opt_einsum else {""like"": tensors[0]} return contract(eqn, *tensors, **kwargs) " 48051,"def test_toCompound(simple_assy, nested_assy): c0 = simple_assy.toCompound() assert isinstance(c0, cq.Compound) assert len(c0.Solids()) == 4 c1 = nested_assy.toCompound() assert isinstance(c1, cq.Compound) assert len(c1.Solids()) == 4 # check nested assy location appears in compound # create four boxes, stack them ontop of each other, check highest face is in final compound box0 = cq.Workplane().box(1, 1, 3, centered=(True, True, False)) box1 = cq.Workplane().box(1, 1, 4) box2 = cq.Workplane().box(1, 1, 5) box3 = cq.Workplane().box(1, 1, 6) # top level assy assy0 = cq.Assembly(box0, name=""box0"") assy0.add(box1, name=""box1"") assy0.constrain(""box0@faces@>Z"", ""box1@faces@Z"", ""box3@faces@Z"", ""assy1/box2@faces@Z"", ""box1@faces@Z"", ""box3@faces@Z"", ""assy1/box2@faces@>> linspace_int(10, 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> linspace_int(10, 4) array([0, 2, 5, 7]) >>> linspace_int(10, 5) array([0, 2, 4, 6, 8]) >>> """""" if periodic: jj = np.linspace(0, Nx, Ny+1)[:-1] else: jj = np.linspace(0, Nx-1, Ny) jj = jj.astype(int) return jj ","def linspace_int(Nx, Ny, periodic=True): """"""Provide a range of `Ny` equispaced integers between `0` and `Nx-1`. Parameters ---------- Nx: int Range of integers Ny: int Number of integers periodic: bool, optional Whether the vector is periodic. Determines if `Nx == 0`. Default: True Returns ------- vector Generated vectors. 
Examples -------- >>> linspace_int(10, 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> linspace_int(10, 4) array([0, 2, 5, 7]) >>> linspace_int(10, 5) array([0, 2, 4, 6, 8]) >>> """""" if periodic: jj = np.linspace(0, Nx, Ny+1)[:-1] else: jj = np.linspace(0, Nx-1, Ny) jj = jj.astype(int) return jj " 37652,"def get_entangler_map( num_block_qubits: int, num_circuit_qubits: int, entanglement: str, entanglement_gates: List[Tuple] = None, offset: int = 0, ) -> List[Sequence[int]]: """"""Get an entangler map for an arbitrary number of qubits. Args: num_block_qubits: The number of qubits of the entangling block. num_circuit_qubits: The number of qubits of the circuit. entanglement: The entanglement strategy. entanglement_gates: List of tuples of entanglement gates and their counts. Used for optimization. offset: The block offset, can be used if the entanglements differ per block. See mode ``sca`` for instance. Returns: The entangler map using mode ``entanglement`` to scatter a block of ``num_block_qubits`` qubits on ``num_circuit_qubits`` qubits. Raises: ValueError: If the entanglement mode ist not supported. """""" n, m = num_circuit_qubits, num_block_qubits if m > n: raise ValueError( ""The number of block qubits must be smaller or equal to the number of "" ""qubits in the circuit."" ) if entanglement == ""pairwise"" and num_block_qubits != 2: raise ValueError(""Pairwise entanglement is only defined for blocks of 2 qubits."") if entanglement == ""full"" or entanglement == ""full_explicit"": # Optimization for CX entanglement_block of size 2, containing only 'cx' gates if entanglement == ""full"" and m == 2 and entanglement_gates == [(""cx"", 1)]: return [(n - i - 2, n - i - 1) for i in range(n - 1)] return list(combinations(list(range(n)), m)) if entanglement in [""linear"", ""circular"", ""sca"", ""pairwise""]: linear = [tuple(range(i, i + m)) for i in range(n - m + 1)] # if the number of block qubits is 1, we don't have to add the 'circular' part if entanglement == ""linear"" or m == 1: return linear if entanglement == ""pairwise"": return linear[::2] + linear[1::2] # circular equals linear plus top-bottom entanglement (if there's space for it) if n > m: circular = [tuple(range(n - m + 1, n)) + (0,)] + linear else: circular = linear if entanglement == ""circular"": return circular # sca is circular plus shift and reverse shifted = circular[-offset:] + circular[:-offset] if offset % 2 == 1: # if odd, reverse the qubit indices sca = [ind[::-1] for ind in shifted] else: sca = shifted return sca else: raise ValueError(f""Unsupported entanglement type: {entanglement}"") ","def get_entangler_map( num_block_qubits: int, num_circuit_qubits: int, entanglement: str, entanglement_gates: List[Tuple] = None, offset: int = 0, ) -> List[Sequence[int]]: """"""Get an entangler map for an arbitrary number of qubits. Args: num_block_qubits: The number of qubits of the entangling block. num_circuit_qubits: The number of qubits of the circuit. entanglement: The entanglement strategy. entanglement_gates: List of tuples of entanglement gates and their counts. Used for optimization. offset: The block offset, can be used if the entanglements differ per block. See mode ``sca`` for instance. Returns: The entangler map using mode ``entanglement`` to scatter a block of ``num_block_qubits`` qubits on ``num_circuit_qubits`` qubits. Raises: ValueError: If the entanglement mode ist not supported. 
"""""" n, m = num_circuit_qubits, num_block_qubits if m > n: raise ValueError( ""The number of block qubits must be smaller or equal to the number of "" ""qubits in the circuit."" ) if entanglement == ""pairwise"" and num_block_qubits != 2: raise ValueError(""Pairwise entanglement is only defined for blocks of 2 qubits."") if entanglement in [""full"", ""full_explicit""]: # Optimization for CX entanglement_block of size 2, containing only 'cx' gates if entanglement == ""full"" and m == 2 and entanglement_gates == [(""cx"", 1)]: return [(n - i - 2, n - i - 1) for i in range(n - 1)] return list(combinations(list(range(n)), m)) if entanglement in [""linear"", ""circular"", ""sca"", ""pairwise""]: linear = [tuple(range(i, i + m)) for i in range(n - m + 1)] # if the number of block qubits is 1, we don't have to add the 'circular' part if entanglement == ""linear"" or m == 1: return linear if entanglement == ""pairwise"": return linear[::2] + linear[1::2] # circular equals linear plus top-bottom entanglement (if there's space for it) if n > m: circular = [tuple(range(n - m + 1, n)) + (0,)] + linear else: circular = linear if entanglement == ""circular"": return circular # sca is circular plus shift and reverse shifted = circular[-offset:] + circular[:-offset] if offset % 2 == 1: # if odd, reverse the qubit indices sca = [ind[::-1] for ind in shifted] else: sca = shifted return sca else: raise ValueError(f""Unsupported entanglement type: {entanglement}"") " 33850,"def test_shutdown_destructor(serve_instance): signal = SignalActor.remote() @serve.deployment class A: def __del__(self): signal.send.remote() A.deploy() A.delete() ray.get(signal.wait.remote(), timeout=10) # If the desctructor errored, it should be logged but also cleaned up. @serve.deployment class B: def __del__(self): raise RuntimeError(""Opps"") B.deploy() B.delete() ","def test_shutdown_destructor(serve_instance): signal = SignalActor.remote() @serve.deployment class A: def __del__(self): signal.send.remote() A.deploy() A.delete() ray.get(signal.wait.remote(), timeout=10) # If the destructor errored, it should be logged but also cleaned up. @serve.deployment class B: def __del__(self): raise RuntimeError(""Opps"") B.deploy() B.delete() " 32384,"def main(): # get command and args command = demisto.command() args = getArgs() # initialize common args api_key = demisto.params().get('api_key') account_uuid = demisto.params().get('account_uuid') global HEADERS HEADERS = { 'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json', } # attempt command execution try: if command == 'test-module': response = sendRequest('GET', 'Sensors', 'sensors') demisto.results('ok') if command == 'fetch-incidents': # default first fetch to -7days first_fetch_time = datetime.now() - timedelta(days=7) max_results = arg_to_number( arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False ) next_run, incidents = fetchIncidents( account_uuid=account_uuid, max_results=max_results, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_time ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif command == 'insight-get-events': if args['response_type'] == ""metadata"": response_type = ""metadata"" elif args['response_type'] == ""aggregations"": pattern = r""^.*[Gg][Rr][Oo][Uu][Pp]\s+[Bb][Yy].*$"" if not re.search(pattern, args['query']): demisto.results(""Error: No 'group by' statement in query. 
Aggregation requires a 'group by' statement."") else: response_type = ""aggregations"" else: response_type = ""events"" args.pop('response_type') response = sendRequest('POST', 'Events', None, args) response = formatEvents(response, response_type) if response_type in (""metadata"", ""aggregations""): responseToEntry(response, 'Events', 'Data') else: responseToEntry(response, 'Events', 'Events') elif command == 'insight-get-history': response = sendRequest('GET', 'Events', 'history') responseToEntry(response, 'UserQueryHistory', 'History') elif command == 'insight-get-saved-searches': response = sendRequest('GET', 'Events', 'saved') responseToEntry(response, 'SavedSearches', 'Saved Queries') elif command == 'insight-get-sensors': response = sendRequest('GET', 'Sensors', 'sensors') responseToEntry(response, 'Sensors', 'Sensors') elif command == 'insight-get-devices': response = sendRequest('GET', 'Sensors', 'devices') responseToEntry(response, 'Devices', 'Device List') elif command == 'insight-get-tasks': if 'task_uuid' in args: endpoint = 'pcaptasks/' + args['task_uuid'] response = sendRequest('GET', 'Sensors', endpoint) responseToEntry(response, 'Tasks', 'PCAP Task') else: response = sendRequest('GET', 'Sensors', 'pcaptasks') responseToEntry(response, 'Tasks', 'PCAPTasks') elif command == 'insight-create-task': sensor_ids = [args['sensor_ids']] args.pop('sensor_ids') args['sensor_ids'] = sensor_ids response = sendRequest('POST', 'Sensors', 'pcaptasks', args) demisto.results(""Task created successfully"") elif command == 'insight-get-detections': response = sendRequest('GET', 'Detections', 'detections', None, encodeArgsToURL(args)) if response['total_count'] > MAX_DETECTIONS: if 'limit' not in args or int(args['limit']) > MAX_DETECTIONS: # pull the remaining detections incrementally response = getDetectionsInc(response, args) # filter out training detections detections = [] for detection in response['detections']: if detection['account_uuid'] != TRAINING_ACC: detections.append(detection) response['detections'] = detections if 'include' in args: if args['include'] == 'rules': response = addDetectionRules(response) responseToEntry(response, 'Detections', 'Detections') elif command == 'insight-get-detection-rules': response = sendRequest('GET', 'Detections', 'rules', None, encodeArgsToURL(args)) responseToEntry(response, 'Rules', 'Rules') elif command == 'insight-get-detection-rule-events': rule_uuid = args['rule_uuid'] endpoint = ""rules/"" + rule_uuid + ""/events"" args.pop('rule_uuid') response = sendRequest('GET', 'Detections', endpoint, None, encodeArgsToURL(args)) responseToEntry(response, 'Detections', 'Events') elif command == 'insight-resolve-detection': endpoint = ""detections/"" + args['detection_uuid'] + ""/resolve"" body = {""resolution"": args['resolution'], ""resolution_comment"": args['resolution_comment']} sendRequest('PUT', 'Detections', endpoint, body, None) demisto.results(""Detection resolved successfully"") elif command == 'insight-create-detection-rule': run_accts = [args['run_account_uuids']] dev_ip_fields = [args['device_ip_fields']] args.pop('run_account_uuids') args.pop('device_ip_fields') args['run_account_uuids'] = run_accts args['device_ip_fields'] = dev_ip_fields sendRequest('POST', 'Detections', 'rules', args, None) demisto.results(""Rule created successfully"") elif command == 'insight-get-entity-summary': endpoint = args['entity'] + ""/summary"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.Summary', 'Summary') 
elif command == 'insight-get-entity-pdns': endpoint = args['entity'] + ""/pdns"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.PDNS', 'PassiveDNS') elif command == 'insight-get-entity-dhcp': endpoint = args['entity'] + ""/dhcp"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.DHCP', 'DHCP') elif command == 'insight-get-entity-file': endpoint = args['hash'] + ""/file"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.File', 'File') elif command == 'insight-get-telemetry-events': response = sendRequest('GET', 'Sensors', 'telemetry/events', None, encodeArgsToURL(args)) responseToEntry(response, 'Telemetry.Events', 'Data') elif command == 'insight-get-telemetry-network': response = sendRequest('GET', 'Sensors', 'telemetry/network', None, encodeArgsToURL(args)) responseToEntry(response, 'Telemetry.Network', 'Data') elif command == 'insight-get-telemetry-packetstats': response = sendRequest('GET', 'Sensors', 'telemetry/packetstats', None, encodeArgsToURL(args)) responseToEntry(response, 'Telemetry.Packetstats', 'Data') # catch exceptions except Exception as e: return_error(str(e)) ","def main(): # get command and args command = demisto.command() args: Dict[str, Any] = demisto.args() # initialize common args api_key = demisto.params().get('api_key') account_uuid = demisto.params().get('account_uuid') global HEADERS HEADERS = { 'Authorization': 'IBToken ' + api_key, 'User-Agent': 'Cortex_Insight.v3', 'Content-Type': 'application/json', } # attempt command execution try: if command == 'test-module': response = sendRequest('GET', 'Sensors', 'sensors') demisto.results('ok') if command == 'fetch-incidents': # default first fetch to -7days first_fetch_time = datetime.now() - timedelta(days=7) max_results = arg_to_number( arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False ) next_run, incidents = fetchIncidents( account_uuid=account_uuid, max_results=max_results, last_run=demisto.getLastRun(), first_fetch_time=first_fetch_time ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif command == 'insight-get-events': if args['response_type'] == ""metadata"": response_type = ""metadata"" elif args['response_type'] == ""aggregations"": pattern = r""^.*[Gg][Rr][Oo][Uu][Pp]\s+[Bb][Yy].*$"" if not re.search(pattern, args['query']): demisto.results(""Error: No 'group by' statement in query. 
Aggregation requires a 'group by' statement."") else: response_type = ""aggregations"" else: response_type = ""events"" args.pop('response_type') response = sendRequest('POST', 'Events', None, args) response = formatEvents(response, response_type) if response_type in (""metadata"", ""aggregations""): responseToEntry(response, 'Events', 'Data') else: responseToEntry(response, 'Events', 'Events') elif command == 'insight-get-history': response = sendRequest('GET', 'Events', 'history') responseToEntry(response, 'UserQueryHistory', 'History') elif command == 'insight-get-saved-searches': response = sendRequest('GET', 'Events', 'saved') responseToEntry(response, 'SavedSearches', 'Saved Queries') elif command == 'insight-get-sensors': response = sendRequest('GET', 'Sensors', 'sensors') responseToEntry(response, 'Sensors', 'Sensors') elif command == 'insight-get-devices': response = sendRequest('GET', 'Sensors', 'devices') responseToEntry(response, 'Devices', 'Device List') elif command == 'insight-get-tasks': if 'task_uuid' in args: endpoint = 'pcaptasks/' + args['task_uuid'] response = sendRequest('GET', 'Sensors', endpoint) responseToEntry(response, 'Tasks', 'PCAP Task') else: response = sendRequest('GET', 'Sensors', 'pcaptasks') responseToEntry(response, 'Tasks', 'PCAPTasks') elif command == 'insight-create-task': sensor_ids = [args['sensor_ids']] args.pop('sensor_ids') args['sensor_ids'] = sensor_ids response = sendRequest('POST', 'Sensors', 'pcaptasks', args) demisto.results(""Task created successfully"") elif command == 'insight-get-detections': response = sendRequest('GET', 'Detections', 'detections', None, encodeArgsToURL(args)) if response['total_count'] > MAX_DETECTIONS: if 'limit' not in args or int(args['limit']) > MAX_DETECTIONS: # pull the remaining detections incrementally response = getDetectionsInc(response, args) # filter out training detections detections = [] for detection in response['detections']: if detection['account_uuid'] != TRAINING_ACC: detections.append(detection) response['detections'] = detections if 'include' in args: if args['include'] == 'rules': response = addDetectionRules(response) responseToEntry(response, 'Detections', 'Detections') elif command == 'insight-get-detection-rules': response = sendRequest('GET', 'Detections', 'rules', None, encodeArgsToURL(args)) responseToEntry(response, 'Rules', 'Rules') elif command == 'insight-get-detection-rule-events': rule_uuid = args['rule_uuid'] endpoint = ""rules/"" + rule_uuid + ""/events"" args.pop('rule_uuid') response = sendRequest('GET', 'Detections', endpoint, None, encodeArgsToURL(args)) responseToEntry(response, 'Detections', 'Events') elif command == 'insight-resolve-detection': endpoint = ""detections/"" + args['detection_uuid'] + ""/resolve"" body = {""resolution"": args['resolution'], ""resolution_comment"": args['resolution_comment']} sendRequest('PUT', 'Detections', endpoint, body, None) demisto.results(""Detection resolved successfully"") elif command == 'insight-create-detection-rule': run_accts = [args['run_account_uuids']] dev_ip_fields = [args['device_ip_fields']] args.pop('run_account_uuids') args.pop('device_ip_fields') args['run_account_uuids'] = run_accts args['device_ip_fields'] = dev_ip_fields sendRequest('POST', 'Detections', 'rules', args, None) demisto.results(""Rule created successfully"") elif command == 'insight-get-entity-summary': endpoint = args['entity'] + ""/summary"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.Summary', 'Summary') 
elif command == 'insight-get-entity-pdns': endpoint = args['entity'] + ""/pdns"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.PDNS', 'PassiveDNS') elif command == 'insight-get-entity-dhcp': endpoint = args['entity'] + ""/dhcp"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.DHCP', 'DHCP') elif command == 'insight-get-entity-file': endpoint = args['hash'] + ""/file"" response = sendRequest('GET', 'Entity', endpoint, None, None) responseToEntry(response, 'Entity.File', 'File') elif command == 'insight-get-telemetry-events': response = sendRequest('GET', 'Sensors', 'telemetry/events', None, encodeArgsToURL(args)) responseToEntry(response, 'Telemetry.Events', 'Data') elif command == 'insight-get-telemetry-network': response = sendRequest('GET', 'Sensors', 'telemetry/network', None, encodeArgsToURL(args)) responseToEntry(response, 'Telemetry.Network', 'Data') elif command == 'insight-get-telemetry-packetstats': response = sendRequest('GET', 'Sensors', 'telemetry/packetstats', None, encodeArgsToURL(args)) responseToEntry(response, 'Telemetry.Packetstats', 'Data') # catch exceptions except Exception as e: return_error(str(e)) " 31550,"def fetch_incidents_long_running_events( client: QRadarClient, incident_type, user_query, ip_enrich, asset_enrich, fetch_mode, events_columns, events_limit, ): last_run = get_integration_context(SYNC_CONTEXT) offense_id = last_run[""id""] if last_run and ""id"" in last_run else 0 raw_offenses = fetch_raw_offenses(client, offense_id, user_query) if len(raw_offenses) == 0: return if isinstance(raw_offenses, list): raw_offenses.reverse() for offense in raw_offenses: offense_id = max(offense_id, offense[""id""]) enriched_offenses = [] futures = [] for offense in raw_offenses: futures.append( EXECUTOR.submit( enrich_offense_with_events, client=client, offense=offense, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, ) ) for future in concurrent.futures.as_completed(futures): enriched_offenses.append(future.result()) if is_reset_triggered(client.lock, handle_reset=True): return enriched_offenses.sort(key=lambda offense: offense.get(""id"", 0)) if ip_enrich or asset_enrich: print_debug_msg(""Enriching offenses"") enrich_offense_result(client, enriched_offenses, ip_enrich, asset_enrich) print_debug_msg(""Enriched offenses successfully."") new_incidents_samples = create_incidents(enriched_offenses, incident_type) incidents_batch_for_sample = ( new_incidents_samples if new_incidents_samples else last_run.get(""samples"", []) ) if incidents_batch_for_sample: context = {LAST_FETCH_KEY: offense_id, ""samples"": incidents_batch_for_sample[:1]} else: context = {LAST_FETCH_KEY: offense_id, ""samples"": []} set_integration_context(context, sync=SYNC_CONTEXT) ","def fetch_incidents_long_running_events( client: QRadarClient, incident_type, user_query, ip_enrich, asset_enrich, fetch_mode, events_columns, events_limit, ): last_run = get_integration_context(SYNC_CONTEXT) offense_id = last_run[""id""] if last_run and ""id"" in last_run else 0 raw_offenses = fetch_raw_offenses(client, offense_id, user_query) if len(raw_offenses) == 0: return if isinstance(raw_offenses, list): raw_offenses.reverse() for offense in raw_offenses: offense_id = max(offense_id, offense[""id""]) enriched_offenses = [] futures = [] for offense in raw_offenses: futures.append( EXECUTOR.submit( enrich_offense_with_events, client=client, offense=offense, fetch_mode=fetch_mode, 
events_columns=events_columns, events_limit=events_limit, ) ) for future in concurrent.futures.as_completed(futures): enriched_offenses.append(future.result()) if is_reset_triggered(client.lock, handle_reset=True): return enriched_offenses.sort(key=lambda offense: offense.get(""id"", 0)) if ip_enrich or asset_enrich: print_debug_msg(""Enriching offenses"") enrich_offense_result(client, enriched_offenses, ip_enrich, asset_enrich) print_debug_msg(""Enriched offenses successfully."") new_incidents_samples = create_incidents(enriched_offenses, incident_type) incidents_batch_for_sample = ( new_incidents_samples if new_incidents_samples else last_run.get(""samples"", []) ) context = {LAST_FETCH_KEY: offense_id, ""samples"": incidents_batch_for_sample[:1]} set_integration_context(context, sync=SYNC_CONTEXT) " 57677,"def infinipoint_command(client: Client, args=None, optional_args=None, pagination=True): rules = None cve = [] method = ""POST"" # Cancel pagination if necessary if ""pagination"" in optional_args: pagination = optional_args['pagination'] # Pass arguments as is if ""pass_args"" in optional_args: rules = args # Move request type to GET elif ""get_req"" in optional_args: optional_args['route'] = optional_args['route'].format(**args) method = ""GET"" # Change url - Post request elif ""format_route"" in optional_args: optional_args['route'] = optional_args['route'].format(**args) else: rules = [] for k, v in optional_args['args'].items(): if args.get(k): rules.append({'field': k, ""operator"": v, ""value"": f""{args[k]}""}) res = client.call_api(optional_args['route'], rules, pagination=pagination, method=method) if res: for node in res: # Handle time format - convert to ISO from epoch if '$time' in node and isinstance(node['$time'], int): created_time_ms = int(node.get('$time', '0')) * 1000 node['$time'] = timestamp_to_datestring(created_time_ms) # CVE reputation if ""cve_id"" in res: cve = [Common.CVE( id=res['cve_id'], cvss=res['cve_dynamic_data']['base_metric_v2']['base_score'], description=res['cve_description'], published='', modified='' )] return CommandResults(outputs_prefix=optional_args['outputs_prefix'], outputs_key_field=optional_args['outputs_key_field'], outputs=res, indicators=cve) ","def infinipoint_command(client: Client, args=None, optional_args=None, pagination=True): rules = None cve = [] method = ""POST"" # Cancel pagination if necessary if ""pagination"" in optional_args: pagination = optional_args['pagination'] # Pass arguments as is if ""pass_args"" in optional_args: rules = args # Move request type to GET elif ""get_req"" in optional_args: optional_args['route'] = optional_args['route'].format(**args) method = ""GET"" # Change url - Post request elif ""format_route"" in optional_args: optional_args['route'] = optional_args['route'].format(**args) else: rules = [] for k, v in optional_args['args'].items(): if args.get(k): rules.append({'field': k, ""operator"": v, ""value"": f""{args[k]}""}) res = client.call_api(optional_args['route'], rules, pagination=pagination, method=method) if res: for node in res: # Handle time format - convert to ISO from epoch if '$time' in node and isinstance(node['$time'], int): created_time = datetime.fromtimestamp(int(node.get('$time', '0')), timezone.utc) node['$time'] = created_time.isoformat() # CVE reputation if ""cve_id"" in res: cve = [Common.CVE( id=res['cve_id'], cvss=res['cve_dynamic_data']['base_metric_v2']['base_score'], description=res['cve_description'], published='', modified='' )] return 
CommandResults(outputs_prefix=optional_args['outputs_prefix'], outputs_key_field=optional_args['outputs_key_field'], outputs=res, indicators=cve) " 34172,"def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): import rasa.cli.arguments.train as core_cli train_parser = subparsers.add_parser( ""train"", help=""Train a Rasa model using your NLU data and stories."", parents=parents, formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) arguments.train.set_train_arguments(train_parser) train_subparsers = train_parser.add_subparsers() train_core_parser = train_subparsers.add_parser( ""core"", parents=parents, conflict_handler=""resolve"", formatter_class=argparse.ArgumentDefaultsHelpFormatter, help=""Train a Rasa Core model using your stories."", ) train_core_parser.set_defaults(func=train_core) train_nlu_parser = train_subparsers.add_parser( ""nlu"", parents=parents, formatter_class=argparse.ArgumentDefaultsHelpFormatter, help=""Train a Rasa NLU model using your NLU data."", ) train_nlu_parser.set_defaults(func=train_nlu) train_parser.set_defaults(func=train) arguments.train.set_train_core_arguments(train_core_parser) arguments.train.set_train_nlu_arguments(train_nlu_parser) ","def add_subparser( subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser] ): import rasa.cli.arguments.train as core_cli train_parser = subparsers.add_parser( ""train"", help=""Train a Rasa model using your NLU data and stories."", parents=parents, formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) arguments.train.set_train_arguments(train_parser) train_subparsers = train_parser.add_subparsers() train_core_parser = train_subparsers.add_parser( ""core"", parents=parents, conflict_handler=""resolve"", formatter_class=argparse.ArgumentDefaultsHelpFormatter, help=""Train a Rasa Core model using your stories."", ) train_core_parser.set_defaults(func=train_core) train_nlu_parser = train_subparsers.add_parser( ""nlu"", parents=parents, formatter_class=argparse.ArgumentDefaultsHelpFormatter, help=""Trains a Rasa NLU model using your NLU data."", ) train_nlu_parser.set_defaults(func=train_nlu) train_parser.set_defaults(func=train) arguments.train.set_train_core_arguments(train_core_parser) arguments.train.set_train_nlu_arguments(train_nlu_parser) " 5768,"def roots_legendre(n, mu=False): r""""""Gauss-Legendre quadrature. Compute the sample points and weights for Gauss-Legendre quadrature [GL]_. The sample points are the roots of the nth degree Legendre polynomial :math:`P_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = 1`. See 2.2.10 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. 
[GL] Gauss-Legendre quadrature, Wikipedia, https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature Examples -------- >>> from scipy.special import roots_legendre, eval_legendre >>> roots, weights = roots_legendre(9) ``roots`` holds the roots, and ``weights`` holds the weights for Gauss-Legendre quadrature. >>> roots array([-0.96816024, -0.83603111, -0.61337143, -0.32425342, 0. , 0.32425342, 0.61337143, 0.83603111, 0.96816024]) >>> weights array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936, 0.31234708, 0.2606107 , 0.18064816, 0.08127439]) Verify that we have the roots by evaluating the degree 9 Legendre polynomial at ``roots``. All the values are approximately zero: >>> eval_legendre(9, roots) array([-8.88178420e-16, -2.22044605e-16, 1.11022302e-16, 1.11022302e-16, 0.00000000e+00, -5.55111512e-17, -1.94289029e-16, 1.38777878e-16, -8.32667268e-17]) Here we'll show how the above values can be used to estimate the integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre quadrature [GL]_. First define the function and the integration limits. >>> def f(t): ... return t + 1/t ... >>> a = 1 >>> b = 2 We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral of f from t=a to t=b. The sample points in ``roots`` are from the interval [-1, 1], so we'll rewrite the integral with the simple change of variable:: x = 2/(b - a) * t - (a + b)/(b - a) with inverse:: t = (b - a)/2 * x + (a + 2)/2 Then:: integral(f(t), a, b) = (b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1) We can approximate the latter integral with the values returned by `roots_legendre`. Map the roots computed above from [-1, 1] to [a, b]. >>> t = (b - a)/2 * roots + (a + b)/2 Approximate the integral as the weighted sum of the function values. >>> (b - a)/2 * f(t).dot(weights) 2.1931471805599276 Compare that to the exact result, which is 3/2 + log(2): >>> 1.5 + np.log(2) 2.1931471805599454 """""" m = int(n) if n < 1 or n != m: raise ValueError(""n must be a positive integer."") mu0 = 2.0 an_func = lambda k: 0.0 * k bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) f = cephes.eval_legendre df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + n*cephes.eval_legendre(n-1, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) ","def roots_legendre(n, mu=False): r""""""Gauss-Legendre quadrature. Compute the sample points and weights for Gauss-Legendre quadrature [GL]_. The sample points are the roots of the nth degree Legendre polynomial :math:`P_n(x)`. These sample points and weights correctly integrate polynomials of degree :math:`2n - 1` or less over the interval :math:`[-1, 1]` with weight function :math:`w(x) = 1`. See 2.2.10 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.quadrature scipy.integrate.fixed_quad numpy.polynomial.legendre.leggauss References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. .. [GL] Gauss-Legendre quadrature, Wikipedia, https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature Examples -------- >>> from scipy.special import roots_legendre, eval_legendre >>> roots, weights = roots_legendre(9) ``roots`` holds the roots, and ``weights`` holds the weights for Gauss-Legendre quadrature. 
>>> roots array([-0.96816024, -0.83603111, -0.61337143, -0.32425342, 0., 0.32425342, 0.61337143, 0.83603111, 0.96816024]) >>> weights array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936, 0.31234708, 0.2606107 , 0.18064816, 0.08127439]) Verify that we have the roots by evaluating the degree 9 Legendre polynomial at ``roots``. All the values are approximately zero: >>> eval_legendre(9, roots) array([-8.88178420e-16, -2.22044605e-16, 1.11022302e-16, 1.11022302e-16, 0.00000000e+00, -5.55111512e-17, -1.94289029e-16, 1.38777878e-16, -8.32667268e-17]) Here we'll show how the above values can be used to estimate the integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre quadrature [GL]_. First define the function and the integration limits. >>> def f(t): ... return t + 1/t ... >>> a = 1 >>> b = 2 We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral of f from t=a to t=b. The sample points in ``roots`` are from the interval [-1, 1], so we'll rewrite the integral with the simple change of variable:: x = 2/(b - a) * t - (a + b)/(b - a) with inverse:: t = (b - a)/2 * x + (a + 2)/2 Then:: integral(f(t), a, b) = (b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1) We can approximate the latter integral with the values returned by `roots_legendre`. Map the roots computed above from [-1, 1] to [a, b]. >>> t = (b - a)/2 * roots + (a + b)/2 Approximate the integral as the weighted sum of the function values. >>> (b - a)/2 * f(t).dot(weights) 2.1931471805599276 Compare that to the exact result, which is 3/2 + log(2): >>> 1.5 + np.log(2) 2.1931471805599454 """""" m = int(n) if n < 1 or n != m: raise ValueError(""n must be a positive integer."") mu0 = 2.0 an_func = lambda k: 0.0 * k bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1)) f = cephes.eval_legendre df = lambda n, x: (-n*x*cephes.eval_legendre(n, x) + n*cephes.eval_legendre(n-1, x))/(1-x**2) return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu) " 23039,"def from_dask_array(x, columns=None, index=None): """""" Create a Dask DataFrame from a Dask Array. Converts a 2d array into a DataFrame and a 1d array into a Series. Parameters ---------- x : da.Array columns : list or string list of column names if DataFrame, single string if Series index : dask.dataframe.Index, optional An optional *dask* Index to use for the output Series or DataFrame. The default output index depends on whether `x` has any unknown chunks. If there are any unknown chunks, the output has ``None`` for all the divisions (one per chunk). If all the chunks are known, a default index with known divsions is created. Specifying `index` can be useful if you're conforming a Dask Array to an existing dask Series or DataFrame, and you would like the indices to match. 
Examples ------- >>> import dask.array as da >>> import dask.dataframe as dd >>> x = da.ones((4, 2), chunks=(2, 2)) >>> df = dd.io.from_dask_array(x, columns=['a', 'b']) >>> df.compute() a b 0 1.0 1.0 1 1.0 1.0 2 1.0 1.0 3 1.0 1.0 See Also -------- dask.bag.to_dataframe: from dask.bag dask.dataframe._Frame.values: Reverse conversion dask.dataframe._Frame.to_records: Reverse conversion """""" meta = _meta_from_array(x, columns, index) if x.ndim == 2 and len(x.chunks[1]) > 1: x = x.rechunk({1: x.shape[1]}) name = ""from-dask-array"" + tokenize(x, columns) to_merge = [] if index is not None: if not isinstance(index, Index): raise ValueError(""'index' must be an instance of dask.dataframe.Index"") if index.npartitions != x.numblocks[0]: msg = ( ""The index and array have different numbers of blocks. "" ""({} != {})"".format(index.npartitions, x.numblocks[0]) ) raise ValueError(msg) divisions = index.divisions to_merge.append(ensure_dict(index.dask)) index = index.__dask_keys__() elif np.isnan(sum(x.shape)): divisions = [None] * (len(x.chunks[0]) + 1) index = [None] * len(x.chunks[0]) else: divisions = [0] for c in x.chunks[0]: divisions.append(divisions[-1] + c) index = [ (np.arange, a, b, 1, ""i8"") for a, b in zip(divisions[:-1], divisions[1:]) ] divisions[-1] -= 1 dsk = {} for i, (chunk, ind) in enumerate(zip(x.__dask_keys__(), index)): if x.ndim == 2: chunk = chunk[0] if isinstance(meta, pd.Series): dsk[name, i] = (pd.Series, chunk, ind, x.dtype, meta.name) else: dsk[name, i] = (pd.DataFrame, chunk, ind, meta.columns) to_merge.extend([ensure_dict(x.dask), dsk]) return new_dd_object(merge(*to_merge), name, meta, divisions) ","def from_dask_array(x, columns=None, index=None): """""" Create a Dask DataFrame from a Dask Array. Converts a 2d array into a DataFrame and a 1d array into a Series. Parameters ---------- x : da.Array columns : list or string list of column names if DataFrame, single string if Series index : dask.dataframe.Index, optional An optional *dask* Index to use for the output Series or DataFrame. The default output index depends on whether `x` has any unknown chunks. If there are any unknown chunks, the output has ``None`` for all the divisions (one per chunk). If all the chunks are known, a default index with known divsions is created. Specifying `index` can be useful if you're conforming a Dask Array to an existing dask Series or DataFrame, and you would like the indices to match. Examples -------- >>> import dask.array as da >>> import dask.dataframe as dd >>> x = da.ones((4, 2), chunks=(2, 2)) >>> df = dd.io.from_dask_array(x, columns=['a', 'b']) >>> df.compute() a b 0 1.0 1.0 1 1.0 1.0 2 1.0 1.0 3 1.0 1.0 See Also -------- dask.bag.to_dataframe: from dask.bag dask.dataframe._Frame.values: Reverse conversion dask.dataframe._Frame.to_records: Reverse conversion """""" meta = _meta_from_array(x, columns, index) if x.ndim == 2 and len(x.chunks[1]) > 1: x = x.rechunk({1: x.shape[1]}) name = ""from-dask-array"" + tokenize(x, columns) to_merge = [] if index is not None: if not isinstance(index, Index): raise ValueError(""'index' must be an instance of dask.dataframe.Index"") if index.npartitions != x.numblocks[0]: msg = ( ""The index and array have different numbers of blocks. 
"" ""({} != {})"".format(index.npartitions, x.numblocks[0]) ) raise ValueError(msg) divisions = index.divisions to_merge.append(ensure_dict(index.dask)) index = index.__dask_keys__() elif np.isnan(sum(x.shape)): divisions = [None] * (len(x.chunks[0]) + 1) index = [None] * len(x.chunks[0]) else: divisions = [0] for c in x.chunks[0]: divisions.append(divisions[-1] + c) index = [ (np.arange, a, b, 1, ""i8"") for a, b in zip(divisions[:-1], divisions[1:]) ] divisions[-1] -= 1 dsk = {} for i, (chunk, ind) in enumerate(zip(x.__dask_keys__(), index)): if x.ndim == 2: chunk = chunk[0] if isinstance(meta, pd.Series): dsk[name, i] = (pd.Series, chunk, ind, x.dtype, meta.name) else: dsk[name, i] = (pd.DataFrame, chunk, ind, meta.columns) to_merge.extend([ensure_dict(x.dask), dsk]) return new_dd_object(merge(*to_merge), name, meta, divisions) " 24792,"def node_frame_class(node: astroid.node_classes.NodeNG) -> Optional[astroid.ClassDef]: """"""Return the class that is wrapping the given node The function returns a class for a method node (or a staticmethod or a classmethod), otherwise it returns `None`. """""" klass = node.frame() nodes_to_check = ( astroid.node_classes.NodeNG, astroid.UnboundMethod, astroid.BaseInstance, ) while ( klass and isinstance(klass, nodes_to_check) and not isinstance(klass, astroid.ClassDef) ): if klass.parent is None: return None else: klass = klass.parent.frame() return klass ","def node_frame_class(node: astroid.node_classes.NodeNG) -> Optional[astroid.ClassDef]: """"""Return the class that is wrapping the given node The function returns a class for a method node (or a staticmethod or a classmethod), otherwise it returns `None`. """""" klass = node.frame() nodes_to_check = ( astroid.node_classes.NodeNG, astroid.UnboundMethod, astroid.BaseInstance, ) while ( klass and isinstance(klass, nodes_to_check) and not isinstance(klass, astroid.ClassDef) ): if klass.parent is None: return None klass = klass.parent.frame() return klass " 48013,"def parse_args(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__) parser.add_argument('--demo-build-dir', type=Path, required=True, metavar='DIR', help='directory with demo binaries') parser.add_argument('--test-data-dir', type=Path, required=True, metavar='DIR', help='directory with test data') parser.add_argument('--downloader-cache-dir', type=Path, required=True, metavar='DIR', help='directory to use as the cache for the model downloader') parser.add_argument('--demos', metavar='DEMO[,DEMO...]', help='list of demos to run tests for (by default, every demo is tested)') parser.add_argument('--scope', default='performance', help='The scenario for testing demos.', choices=('base', 'performance')) parser.add_argument('--all_metrics', default=False, action='store_true', help='Collect metrics for each stage of pipeline while performance testing.') parser.add_argument('--mo', type=Path, metavar='MO.PY', help='Model Optimizer entry point script') parser.add_argument('--devices', default=""CPU GPU"", help='list of devices to test') parser.add_argument('--report-file', type=Path, help='path to report file') parser.add_argument('--suppressed-devices', type=Path, required=False, help='path to file with suppressed devices for each model') parser.add_argument('--precisions', type=str, nargs='+', default=['FP16'], help='IR precisions for all models. 
By default, models are tested in FP16 precision') return parser.parse_args() ","def parse_args(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__) parser.add_argument('--demo-build-dir', type=Path, required=True, metavar='DIR', help='directory with demo binaries') parser.add_argument('--test-data-dir', type=Path, required=True, metavar='DIR', help='directory with test data') parser.add_argument('--downloader-cache-dir', type=Path, required=True, metavar='DIR', help='directory to use as the cache for the model downloader') parser.add_argument('--demos', metavar='DEMO[,DEMO...]', help='list of demos to run tests for (by default, every demo is tested)') parser.add_argument('--scope', default='performance', help='The scenario for testing demos.', choices=('base', 'performance')) parser.add_argument('--all-metrics', default=False, action='store_true', help='Collect metrics for each stage of pipeline while performance testing.') parser.add_argument('--mo', type=Path, metavar='MO.PY', help='Model Optimizer entry point script') parser.add_argument('--devices', default=""CPU GPU"", help='list of devices to test') parser.add_argument('--report-file', type=Path, help='path to report file') parser.add_argument('--suppressed-devices', type=Path, required=False, help='path to file with suppressed devices for each model') parser.add_argument('--precisions', type=str, nargs='+', default=['FP16'], help='IR precisions for all models. By default, models are tested in FP16 precision') return parser.parse_args() " 12045,"def main(): fname = iris.sample_data_path(""air_temp.pp"") temperature = iris.load_cube(fname) collapsed_temp = temperature.collapsed(""longitude"", MEAN) # Set y axes with -90 and 90 limits and spacing of 15 per tick. yticks = np.arange(-90, 105, 15) ylim = [-90, 90] fig = plt.figure(figsize=[12, 4]) ax1 = fig.add_subplot(111, projection=ccrs.PlateCarree()) plt.sca(ax1) im = iplt.contourf(temperature, cmap=""RdYlBu_r"") ax1.coastlines() ax1.gridlines() ax1.set_xticks([-180, -90, 0, 90, 180], crs=ccrs.PlateCarree()) ax1.set_yticks(yticks, crs=ccrs.PlateCarree()) ax1.set_title(""Air Temperature"") ax1.set_ylabel(""latitude"") ax1.set_xlabel(""longitude"") ax1.set_ylim(*ylim) divider = make_axes_locatable(ax1) # Gives the air temperature bar size, colour and a title. ax2 = divider.new_vertical( size=""5%"", pad=0.5, axes_class=plt.Axes, pack_start=True ) fig.add_axes(ax2) plt.sca(ax2) cbar = plt.colorbar(im, cax=ax2, orientation=""horizontal"") cbar.ax.set_xlabel(""Air Temperature [k]"") # Round each tick for the third ax to the nearest 20 (ready for use). data_max = collapsed_temp.data.max() x_max = data_max - data_max % -20 data_min = collapsed_temp.data.min() x_min = data_min - data_min % 20 # Plot ""collapsed_temp"" on the mean graph and set the ticks and titles on the axes. 
ax3 = divider.new_horizontal(size=""30%"", pad=0.4, axes_class=plt.Axes) fig.add_axes(ax3) plt.sca(ax3) iplt.plot(collapsed_temp, collapsed_temp.coord(""latitude"")) ax3.axvline(0, color=""k"", linewidth=0.5) ax3.set_ylim(*ylim) ax3.set_title(""Zonal mean"") ax3.set_ylabel(""latitude"") ax3.set_xlabel(""Air Temperature [k]"") ax3.yaxis.set_label_position(""right"") ax3.yaxis.tick_right() ax3.set_yticks(yticks) ax3.set_xlim(x_min, x_max) plt.show() ","def main(): fname = iris.sample_data_path(""air_temp.pp"") temperature = iris.load_cube(fname) collapsed_temp = temperature.collapsed(""longitude"", MEAN) # Set y axes with -90 and 90 limits and spacing of 15 per tick. yticks = np.arange(-90, 105, 15) ylim = [-90, 90] fig = plt.figure(figsize=[12, 4]) ax1 = fig.add_subplot(111, projection=ccrs.PlateCarree()) plt.sca(ax1) im = iplt.contourf(temperature, cmap=""RdYlBu_r"") ax1.coastlines() ax1.gridlines() ax1.set_xticks([-180, -90, 0, 90, 180], crs=ccrs.PlateCarree()) ax1.set_yticks(yticks, crs=ccrs.PlateCarree()) ax1.set_title(""Air Temperature"") ax1.set_ylabel(""latitude"") ax1.set_xlabel(""longitude"") ax1.set_ylim(*ylim) divider = make_axes_locatable(ax1) # Gives the air temperature bar size, colour and a title. ax2 = divider.new_vertical( size=""5%"", pad=0.5, axes_class=plt.Axes, pack_start=True ) fig.add_axes(ax2) plt.sca(ax2) cbar = plt.colorbar(im, cax=ax2, orientation=""horizontal"") cbar.ax.set_xlabel(""Air Temperature [k]"") # Round each tick for the third ax to the nearest 20 (ready for use). data_max = collapsed_temp.data.max() x_max = data_max - data_max % -20 data_min = collapsed_temp.data.min() x_min = data_min - data_min % 20 # Plot ""collapsed_temp"" on the mean graph and set the ticks and titles on the axes. ax3 = divider.new_horizontal(size=""30%"", pad=0.4, axes_class=plt.Axes) fig.add_axes(ax3) plt.sca(ax3) iplt.plot(collapsed_temp, collapsed_temp.coord(""latitude"")) ax3.axvline(0, color=""k"", linewidth=0.5) ax3.set_ylim(*ylim) ax3.set_title(""Zonal mean"") ax3.set_ylabel(""latitude"") ax3.set_xlabel(""Air Temperature [K]"") ax3.yaxis.set_label_position(""right"") ax3.yaxis.tick_right() ax3.set_yticks(yticks) ax3.set_xlim(x_min, x_max) plt.show() " 57212,"def regenerate_missing_stats_for_exploration( exp_id: str ) -> Tuple[list[str], list[str], int, int]: """"""Regenerates missing ExplorationStats models and entries for all corresponding states in an exploration. Args: exp_id: str. The ID of the exp. Returns: 4-tuple(missing_exp_stats, missing_state_stats, num_valid_exp_stats, num_valid_state_stats). where: missing_exp_stats: list(str). List of missing exploration stats. missing_state_stats: list(str). List of missing state stats. num_valid_exp_stats: int. Number of valid exploration stats. num_valid_state_stats: int. Number of valid state stats. Raises: Exception. Fetching exploration versions failed. Exception. No ExplorationStatsModels found. Exception. Exploration snapshots contain invalid commit_cmds. Exception. Exploration does not have a given state. 
"""""" exploration = exp_fetchers.get_exploration_by_id(exp_id) num_valid_state_stats = 0 num_valid_exp_stats = 0 exp_versions = list(range(1, exploration.version + 1)) missing_exp_stats_indices = [] exp_stats_list = stats_services.get_multiple_exploration_stats_by_version( exp_id, exp_versions) exp_list = ( exp_fetchers .get_multiple_versioned_exp_interaction_ids_mapping_by_version( exp_id, exp_versions)) if all(exp_stats is None for exp_stats in exp_stats_list): for index, version in enumerate(exp_versions): exp_stats_for_version = ( stats_services.get_stats_for_new_exploration( exp_id, version, list(exp_list[index].state_interaction_ids_dict.keys()))) stats_services.create_stats_model(exp_stats_for_version) raise Exception('No ExplorationStatsModels found') snapshots = exp_models.ExplorationModel.get_snapshots_metadata( exp_id, exp_versions) change_lists = [] for snapshot in snapshots: try: change_lists.append([ exp_domain.ExplorationChange(commit_cmd) for commit_cmd in snapshot['commit_cmds'] ]) except utils.ValidationError as e: raise Exception( 'Exploration(id=%r) snapshots contain invalid commit_cmds: %r' % (exp_id, snapshot['commit_cmds'])) from e missing_exp_stats = [] missing_state_stats = [] zipped_items = list( zip(exp_stats_list, exp_list, change_lists)) revert_commit_cmd = exp_models.ExplorationModel.CMD_REVERT_COMMIT for i, (exp_stats, exp, change_list) in enumerate(zipped_items): revert_to_version = next( ( int(change.version_number) for change in change_list if change.cmd == revert_commit_cmd ), None) new_exp_version = None if revert_to_version is not None: exp_versions_diff = None # We subtract 2 from revert_to_version to get the index of the # previous exploration version because exp_stats_list and # prev_exp start with version 1 in the 0th index. prev_exp_version_index = revert_to_version - 2 prev_exp_stats = exp_stats_list[prev_exp_version_index] prev_exp = exp_list[prev_exp_version_index] new_exp_version = revert_to_version else: exp_versions_diff = exp_domain.ExplorationVersionsDiff( change_list) # We subtract 2 from exp.version to get the index of the # previous exploration version because exp_stats_list and # prev_exp start with version 1 in the 0th index. prev_exp_version_index = exp.version - 2 prev_exp_stats = exp_stats_list[prev_exp_version_index] prev_exp = exp_list[prev_exp_version_index] new_exp_version = exp.version # Fill missing Exploration-level stats. 
if exp_stats: num_valid_exp_stats += 1 elif exp.version == 1: new_exploration_stats = ( stats_services.get_stats_for_new_exploration( exp_id, exp.version, list(exp.state_interaction_ids_dict.keys()))) stats_services.create_stats_model(new_exploration_stats) missing_exp_stats_indices.append(i) missing_exp_stats.append( 'ExplorationStats(exp_id=%r, exp_version=%r)' % (exp_id, exp.version)) num_valid_state_stats += len( new_exploration_stats.state_stats_mapping) continue else: exp_stats = prev_exp_stats and prev_exp_stats.clone() if exp_stats is None: new_exploration_stats = ( stats_services.get_stats_for_new_exploration( exp_id, exp.version, list(exp.state_interaction_ids_dict.keys()))) stats_services.create_stats_model(new_exploration_stats) missing_exp_stats_indices.append(i) missing_exp_stats.append( 'ExplorationStats(exp_id=%r, exp_version=%r)' % (exp_id, exp.version)) num_valid_state_stats += len( new_exploration_stats.state_stats_mapping) continue if exp_versions_diff: exp_stats = stats_services.advance_version_of_exp_stats( new_exp_version, exp_versions_diff, exp_stats, None, None) else: exp_stats.exp_version = exp.version stats_services.create_stats_model(exp_stats) missing_exp_stats_indices.append(i) missing_exp_stats.append( 'ExplorationStats(exp_id=%r, exp_version=%r)' % (exp_id, exp.version)) # Fill missing State-level stats. state_stats_mapping = exp_stats.state_stats_mapping for state_name in exp.state_interaction_ids_dict.keys(): if state_name in state_stats_mapping: num_valid_state_stats += 1 continue if exp_versions_diff: prev_state_name = ( exp_versions_diff.new_to_old_state_names.get( state_name, state_name)) else: prev_state_name = state_name try: prev_interaction_id = ( prev_exp.state_interaction_ids_dict[prev_state_name] if prev_state_name in prev_exp.state_interaction_ids_dict else None) current_interaction_id = ( exp.state_interaction_ids_dict[state_name]) exp_stats_list_item = exp_stats_list[i] assert exp_stats_list_item is not None # In early schema versions of ExplorationModel, the END # card was a persistant, implicit state present in every # exploration. The snapshots of these old explorations have # since been migrated but they do not have corresponding state # stats models for the END state. So for such versions, a # default state stats model should be created. 
if current_interaction_id != prev_interaction_id or ( current_interaction_id == 'EndExploration' and prev_state_name == 'END'): exp_stats_list_item.state_stats_mapping[state_name] = ( stats_domain.StateStats.create_default() ) else: assert prev_exp_stats is not None exp_stats_list_item.state_stats_mapping[state_name] = ( prev_exp_stats.state_stats_mapping[ prev_state_name].clone() ) missing_state_stats.append( 'StateStats(exp_id=%r, exp_version=%r, ' 'state_name=%r)' % (exp_id, exp.version, state_name)) except Exception as e: assert exp_versions_diff is not None raise Exception( 'Exploration(id=%r, exp_version=%r) has no ' 'State(name=%r): %r' % ( exp_id, exp_stats.exp_version, prev_state_name, { 'added_state_names': ( exp_versions_diff.added_state_names), 'deleted_state_names': ( exp_versions_diff.deleted_state_names), 'new_to_old_state_names': ( exp_versions_diff.new_to_old_state_names), 'old_to_new_state_names': ( exp_versions_diff.old_to_new_state_names), 'prev_exp.states': ( prev_exp.state_interaction_ids_dict.keys()), 'prev_exp_stats': prev_exp_stats })) from e for index, exp_stats in enumerate(exp_stats_list): if index not in missing_exp_stats_indices: assert exp_stats is not None stats_services.save_stats_model(exp_stats) return ( missing_exp_stats, missing_state_stats, num_valid_exp_stats, num_valid_state_stats ) ","def regenerate_missing_stats_for_exploration( exp_id: str ) -> Tuple[List[str], List[str], int, int]: """"""Regenerates missing ExplorationStats models and entries for all corresponding states in an exploration. Args: exp_id: str. The ID of the exp. Returns: 4-tuple(missing_exp_stats, missing_state_stats, num_valid_exp_stats, num_valid_state_stats). where: missing_exp_stats: list(str). List of missing exploration stats. missing_state_stats: list(str). List of missing state stats. num_valid_exp_stats: int. Number of valid exploration stats. num_valid_state_stats: int. Number of valid state stats. Raises: Exception. Fetching exploration versions failed. Exception. No ExplorationStatsModels found. Exception. Exploration snapshots contain invalid commit_cmds. Exception. Exploration does not have a given state. 
"""""" exploration = exp_fetchers.get_exploration_by_id(exp_id) num_valid_state_stats = 0 num_valid_exp_stats = 0 exp_versions = list(range(1, exploration.version + 1)) missing_exp_stats_indices = [] exp_stats_list = stats_services.get_multiple_exploration_stats_by_version( exp_id, exp_versions) exp_list = ( exp_fetchers .get_multiple_versioned_exp_interaction_ids_mapping_by_version( exp_id, exp_versions)) if all(exp_stats is None for exp_stats in exp_stats_list): for index, version in enumerate(exp_versions): exp_stats_for_version = ( stats_services.get_stats_for_new_exploration( exp_id, version, list(exp_list[index].state_interaction_ids_dict.keys()))) stats_services.create_stats_model(exp_stats_for_version) raise Exception('No ExplorationStatsModels found') snapshots = exp_models.ExplorationModel.get_snapshots_metadata( exp_id, exp_versions) change_lists = [] for snapshot in snapshots: try: change_lists.append([ exp_domain.ExplorationChange(commit_cmd) for commit_cmd in snapshot['commit_cmds'] ]) except utils.ValidationError as e: raise Exception( 'Exploration(id=%r) snapshots contain invalid commit_cmds: %r' % (exp_id, snapshot['commit_cmds'])) from e missing_exp_stats = [] missing_state_stats = [] zipped_items = list( zip(exp_stats_list, exp_list, change_lists)) revert_commit_cmd = exp_models.ExplorationModel.CMD_REVERT_COMMIT for i, (exp_stats, exp, change_list) in enumerate(zipped_items): revert_to_version = next( ( int(change.version_number) for change in change_list if change.cmd == revert_commit_cmd ), None) new_exp_version = None if revert_to_version is not None: exp_versions_diff = None # We subtract 2 from revert_to_version to get the index of the # previous exploration version because exp_stats_list and # prev_exp start with version 1 in the 0th index. prev_exp_version_index = revert_to_version - 2 prev_exp_stats = exp_stats_list[prev_exp_version_index] prev_exp = exp_list[prev_exp_version_index] new_exp_version = revert_to_version else: exp_versions_diff = exp_domain.ExplorationVersionsDiff( change_list) # We subtract 2 from exp.version to get the index of the # previous exploration version because exp_stats_list and # prev_exp start with version 1 in the 0th index. prev_exp_version_index = exp.version - 2 prev_exp_stats = exp_stats_list[prev_exp_version_index] prev_exp = exp_list[prev_exp_version_index] new_exp_version = exp.version # Fill missing Exploration-level stats. 
if exp_stats: num_valid_exp_stats += 1 elif exp.version == 1: new_exploration_stats = ( stats_services.get_stats_for_new_exploration( exp_id, exp.version, list(exp.state_interaction_ids_dict.keys()))) stats_services.create_stats_model(new_exploration_stats) missing_exp_stats_indices.append(i) missing_exp_stats.append( 'ExplorationStats(exp_id=%r, exp_version=%r)' % (exp_id, exp.version)) num_valid_state_stats += len( new_exploration_stats.state_stats_mapping) continue else: exp_stats = prev_exp_stats and prev_exp_stats.clone() if exp_stats is None: new_exploration_stats = ( stats_services.get_stats_for_new_exploration( exp_id, exp.version, list(exp.state_interaction_ids_dict.keys()))) stats_services.create_stats_model(new_exploration_stats) missing_exp_stats_indices.append(i) missing_exp_stats.append( 'ExplorationStats(exp_id=%r, exp_version=%r)' % (exp_id, exp.version)) num_valid_state_stats += len( new_exploration_stats.state_stats_mapping) continue if exp_versions_diff: exp_stats = stats_services.advance_version_of_exp_stats( new_exp_version, exp_versions_diff, exp_stats, None, None) else: exp_stats.exp_version = exp.version stats_services.create_stats_model(exp_stats) missing_exp_stats_indices.append(i) missing_exp_stats.append( 'ExplorationStats(exp_id=%r, exp_version=%r)' % (exp_id, exp.version)) # Fill missing State-level stats. state_stats_mapping = exp_stats.state_stats_mapping for state_name in exp.state_interaction_ids_dict.keys(): if state_name in state_stats_mapping: num_valid_state_stats += 1 continue if exp_versions_diff: prev_state_name = ( exp_versions_diff.new_to_old_state_names.get( state_name, state_name)) else: prev_state_name = state_name try: prev_interaction_id = ( prev_exp.state_interaction_ids_dict[prev_state_name] if prev_state_name in prev_exp.state_interaction_ids_dict else None) current_interaction_id = ( exp.state_interaction_ids_dict[state_name]) exp_stats_list_item = exp_stats_list[i] assert exp_stats_list_item is not None # In early schema versions of ExplorationModel, the END # card was a persistant, implicit state present in every # exploration. The snapshots of these old explorations have # since been migrated but they do not have corresponding state # stats models for the END state. So for such versions, a # default state stats model should be created. 
if current_interaction_id != prev_interaction_id or ( current_interaction_id == 'EndExploration' and prev_state_name == 'END'): exp_stats_list_item.state_stats_mapping[state_name] = ( stats_domain.StateStats.create_default() ) else: assert prev_exp_stats is not None exp_stats_list_item.state_stats_mapping[state_name] = ( prev_exp_stats.state_stats_mapping[ prev_state_name].clone() ) missing_state_stats.append( 'StateStats(exp_id=%r, exp_version=%r, ' 'state_name=%r)' % (exp_id, exp.version, state_name)) except Exception as e: assert exp_versions_diff is not None raise Exception( 'Exploration(id=%r, exp_version=%r) has no ' 'State(name=%r): %r' % ( exp_id, exp_stats.exp_version, prev_state_name, { 'added_state_names': ( exp_versions_diff.added_state_names), 'deleted_state_names': ( exp_versions_diff.deleted_state_names), 'new_to_old_state_names': ( exp_versions_diff.new_to_old_state_names), 'old_to_new_state_names': ( exp_versions_diff.old_to_new_state_names), 'prev_exp.states': ( prev_exp.state_interaction_ids_dict.keys()), 'prev_exp_stats': prev_exp_stats })) from e for index, exp_stats in enumerate(exp_stats_list): if index not in missing_exp_stats_indices: assert exp_stats is not None stats_services.save_stats_model(exp_stats) return ( missing_exp_stats, missing_state_stats, num_valid_exp_stats, num_valid_state_stats ) " 31708,"def _parse_field(raw_field: str, sep: str = ',', index_after_split: int = 0, chars_to_remove: str = '') -> str: ''' This function allows getting a specific complex sub-string. ""example,example2|"" -> 'example2' ''' if not raw_field: demisto.debug('Got empty raw field to parse.') return '' try: new_field = raw_field.split(sep)[index_after_split] except IndexError: demisto.error(f'raw: {raw_field}, split by {sep} has no index {index_after_split}') return '' chars_to_remove = set(chars_to_remove) for char in chars_to_remove: new_field = new_field.replace(char, '') return new_field ","def _parse_field(raw_field: str, sep: str = ',', index_after_split: int = 0, chars_to_remove: str = '') -> str: ''' This function allows getting a specific complex sub-string. ""example,example2|"" -> 'example2' ''' if not raw_field: demisto.debug('Got empty raw field to parse.') return '' try: new_field = raw_field.split(sep)[index_after_split] except IndexError: demisto.error(f'{INTEGRATION_NAME} raw: {raw_field}, split by {sep} has no index {index_after_split}') return '' chars_to_remove = set(chars_to_remove) for char in chars_to_remove: new_field = new_field.replace(char, '') return new_field " 5907,"def _get_html_page(link, session=None): # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] if session is None: raise TypeError( ""_get_html_page() missing 1 required keyword argument: 'session'"" ) url = link.url.split('#', 1)[0] # Check for VCS schemes that do not support lookup as web pages. 
vcs_scheme = _match_vcs_scheme(url) if vcs_scheme: logger.debug('Cannot look at %s URL %s', vcs_scheme, link) return None # Tack index.html onto file:// URLs that point to directories scheme, _, path, _, _, _ = urllib_parse.urlparse(url) if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): # add trailing slash if not present so urljoin doesn't trim # final segment if not url.endswith('/'): url += '/' url = urllib_parse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s', url) try: resp = _get_html_response(url, session=session) except _NotHTTP: logger.debug( 'Skipping page %s because it looks like an archive, and cannot ' 'be checked by HEAD.', link, ) except _NotHTML as exc: logger.warning( 'Skipping page %s because the %s request got Content-Type: %s.' 'The supported Content-Type is text/html', link, exc.request_desc, exc.content_type, ) except HTTPError as exc: _handle_get_page_fail(link, exc) except RetryError as exc: _handle_get_page_fail(link, exc) except SSLError as exc: reason = ""There was a problem confirming the ssl certificate: "" reason += str(exc) _handle_get_page_fail(link, reason, meth=logger.info) except requests.ConnectionError as exc: _handle_get_page_fail(link, ""connection error: {}"".format(exc)) except requests.Timeout: _handle_get_page_fail(link, ""timed out"") else: return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing) return None ","def _get_html_page(link, session=None): # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] if session is None: raise TypeError( ""_get_html_page() missing 1 required keyword argument: 'session'"" ) url = link.url.split('#', 1)[0] # Check for VCS schemes that do not support lookup as web pages. vcs_scheme = _match_vcs_scheme(url) if vcs_scheme: logger.debug('Cannot look at %s URL %s', vcs_scheme, link) return None # Tack index.html onto file:// URLs that point to directories scheme, _, path, _, _, _ = urllib_parse.urlparse(url) if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): # add trailing slash if not present so urljoin doesn't trim # final segment if not url.endswith('/'): url += '/' url = urllib_parse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s', url) try: resp = _get_html_response(url, session=session) except _NotHTTP: logger.debug( 'Skipping page %s because it looks like an archive, and cannot ' 'be checked by HEAD.', link, ) except _NotHTML as exc: logger.warning( 'Skipping page %s because the %s request got Content-Type: %s.' 'The only supported Content-Type is text/html', link, exc.request_desc, exc.content_type, ) except HTTPError as exc: _handle_get_page_fail(link, exc) except RetryError as exc: _handle_get_page_fail(link, exc) except SSLError as exc: reason = ""There was a problem confirming the ssl certificate: "" reason += str(exc) _handle_get_page_fail(link, reason, meth=logger.info) except requests.ConnectionError as exc: _handle_get_page_fail(link, ""connection error: {}"".format(exc)) except requests.Timeout: _handle_get_page_fail(link, ""timed out"") else: return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing) return None " 35025,"def scatter_nd(data, indices, updates, mode): """"""Scatter elements from a n-dimension array. Given data with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}), indices with shape (M, Y_0, ..., Y_{K-1}), and output with shape (X_0, X_1, ..., X_{N-1}), scatter_nd computes .. 
code-block:: output[indices[0, y_0, ..., y_{K-1}], ..., indices[M-1, y_0, ..., y_{K-1}], x_M, ..., x_{N-1} ] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] all other entries in the output are 0. Repeated indices are summed. Parameters ---------- data : tvm.te.Tensor The source array. indices : tvm.te.Tensor The indices of the values to extract. updates : tvm.te.Tensor The updates to apply at the Indices mode : string The update mode for the algorith, either ""update"" or ""add"" Returns ------- ret : tvm.te.Tensor """""" _verify_scatter_nd_inputs(data, indices, updates) def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr): # pylint: disable=invalid-name ib = tvm.tir.ir_builder.create() data = ib.buffer_ptr(data_ptr) indices = ib.buffer_ptr(indices_ptr) updates = ib.buffer_ptr(updates_ptr) out = ib.buffer_ptr(out_ptr) # We combine all the indices dimensions but the first one into a single # dimension so we can iterate it in single loop instead of an arbitrary # number of loops. We do the same thing for all the update dimensions. fused_indices_dimension = 1 for i in indices_ptr.shape[1:]: fused_indices_dimension *= i fused_updates_dimension = 1 for i in updates_ptr.shape[len(indices_ptr.shape) - 1 :]: fused_updates_dimension *= i fused_shape = 1 for i in data_ptr.shape: fused_shape *= i with ib.for_range(0, fused_shape) as i: out[i] = data[i] with ib.for_range(0, fused_indices_dimension) as i: with ib.for_range(0, fused_updates_dimension, kind=""parallel"") as j: offset = fused_updates_dimension index = j # This is x_M, .. x_{N-1} part of the index into out. # Build up the indices[0, y_0, .. y_{K-1}], .. indices[M-1, y_0, .. y_{K-1}] part # of the index into out. for l in reversed(range(indices_ptr.shape[0].value)): # indices[i * l * fused_indices_dimension] = indices[l, y_0, ... y_{k-1}] index += offset * indices[i + l * fused_indices_dimension] offset *= data_ptr.shape[l] if mode == ""update"": out[index] = updates[i * fused_updates_dimension + j] elif mode == ""add"": out[index] += updates[i * fused_updates_dimension + j] else: raise NotImplementedError(""scatter_nd mode not supported:"", mode) return ib.get() out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, ""out_buf"") return te.extern( [data.shape], [data, indices, updates], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0]), dtype=data.dtype, out_buffers=[out_buf], name=""scatter_nd_x86"", tag=""scatter_nd_x86"", ) ","def scatter_nd(data, indices, updates, mode): """"""Scatter elements from a n-dimension array. Given data with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}), indices with shape (M, Y_0, ..., Y_{K-1}), and output with shape (X_0, X_1, ..., X_{N-1}), scatter_nd computes .. code-block:: output[indices[0, y_0, ..., y_{K-1}], ..., indices[M-1, y_0, ..., y_{K-1}], x_M, ..., x_{N-1} ] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] all other entries in the output are 0. Repeated indices are summed. Parameters ---------- data : tvm.te.Tensor The source array. indices : tvm.te.Tensor The indices of the values to extract. 
updates : tvm.te.Tensor The updates to apply at the Indices mode : string The update mode for the algorithm, either ""update"" or ""add"" Returns ------- ret : tvm.te.Tensor """""" _verify_scatter_nd_inputs(data, indices, updates) def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr): # pylint: disable=invalid-name ib = tvm.tir.ir_builder.create() data = ib.buffer_ptr(data_ptr) indices = ib.buffer_ptr(indices_ptr) updates = ib.buffer_ptr(updates_ptr) out = ib.buffer_ptr(out_ptr) # We combine all the indices dimensions but the first one into a single # dimension so we can iterate it in single loop instead of an arbitrary # number of loops. We do the same thing for all the update dimensions. fused_indices_dimension = 1 for i in indices_ptr.shape[1:]: fused_indices_dimension *= i fused_updates_dimension = 1 for i in updates_ptr.shape[len(indices_ptr.shape) - 1 :]: fused_updates_dimension *= i fused_shape = 1 for i in data_ptr.shape: fused_shape *= i with ib.for_range(0, fused_shape) as i: out[i] = data[i] with ib.for_range(0, fused_indices_dimension) as i: with ib.for_range(0, fused_updates_dimension, kind=""parallel"") as j: offset = fused_updates_dimension index = j # This is x_M, .. x_{N-1} part of the index into out. # Build up the indices[0, y_0, .. y_{K-1}], .. indices[M-1, y_0, .. y_{K-1}] part # of the index into out. for l in reversed(range(indices_ptr.shape[0].value)): # indices[i * l * fused_indices_dimension] = indices[l, y_0, ... y_{k-1}] index += offset * indices[i + l * fused_indices_dimension] offset *= data_ptr.shape[l] if mode == ""update"": out[index] = updates[i * fused_updates_dimension + j] elif mode == ""add"": out[index] += updates[i * fused_updates_dimension + j] else: raise NotImplementedError(""scatter_nd mode not supported:"", mode) return ib.get() out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, ""out_buf"") return te.extern( [data.shape], [data, indices, updates], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0]), dtype=data.dtype, out_buffers=[out_buf], name=""scatter_nd_x86"", tag=""scatter_nd_x86"", ) " 39750,"def load_paths_image_csv(params, skip_csv=POSIX_CSV_LABEL): """""" loading content of two folder and specific pattern to obtain list of images and csv with centers, then it find the intersection between them according their unique names :param {str: str} params: :param str skip_csv: pattern in csv name that skips the file :return [(str, str)]: """""" logging.debug('loading pairs for %s and %s', params['path_csv'], params['path_images']) get_name = lambda p: os.path.splitext(os.path.basename(p))[0] list_csv = glob.glob(params['path_csv']) list_names = [get_name(p) for p in list_csv] # skip al names that contains given posix list_names = [n for n in list_names if skip_csv not in n] # filter to have just paths with the right names list_imgs = sorted([p for p in glob.glob(params['path_images']) if get_name(p) in list_names]) # update list of names list_names = [get_name(p) for p in list_imgs] # filter to have just paths with the right names list_csv = sorted([p for p in list_csv if get_name(p) in list_names]) if len(list_imgs) != len(list_csv): raise ValueError('the number of images (%i) and csv (%i) has to be same' % (len(list_imgs), len(list_csv))) list_join_img_csv = zip(list_imgs, list_csv) if not all(get_name(p1) == get_name(p2) for p1, p2 in list_join_img_csv): raise ValueError('names has to be same for %r' % list_join_img_csv) return list_join_img_csv ","def load_paths_image_csv(params, skip_csv=POSIX_CSV_LABEL): """""" loading content 
of two folder and specific pattern to obtain list of images and csv with centers, then it find the intersection between them according their unique names :param {str: str} params: :param str skip_csv: pattern in csv name that skips the file :return [(str, str)]: """""" logging.debug('loading pairs for %s and %s', params['path_csv'], params['path_images']) get_name = lambda p: os.path.splitext(os.path.basename(p))[0] list_csv = glob.glob(params['path_csv']) list_names = [get_name(p) for p in list_csv] # skip al names that contains given posix list_names = [n for n in list_names if skip_csv not in n] # filter to have just paths with the right names list_imgs = sorted([p for p in glob.glob(params['path_images']) if get_name(p) in list_names]) # update list of names list_names = [get_name(p) for p in list_imgs] # filter to have just paths with the right names list_csv = sorted([p for p in list_csv if get_name(p) in list_names]) if len(list_imgs) != len(list_csv): raise RuntimeError('the number of images (%i) and csv (%i) has to be same' % (len(list_imgs), len(list_csv))) list_join_img_csv = zip(list_imgs, list_csv) if not all(get_name(p1) == get_name(p2) for p1, p2 in list_join_img_csv): raise ValueError('names has to be same for %r' % list_join_img_csv) return list_join_img_csv " 14061,"def get_path(dataset): """""" Get the path to the data file. Parameters ---------- dataset : str The name of the dataset. See ``geopandas.datasets.available`` for all options. Examples -------- >>> geopandas.datasets.get_path(""naturalearth_lowres"") # doctest: +SKIP '/opt/miniconda3/envs/geo_env/lib/python3.8/site-packages/geopandas/datasets/\ naturalearth_lowres/naturalearth_lowres.shp' """""" if dataset in _available_dir: return os.path.abspath(os.path.join(_module_path, dataset, dataset + "".shp"")) elif dataset in _available_zip: fpath = os.path.abspath(os.path.join(_module_path, _available_zip[dataset])) return ""zip://"" + fpath else: msg = ""The dataset '{data}' is not available. "".format(data=dataset) msg += ""Available datasets are {}"".format("", "".join(available)) raise ValueError(msg) ","def get_path(dataset): """""" Get the path to the data file. Parameters ---------- dataset : str The name of the dataset. See ``geopandas.datasets.available`` for all options. Examples -------- >>> geopandas.datasets.get_path(""naturalearth_lowres"") # doctest: +SKIP '.../python3.8/site-packages/geopandas/datasets/\ naturalearth_lowres/naturalearth_lowres.shp' """""" if dataset in _available_dir: return os.path.abspath(os.path.join(_module_path, dataset, dataset + "".shp"")) elif dataset in _available_zip: fpath = os.path.abspath(os.path.join(_module_path, _available_zip[dataset])) return ""zip://"" + fpath else: msg = ""The dataset '{data}' is not available. 
"".format(data=dataset) msg += ""Available datasets are {}"".format("", "".join(available)) raise ValueError(msg) " 49547,"def unzip_descriptor(path, innerpath): frictionless = import_module(""frictionless"") resource = frictionless.Resource(path=path, compression="""") with frictionless.system.create_loader(resource) as loader: byte_stream = loader.byte_stream if loader.remote: byte_stream = tempfile.TemporaryFile() shutil.copyfileobj(loader.byte_stream, byte_stream) byte_stream.seek(0) with zipfile.ZipFile(byte_stream, ""r"") as zip: tempdir = tempfile.mkdtemp() zip.extractall(tempdir) atexit.register(shutil.rmtree, tempdir) if innerpath != """": pass elif os.path.isfile(os.path.join(tempdir, ""datapackage.json"")): innerpath = ""datapackage.json"" elif os.path.isfile(os.path.join(tempdir, ""datapackage.yaml"")): innerpath = ""datapackage.yaml"" else: # maybe an error innerpath = ""datapackage.json"" descriptor = os.path.join(tempdir, innerpath) return descriptor ","def unzip_descriptor(path, innerpath): frictionless = import_module(""frictionless"") resource = frictionless.Resource(path=path, compression="""") with frictionless.system.create_loader(resource) as loader: byte_stream = loader.byte_stream if loader.remote: byte_stream = tempfile.TemporaryFile() shutil.copyfileobj(loader.byte_stream, byte_stream) byte_stream.seek(0) with zipfile.ZipFile(byte_stream, ""r"") as zip: tempdir = tempfile.mkdtemp() zip.extractall(tempdir) atexit.register(shutil.rmtree, tempdir) if innerpath == """": innerpath = ""datapackage.json"" extensions = (""json"", ""yaml"", ""yml"") default_names = (f""datapackage.{ext}"" for ext in extensions) for name in default_names: if os.path.isfile(os.path.join(tempdir, name)): innerpath = name break descriptor = os.path.join(tempdir, innerpath) return descriptor " 49038,"def multi_shell_fiber_response(sh_order, bvals, evals, csf_md, gm_md): """"""Fiber response function estimation for multi-shell data. Parameters ---------- sh_order : int Maximum spherical harmonics order. bvals : ndarray Array containing the b-values. evals : (3,) ndarray Eigenvalues of the diffusion tensor. csf_md : float CSF tissue mean diffusivity value. gm_md : float GM tissue mean diffusivity value. Returns ------- MultiShellResponse MultiShellResponse object. """""" bvals = np.array(bvals, copy=True) evecs = np.zeros((3, 3)) z = np.array([0, 0, 1.]) evecs[:, 0] = z evecs[:2, 1:] = np.eye(2) n = np.arange(0, sh_order + 1, 2) m = np.zeros_like(n) big_sphere = default_sphere.subdivide() theta, phi = big_sphere.theta, big_sphere.phi B = shm.real_sph_harm(m, n, theta[:, None], phi[:, None]) A = shm.real_sph_harm(0, 0, 0, 0) response = np.empty([len(bvals), len(n) + 2]) for i, bvalue in enumerate(bvals): gtab = GradientTable(big_sphere.vertices * bvalue) wm_response = single_tensor(gtab, 1., evals, evecs, snr=None) response[i, 2:] = np.linalg.lstsq(B, wm_response)[0] response[i, 0] = np.exp(-bvalue * csf_md) / A response[i, 1] = np.exp(-bvalue * gm_md) / A return MultiShellResponse(response, sh_order, bvals) ","def multi_shell_fiber_response(sh_order, bvals, evals, csf_md, gm_md): """"""Fiber response function estimation for multi-shell data. Parameters ---------- sh_order : int Maximum spherical harmonics order. bvals : ndarray Array containing the b-values. evals : (3,) ndarray Eigenvalues of the diffusion tensor. csf_md : float CSF tissue mean diffusivity value. gm_md : float GM tissue mean diffusivity value. Returns ------- MultiShellResponse MultiShellResponse object. 
"""""" bvals = np.array(bvals, copy=True) evecs = np.zeros((3, 3)) z = np.array([0, 0, 1.]) evecs[:, 0] = z evecs[:2, 1:] = np.eye(2) n = np.arange(0, sh_order + 1, 2) m = np.zeros_like(n) if sphere is None: from dipy.data import default_sphere sphere = default_sphere big_sphere = sphere.subdivide() theta, phi = big_sphere.theta, big_sphere.phi B = shm.real_sph_harm(m, n, theta[:, None], phi[:, None]) A = shm.real_sph_harm(0, 0, 0, 0) response = np.empty([len(bvals), len(n) + 2]) for i, bvalue in enumerate(bvals): gtab = GradientTable(big_sphere.vertices * bvalue) wm_response = single_tensor(gtab, 1., evals, evecs, snr=None) response[i, 2:] = np.linalg.lstsq(B, wm_response)[0] response[i, 0] = np.exp(-bvalue * csf_md) / A response[i, 1] = np.exp(-bvalue * gm_md) / A return MultiShellResponse(response, sh_order, bvals) " 13696,"def get_course_members(course_key, include_students=True, access_roles=None, prefetch_accessroles=False, prefetch_enrollments=False): """""" Returns a User queryset that filters all users related to a course. For example - Students, Teachers, Staffs etc. Arguments: course_key (CourseKey): the CourseKey for the course include_students: Wether or not to include students, access_roles: accepts an array of string course access roles. If None provided, it includes all roles. prefetch_accessroles: prefetches CourseAccessRole instances attached with user. This only includes CourseAccessRole instances related to provided CourseKey, prefetch_enrollments: prefetches CourseEnrollment instances attached with user. This only includes CourseEnrollment instances related to provided CourseKey, """""" queryset = User.objects.filter() # if access_roles not given, assign all registered access roles if access_roles is None: access_roles = REGISTERED_ACCESS_ROLES.keys() # conditions for filtering based on CourseAccessRole access_role_qs = Q( courseaccessrole__course_id=course_key, courseaccessrole__role__in=access_roles ) # conditions for filtering based on CourseEnrollment students_qs = Q( courseenrollment__course_id=course_key, courseenrollment__is_active=True ) if include_students: queryset = queryset.filter(access_role_qs | students_qs) else: queryset = queryset.filter(access_role_qs) # prefetch CourseAccessRole items related to the course and given roles if prefetch_accessroles: queryset = queryset.prefetch_related(Prefetch( 'courseaccessrole_set', CourseAccessRole.objects.filter( course_id=course_key, role__in=access_roles) )) # prefetch CourseEnrollment items related to the course if prefetch_enrollments: queryset = queryset.prefetch_related(Prefetch( 'courseenrollment_set', CourseEnrollment.objects.filter(course_id=course_key) )) # prevent duplicates queryset = queryset.distinct() return queryset ","def get_course_member_queryset(course_key, include_students=True, access_roles=None, prefetch_accessroles=False, prefetch_enrollments=False): """""" Returns a User queryset that filters all users related to a course. For example - Students, Teachers, Staffs etc. Arguments: course_key (CourseKey): the CourseKey for the course include_students: Wether or not to include students, access_roles: accepts an array of string course access roles. If None provided, it includes all roles. prefetch_accessroles: prefetches CourseAccessRole instances attached with user. This only includes CourseAccessRole instances related to provided CourseKey, prefetch_enrollments: prefetches CourseEnrollment instances attached with user. 
This only includes CourseEnrollment instances related to provided CourseKey, """""" queryset = User.objects.filter() # if access_roles not given, assign all registered access roles if access_roles is None: access_roles = REGISTERED_ACCESS_ROLES.keys() # conditions for filtering based on CourseAccessRole access_role_qs = Q( courseaccessrole__course_id=course_key, courseaccessrole__role__in=access_roles ) # conditions for filtering based on CourseEnrollment students_qs = Q( courseenrollment__course_id=course_key, courseenrollment__is_active=True ) if include_students: queryset = queryset.filter(access_role_qs | students_qs) else: queryset = queryset.filter(access_role_qs) # prefetch CourseAccessRole items related to the course and given roles if prefetch_accessroles: queryset = queryset.prefetch_related(Prefetch( 'courseaccessrole_set', CourseAccessRole.objects.filter( course_id=course_key, role__in=access_roles) )) # prefetch CourseEnrollment items related to the course if prefetch_enrollments: queryset = queryset.prefetch_related(Prefetch( 'courseenrollment_set', CourseEnrollment.objects.filter(course_id=course_key) )) # prevent duplicates queryset = queryset.distinct() return queryset " 1384,"def OneSE(cv_results, **kwargs): ''' oneSE is a callable refit option for CV whose aim is to balance model complexity and cross-validated score in the spirit of the ""one standard error"" rule of Breiman et al. (1984), which demonstrated that the tuning parameter associated with the best performance may be prone to overfit. Ergo, the simplest model within one standard error of the empirically optimal model is the better choice. This assumes that the models can be easily ordered from simplest to most complex based on some user-defined target parameter. By enabling the user to specify this parameter of interest, whether greater values of this parameter are to be defined as 'more complex', and a target scoring metric (i.e. in the case of multi-metric scoring), the `OneSE` function can be called directly by `refit` (e.g. in GridSearchCV). Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV`. param : str Parameter with the larges influence on model complexity. greater_is_complex : bool Whether complexity increases as `param` increases. Default is True. refit_scoring : str Scoring metric. tol : float Acceptable percent tolerance Returns ------- best_idx : int Index of a model that has the simplest parameter value (e.g. smallest number of PCA components) while has its test score within 1 standard deviation of the best `mean_test_score`. References ---------- Breiman, Friedman, Olshen, and Stone. (1984) Classification and Regression Trees. Wadsworth. Notes ----- Here, simplest is defined by the complexity of the model as influenced by some user-defined target parameter (e.g. number of components, number of estimators, polynomial degree, cost, scale, number hidden units, weight decay, number of nearest neighbors, L1/L2 penalty, etc.). See :ref:`sphx_glr_auto_examples_applications_plot_model_complexity_influence.py` See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_refit_callable.py` ''' import sklearn.metrics def check_scorer(scoring_dict, refit_scoring): """""" Check whether the target refit scorer is negated. If so, adjusted greater_is_better accordingly. Parameters ---------- scoring_dict : dict A dictionary mapping scorers to booleans indicating whether `greater_is_better` refit_scoring : str Scoring metric. 
Returns ------- greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. """""" if refit_scoring not in scoring_dict.keys(): if refit_scoring.startswith('neg_'): greater_is_better = True else: raise KeyError('Scoring metric not available.') else: greater_is_better = [value for key, value in scoring_dict.items() if refit_scoring in key][0] return greater_is_better def bound(cv_results, greater_is_better, refit_scoring): """""" Calculate the upper/lower bound within 1 standard deviation of the best `mean_test_scores`. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV` greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. refit_scoring : str Scoring metric. Returns ------- float Upper/lower bound within 1 standard deviation of the best `mean_test_score`. """""" best_mean_score = cv_results['mean_test_' + refit_scoring] best_std_score = cv_results['std_test_' + refit_scoring] if greater_is_better is True: best_score_idx = np.argmax(best_mean_score) out_bound = (best_mean_score[best_score_idx] - best_std_score[best_score_idx]) else: best_score_idx = np.argmin(best_mean_score) out_bound = (best_mean_score[best_score_idx] + best_std_score[best_score_idx]) return out_bound def percentile(cv_results, greater_is_better, refit_scoring, tol): """""" Returns the simplest model that is within a percent tolerance of the empirically optimal model with the best `mean_test_scores`. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV` greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. refit_scoring : str Scoring metric. tol : float Acceptable percent tolerance Returns ------- out_bound : float Upper/lower bound within some percentile tolerance of the best `mean_test_score`. """""" best_mean_score = cv_results['mean_test_' + refit_scoring] if greater_is_better is True: best_score_idx = np.argmax(best_mean_score) else: best_score_idx = np.argmin(best_mean_score) out_bound = (np.abs(best_mean_score[best_score_idx]) - tol)/tol return out_bound def best_low_complexity(cv_results, param, refit_scoring, scoring_dict, greater_is_complex, greater_is_better, tol): """""" Balance model complexity with cross-validated score. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV`. refit_scoring : str Scoring metric. scoring_dict : dict A dictionary mapping scorers to booleans indicating whether `greater_is_better` param : str Parameter with the larges influence on model complexity. greater_is_complex : bool Whether complexity increases as `param` increases. Default is True. greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. Return ------ best_idx : int Index of a model that has the simplest parameter value (e.g. 
smallest number of PCA components) while has its test score within 1 standard deviation of the best `mean_test_score`. """""" # Check parameter whose complexity we seek to restrict if not any(param in x for x in cv_results['params'][0].keys()): raise KeyError('Parameter not found in cv grid.') else: param = [i for i in cv_results['params'][0].keys() if i.endswith(param)][0] if tol is None: threshold = bound(cv_results, greater_is_better, refit_scoring) else: threshold = percentile(cv_results, greater_is_better, refit_scoring, tol) if greater_is_complex is True: candidate_idx = np.flatnonzero(cv_results['mean_test_' + refit_scoring] >= threshold) else: candidate_idx = np.flatnonzero(cv_results['mean_test_' + refit_scoring] <= threshold) best_idx = candidate_idx[cv_results['param_' + param] [candidate_idx].argmin()] return best_idx scoring_funcs = [i for i in sklearn.metrics.__all__ if (i.endswith('_score')) or (i.endswith('_error'))] scoring_dict = dict(zip(scoring_funcs, [i.endswith('_score') for i in scoring_funcs])) greater_is_better = check_scorer(scoring_dict, refit_scoring) best_idx = best_low_complexity(cv_results, param, refit_scoring, scoring_dict, greater_is_complex, greater_is_better, tol) return best_idx","def OneSE(cv_results, **kwargs): ''' oneSE is a callable refit option for CV whose aim is to balance model complexity and cross-validated score in the spirit of the ""one standard error"" rule of Breiman et al. (1984), which demonstrated that the tuning parameter associated with the best performance may be prone to overfit. Ergo, the simplest model within one standard error of the empirically optimal model is the better choice. This assumes that the models can be easily ordered from simplest to most complex based on some user-defined target parameter. By enabling the user to specify this parameter of interest, whether greater values of this parameter are to be defined as 'more complex', and a target scoring metric (i.e. in the case of multi-metric scoring), the `OneSE` function can be called directly by `refit` (e.g. in GridSearchCV). Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV`. param : str Parameter with the larges influence on model complexity. greater_is_complex : bool Whether complexity increases as `param` increases. Default is True. refit_scoring : str Scoring metric. Parameter with the largest influence on model complexity. Acceptable percent tolerance Returns ------- best_idx : int Index of a model that has the simplest parameter value (e.g. smallest number of PCA components) while has its test score within 1 standard deviation of the best `mean_test_score`. References ---------- Breiman, Friedman, Olshen, and Stone. (1984) Classification and Regression Trees. Wadsworth. Notes ----- Here, simplest is defined by the complexity of the model as influenced by some user-defined target parameter (e.g. number of components, number of estimators, polynomial degree, cost, scale, number hidden units, weight decay, number of nearest neighbors, L1/L2 penalty, etc.). See :ref:`sphx_glr_auto_examples_applications_plot_model_complexity_influence.py` See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_refit_callable.py` ''' import sklearn.metrics def check_scorer(scoring_dict, refit_scoring): """""" Check whether the target refit scorer is negated. If so, adjusted greater_is_better accordingly. 
Parameters ---------- scoring_dict : dict A dictionary mapping scorers to booleans indicating whether `greater_is_better` refit_scoring : str Scoring metric. Returns ------- greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. """""" if refit_scoring not in scoring_dict.keys(): if refit_scoring.startswith('neg_'): greater_is_better = True else: raise KeyError('Scoring metric not available.') else: greater_is_better = [value for key, value in scoring_dict.items() if refit_scoring in key][0] return greater_is_better def bound(cv_results, greater_is_better, refit_scoring): """""" Calculate the upper/lower bound within 1 standard deviation of the best `mean_test_scores`. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV` greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. refit_scoring : str Scoring metric. Returns ------- float Upper/lower bound within 1 standard deviation of the best `mean_test_score`. """""" best_mean_score = cv_results['mean_test_' + refit_scoring] best_std_score = cv_results['std_test_' + refit_scoring] if greater_is_better is True: best_score_idx = np.argmax(best_mean_score) out_bound = (best_mean_score[best_score_idx] - best_std_score[best_score_idx]) else: best_score_idx = np.argmin(best_mean_score) out_bound = (best_mean_score[best_score_idx] + best_std_score[best_score_idx]) return out_bound def percentile(cv_results, greater_is_better, refit_scoring, tol): """""" Returns the simplest model that is within a percent tolerance of the empirically optimal model with the best `mean_test_scores`. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV` greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. refit_scoring : str Scoring metric. tol : float Acceptable percent tolerance Returns ------- out_bound : float Upper/lower bound within some percentile tolerance of the best `mean_test_score`. """""" best_mean_score = cv_results['mean_test_' + refit_scoring] if greater_is_better is True: best_score_idx = np.argmax(best_mean_score) else: best_score_idx = np.argmin(best_mean_score) out_bound = (np.abs(best_mean_score[best_score_idx]) - tol)/tol return out_bound def best_low_complexity(cv_results, param, refit_scoring, scoring_dict, greater_is_complex, greater_is_better, tol): """""" Balance model complexity with cross-validated score. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV`. refit_scoring : str Scoring metric. scoring_dict : dict A dictionary mapping scorers to booleans indicating whether `greater_is_better` param : str Parameter with the larges influence on model complexity. greater_is_complex : bool Whether complexity increases as `param` increases. Default is True. greater_is_better : boolean Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. 
Return ------ best_idx : int Index of a model that has the simplest parameter value (e.g. smallest number of PCA components) while has its test score within 1 standard deviation of the best `mean_test_score`. """""" # Check parameter whose complexity we seek to restrict if not any(param in x for x in cv_results['params'][0].keys()): raise KeyError('Parameter not found in cv grid.') else: param = [i for i in cv_results['params'][0].keys() if i.endswith(param)][0] if tol is None: threshold = bound(cv_results, greater_is_better, refit_scoring) else: threshold = percentile(cv_results, greater_is_better, refit_scoring, tol) if greater_is_complex is True: candidate_idx = np.flatnonzero(cv_results['mean_test_' + refit_scoring] >= threshold) else: candidate_idx = np.flatnonzero(cv_results['mean_test_' + refit_scoring] <= threshold) best_idx = candidate_idx[cv_results['param_' + param] [candidate_idx].argmin()] return best_idx scoring_funcs = [i for i in sklearn.metrics.__all__ if (i.endswith('_score')) or (i.endswith('_error'))] scoring_dict = dict(zip(scoring_funcs, [i.endswith('_score') for i in scoring_funcs])) greater_is_better = check_scorer(scoring_dict, refit_scoring) best_idx = best_low_complexity(cv_results, param, refit_scoring, scoring_dict, greater_is_complex, greater_is_better, tol) return best_idx" 2062,"def _smacof_single(dissimilarities, metric=True, n_components=2, init=None, max_iter=300, verbose=0, eps=1e-3, random_state=None, normalize=False): """"""Computes multidimensional scaling using SMACOF algorithm. Parameters ---------- dissimilarities : ndarray of shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : bool, default=True Compute metric or nonmetric SMACOF algorithm. n_components : int, default=2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. max_iter : int, default=300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, default=0 Level of verbosity. eps : float, default=1e-3 Relative tolerance with respect to stress at which to declare convergence. random_state : int or RandomState instance, default=None Determines the random number generator used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term: `Glossary `. normalize : boolean, optional, default: False Whether use and return normed stress value (Stress-1) instead of raw stress calculated by default. Returns ------- X : ndarray of shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). If normalize is set to True, returns Stress-1 (according to Kruskal (1964, p. 3) value 0 indicates ""perfect"" fit, 0.025 excellent, 0.05 good, 0.1 fair, and 0.2 poor). n_iter : int The number of iterations corresponding to the best stress. 
"""""" dissimilarities = check_symmetric(dissimilarities, raise_exception=True) n_samples = dissimilarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: # Randomly choose initial configuration X = random_state.rand(n_samples * n_components) X = X.reshape((n_samples, n_components)) else: # overrides the parameter p n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError(""init matrix should be of shape (%d, %d)"" % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): # Compute distance and monotonic regression dis = euclidean_distances(X) if metric: disparities = dissimilarities else: dis_flat = dis.ravel() # dissimilarities with 0 are considered as missing values dis_flat_w = dis_flat[sim_flat != 0] # Compute the disparities using a monotonic regression disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) / (disparities ** 2).sum()) # Compute stress stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 # Use Stress-1 if normalize: stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2)) # Update X using the Guttman transform dis[dis == 0] = 1e-5 ratio = disparities / dis B = - ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1. / n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if(old_stress - stress / dis) < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis return X, stress, it + 1 ","def _smacof_single(dissimilarities, metric=True, n_components=2, init=None, max_iter=300, verbose=0, eps=1e-3, random_state=None, normalize=False): """"""Computes multidimensional scaling using SMACOF algorithm. Parameters ---------- dissimilarities : ndarray of shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : bool, default=True Compute metric or nonmetric SMACOF algorithm. n_components : int, default=2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. max_iter : int, default=300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, default=0 Level of verbosity. eps : float, default=1e-3 Relative tolerance with respect to stress at which to declare convergence. random_state : int or RandomState instance, default=None Determines the random number generator used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term: `Glossary `. normalize : bool, default=False Whether use and return normed stress value (Stress-1) instead of raw stress calculated by default. Returns ------- X : ndarray of shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. 
stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). If normalize is set to True, returns Stress-1 (according to Kruskal (1964, p. 3) value 0 indicates ""perfect"" fit, 0.025 excellent, 0.05 good, 0.1 fair, and 0.2 poor). n_iter : int The number of iterations corresponding to the best stress. """""" dissimilarities = check_symmetric(dissimilarities, raise_exception=True) n_samples = dissimilarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: # Randomly choose initial configuration X = random_state.rand(n_samples * n_components) X = X.reshape((n_samples, n_components)) else: # overrides the parameter p n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError(""init matrix should be of shape (%d, %d)"" % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): # Compute distance and monotonic regression dis = euclidean_distances(X) if metric: disparities = dissimilarities else: dis_flat = dis.ravel() # dissimilarities with 0 are considered as missing values dis_flat_w = dis_flat[sim_flat != 0] # Compute the disparities using a monotonic regression disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) / (disparities ** 2).sum()) # Compute stress stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 # Use Stress-1 if normalize: stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2)) # Update X using the Guttman transform dis[dis == 0] = 1e-5 ratio = disparities / dis B = - ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1. / n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if(old_stress - stress / dis) < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis return X, stress, it + 1 " 58627,"def _add_partitions_to_table( table: ""pyarrow.Table"", partitions: Dict[str, Any] ) -> ""pyarrow.Table"": for field in table.column_names: if field in partitions: raise RuntimeError( f""{field} is a partition key, but it's also the name of a column in the"" ""read dataset."" ) num_columns = table.num_columns for i, (field, value) in enumerate(partitions.items()): column = [[value] * len(table)] table = table.add_column(num_columns + i, field, column) return table ","def _add_partitions_to_table( table: ""pyarrow.Table"", partitions: Dict[str, Any] ) -> ""pyarrow.Table"": for field in table.column_names: if field in partitions: raise RuntimeError( f""{field} is a partition key, but it's also the name of a column in the"" ""read dataset."" ) column_names = set(table.column_names) for field, value in partitions.items(): if field in column_names: # Partition column exists in-data, validate that the in-data values are equal to the # partition value. import pyarrow.compute as pa # Cast partition value to column type. # TODO: Handle cast error. 
value = pa.scalar(value).cast(table.schema.field(field).type) # pac.equal() will return None if there are any null elements in the column, # so we explicitly check if the result is False instead of checking if its falsey. if pac.equal(value, table[field]) is False: raise ValueError( f""Partition column {field} exists in table data, but partition value {value} "" f""is different from in-data values: {table[field].unique()}"" ) else: column = pa.array([value] * len(table)) table = table.append_column(field, column) return table " 54477,"def _record_heartbeat( trial_id: int, storage: storages.RDBStorage, stop_event: threading.Event ) -> None: assert storage.heartbeat_interval is not None while True: if stop_event.is_set(): return storage.record_timestamp(trial_id) time.sleep(storage.heartbeat_interval) ","def _record_heartbeat( trial_id: int, storage: storages.RDBStorage, stop_event: threading.Event ) -> None: assert storage.heartbeat_interval is not None while True: storage.record_timestamp(trial_id) if stop_event.is_set(): return time.sleep(storage.heartbeat_interval) " 33240,"def stripped_to_photo(stripped): """""" adds the jpg header and footer to a stripped image. transcoded from https://github.com/telegramdesktop/tdesktop/blob/bec39d89e19670eb436dc794a8f20b657cb87c71/Telegram/SourceFiles/ui/image/image.cpp#L225 """""" if len(stripped) < 3 or stripped[0] != '\x01': return stripped header = bytearray(b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C\x00(\x1c\x1e#\x1e\x19(#!#-+(0 None: kurobako_cmd = os.path.join(args.path_to_kurobako, ""kurobako"") subprocess.run(f""{kurobako_cmd} --version"", shell=True) if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)): raise ValueError(f""Data directory {args.data_dir} cannot be found."") os.makedirs(args.out_dir, exist_ok=True) study_json_fn = os.path.join(args.out_dir, ""studies.json"") subprocess.check_call(f""echo >| {study_json_fn}"", shell=True) solvers_filename = os.path.join(args.out_dir, ""solvers.json"") subprocess.check_call(f""echo >| {solvers_filename}"", shell=True) problems_filename = os.path.join(args.out_dir, ""problems.json"") subprocess.check_call(f""echo >| {problems_filename}"", shell=True) # Create HPO bench problem. datasets = [ ""fcnet_tabular_benchmarks/fcnet_naval_propulsion_data.hdf5"", ""fcnet_tabular_benchmarks/fcnet_parkinsons_telemonitoring_data.hdf5"", ""fcnet_tabular_benchmarks/fcnet_protein_structure_data.hdf5"", ""fcnet_tabular_benchmarks/fcnet_slice_localization_data.hdf5"", ] for dataset in datasets: dataset = os.path.join(args.data_dir, dataset) cmd = f'{kurobako_cmd} problem hpobench ""{dataset}"" | tee -a {problems_filename}' subprocess.run(cmd, shell=True) # Create NAS bench problem. dataset = os.path.join(args.data_dir, ""nasbench_full.bin"") cmd = f'{kurobako_cmd} problem nasbench ""{dataset}"" | tee -a {problems_filename}' subprocess.run(cmd, shell=True) # Create solvers. sampler_list = args.sampler_list.split() sampler_kwargs_list = args.sampler_kwargs_list.split() pruner_list = args.pruner_list.split() pruner_kwargs_list = args.pruner_kwargs_list.split() if len(sampler_list) != len(sampler_kwargs_list): raise ValueError( ""The number of samplers does not match the given keyword arguments. \n"" f""sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."" ) if len(pruner_list) != len(pruner_kwargs_list): raise ValueError( ""The number of pruners does not match the given keyword arguments. 
\n"" f""pruner_list: {pruner_list}, pruner_keyword_arguments: {pruner_kwargs_list}."" ) for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list): for pruner, pruner_kwargs in zip(pruner_list, pruner_kwargs_list): name = f""{args.name_prefix}_{sampler}_{pruner}"" cmd = ( f""{kurobako_cmd} solver --name {name} optuna --loglevel debug "" f""--sampler {sampler} --sampler-kwargs {sampler_kwargs} "" f""--pruner {pruner} --pruner-kwargs {pruner_kwargs} "" f""| tee -a {solvers_filename}"" ) subprocess.run(cmd, shell=True) # Create study. cmd = ( f""{kurobako_cmd} studies --budget 80 "" f""--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "" f""--repeats {args.n_runs} --seed {args.seed} "" f""> {study_json_fn}"" ) subprocess.run(cmd, shell=True) result_filename = os.path.join(args.out_dir, ""results.json"") cmd = ( f""cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "" f""> {result_filename}"" ) subprocess.run(cmd, shell=True) report_filename = os.path.join(args.out_dir, ""report.md"") cmd = f""cat {result_filename} | {kurobako_cmd} report > {report_filename}"" subprocess.run(cmd, shell=True) cmd = ( f""cat {result_filename} | {kurobako_cmd} plot curve --errorbar -o {args.out_dir} --xmin 10"" ) subprocess.run(cmd, shell=True) ","def run(args: argparse.Namespace) -> None: kurobako_cmd = os.path.join(args.path_to_kurobako, ""kurobako"") subprocess.run(f""{kurobako_cmd} --version"", shell=True) if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)): raise ValueError(f""Data directory {args.data_dir} cannot be found."") os.makedirs(args.out_dir, exist_ok=True) study_json_fn = os.path.join(args.out_dir, ""studies.json"") with open(study_json_fn,'w') as f: pass solvers_filename = os.path.join(args.out_dir, ""solvers.json"") subprocess.check_call(f""echo >| {solvers_filename}"", shell=True) problems_filename = os.path.join(args.out_dir, ""problems.json"") subprocess.check_call(f""echo >| {problems_filename}"", shell=True) # Create HPO bench problem. datasets = [ ""fcnet_tabular_benchmarks/fcnet_naval_propulsion_data.hdf5"", ""fcnet_tabular_benchmarks/fcnet_parkinsons_telemonitoring_data.hdf5"", ""fcnet_tabular_benchmarks/fcnet_protein_structure_data.hdf5"", ""fcnet_tabular_benchmarks/fcnet_slice_localization_data.hdf5"", ] for dataset in datasets: dataset = os.path.join(args.data_dir, dataset) cmd = f'{kurobako_cmd} problem hpobench ""{dataset}"" | tee -a {problems_filename}' subprocess.run(cmd, shell=True) # Create NAS bench problem. dataset = os.path.join(args.data_dir, ""nasbench_full.bin"") cmd = f'{kurobako_cmd} problem nasbench ""{dataset}"" | tee -a {problems_filename}' subprocess.run(cmd, shell=True) # Create solvers. sampler_list = args.sampler_list.split() sampler_kwargs_list = args.sampler_kwargs_list.split() pruner_list = args.pruner_list.split() pruner_kwargs_list = args.pruner_kwargs_list.split() if len(sampler_list) != len(sampler_kwargs_list): raise ValueError( ""The number of samplers does not match the given keyword arguments. \n"" f""sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."" ) if len(pruner_list) != len(pruner_kwargs_list): raise ValueError( ""The number of pruners does not match the given keyword arguments. 
\n"" f""pruner_list: {pruner_list}, pruner_keyword_arguments: {pruner_kwargs_list}."" ) for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list): for pruner, pruner_kwargs in zip(pruner_list, pruner_kwargs_list): name = f""{args.name_prefix}_{sampler}_{pruner}"" cmd = ( f""{kurobako_cmd} solver --name {name} optuna --loglevel debug "" f""--sampler {sampler} --sampler-kwargs {sampler_kwargs} "" f""--pruner {pruner} --pruner-kwargs {pruner_kwargs} "" f""| tee -a {solvers_filename}"" ) subprocess.run(cmd, shell=True) # Create study. cmd = ( f""{kurobako_cmd} studies --budget 80 "" f""--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "" f""--repeats {args.n_runs} --seed {args.seed} "" f""> {study_json_fn}"" ) subprocess.run(cmd, shell=True) result_filename = os.path.join(args.out_dir, ""results.json"") cmd = ( f""cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "" f""> {result_filename}"" ) subprocess.run(cmd, shell=True) report_filename = os.path.join(args.out_dir, ""report.md"") cmd = f""cat {result_filename} | {kurobako_cmd} report > {report_filename}"" subprocess.run(cmd, shell=True) cmd = ( f""cat {result_filename} | {kurobako_cmd} plot curve --errorbar -o {args.out_dir} --xmin 10"" ) subprocess.run(cmd, shell=True) " 11937,"def remove_custom_flags(spec): for flag in sorted(_FORMATTERS.keys(), key=lambda k: -len(k)) + [""~""]: if flag: spec = spec.replace(flag, """") return spec ","def remove_custom_flags(spec): for flag in sorted(_FORMATTERS.keys(), key=len, reverse=True) + [""~""]: if flag: spec = spec.replace(flag, """") return spec " 57917,"def get_assignee(client: Client, args) -> str: return client.live_assign_get(args) ","def get_assignee(client: Client, args) -> str: analyst_ids = args.get('analyst_ids') category = args.get('category') created = args.get('created') arg_id = args.get('id') name = args.get('name') severity = args.get('severity') raw_response = client.live_assign_get(analyst_ids, category, created, arg_id, name, severity) analyst = response.get('analyst') human_readable=tableToMarkdown('Analyst Penfield Recommends', analyst, headerTransform=pascalToSpace, removeNull=True) return CommandResults( readable_output=human_readable, outputs_prefix='Penfield.Recommended', outputs_key_field='', outputs=analyst ) " 57930,"def list_attached_user_policies(args, aws_client): client = aws_client.aws_session( service=SERVICE, role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration'), ) user_name = args.get('userName', """") marker = args.get('marker', None) limit, is_manual, page_size = get_limit(args) kwargs = { 'UserName': user_name, 'MaxItems': limit } if marker: kwargs.update({'Marker': marker}) response = client.list_attached_user_policies(**kwargs) data = response.get('AttachedPolicies', []) marker = response.get('Marker', None) if is_manual and page_size is not None and len(data) > page_size: data = data[-1 * page_size:] policy_data = [] for policy in data: policy_data.append({ 'UserName': user_name, 'PolicyArn': policy.get('PolicyArn', ''), 'PolicyName': policy.get('PolicyName', '') }) ec = {'AWS.IAM.AttachedUserPolicies(val.PolicyArn && val.UserName && val.PolicyArn === obj.PolicyArn && ' 'val.UserName === obj.UserName)': policy_data, 'AWS.IAM.Users(val.UserName === \'{}\').AttachedPoliciesMarker'.format(user_name): marker} human_readable = tableToMarkdown('AWS IAM Attached Policies for user {}'.format(user_name), headers=['PolicyName', 
'PolicyArn'], headerTransform=pascalToSpace, t=data) return_outputs(human_readable, ec) ","def list_attached_user_policies(args, aws_client): client = aws_client.aws_session( service=SERVICE, role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration'), ) user_name = args.get('userName', """") marker = args.get('marker', None) limit, is_manual, page_size = get_limit(args) kwargs = { 'UserName': user_name, 'MaxItems': limit } if marker: kwargs.update({'Marker': marker}) response = client.list_attached_user_policies(**kwargs) data = response.get('AttachedPolicies', []) marker = response.get('Marker', None) if is_manual and page_size is not None and len(data) > page_size: data = data[-1 * page_size:] policy_data = [{ 'UserName': user_name, 'PolicyArn': policy.get('PolicyArn'), 'PolicyName': policy.get('PolicyName') } for policy in data] ec = {'AWS.IAM.AttachedUserPolicies(val.PolicyArn && val.UserName && val.PolicyArn === obj.PolicyArn && ' 'val.UserName === obj.UserName)': policy_data, 'AWS.IAM.Users(val.UserName === \'{}\').AttachedPoliciesMarker'.format(user_name): marker} human_readable = tableToMarkdown('AWS IAM Attached Policies for user {}'.format(user_name), headers=['PolicyName', 'PolicyArn'], headerTransform=pascalToSpace, t=data) return_outputs(human_readable, ec) " 1079,"def bids_gen_info(bids_event_files, condition_column='trial_type', amplitude_column=None, time_repetition=False, ): """"""Generate subject_info structure from a list of BIDS .tsv event files. Parameters ---------- bids_event_files : list of str Filenames of BIDS .tsv event files containing columns including: 'onset', 'duration', and 'trial_type' or the `condition_column` value. condition_column : str Column of files in `bids_event_files` based on the values of which events will be sorted into different regressors amplitude_column : str Column of files in `bids_event_files` based on the values of which to apply amplitudes to events. If unspecified, all events will be represented with an amplitude of 1. Returns ------- list of Bunch """""" info = [] for bids_event_file in bids_event_files: with open(bids_event_file) as f: f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') events = [{k: v for k, v in row.items()} for row in f_events] conditions = list(set([i[condition_column] for i in events])) runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for condition in conditions: selected_events = [i for i in events if i[condition_column]==condition] onsets = [float(i['onset']) for i in selected_events] durations = [float(i['duration']) for i in selected_events] if time_repetition: decimals = math.ceil(-math.log10(time_repetition)) onsets = [round(i,decimals) for i in onsets] durations = [round(i,decimals) for i in durations] if condition: runinfo.conditions.append(condition) else: runinfo.conditions.append('e0') runinfo.onsets.append(onsets) runinfo.durations.append(durations) try: amplitudes = [float(i[amplitude_column]) for i in selected_events] runinfo.amplitudes.append(amplitudes) except KeyError: runinfo.amplitudes.append([1]*len(onsets)) info.append(runinfo) return info ","def bids_gen_info(bids_event_files, condition_column='trial_type', amplitude_column=None, time_repetition=False, ): """"""Generate subject_info structure from a list of BIDS .tsv event files. 
Parameters ---------- bids_event_files : list of str Filenames of BIDS .tsv event files containing columns including: 'onset', 'duration', and 'trial_type' or the `condition_column` value. condition_column : str Column of files in `bids_event_files` based on the values of which events will be sorted into different regressors amplitude_column : str Column of files in `bids_event_files` based on the values of which to apply amplitudes to events. If unspecified, all events will be represented with an amplitude of 1. Returns ------- list of Bunch """""" info = [] for bids_event_file in bids_event_files: with open(bids_event_file) as f: f_events = csv.DictReader(f, skipinitialspace=True, delimiter='\t') events = [{k: v for k, v in row.items()} for row in f_events] conditions = list(set([i[condition_column] for i in events])) runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for condition in conditions: selected_events = [i for i in events if i[condition_column]==condition] onsets = [float(i['onset']) for i in selected_events] durations = [float(i['duration']) for i in selected_events] if time_repetition: decimals = math.ceil(-math.log10(time_repetition)) onsets = [round(i,decimals) for i in onsets] durations = [round(i,decimals) for i in durations] if condition: runinfo.conditions.append(condition) else: runinfo.conditions.append('e0') runinfo.onsets.append(onsets) runinfo.durations.append(durations) try: amplitudes = [float(i[amplitude_column]) for i in selected_events] runinfo.amplitudes.append(amplitudes) except KeyError: runinfo.amplitudes.append([1] * len(onsets)) info.append(runinfo) return info " 35192,"def _compile_with_cache_hip(source, options, arch, cache_dir, extra_source, backend='hiprtc', name_expressions=None, log_stream=None, cache_in_memory=False, use_converter=True): global _empty_file_preprocess_cache # TODO(leofang): this might be possible but is currently undocumented if _is_cudadevrt_needed(options): raise ValueError('separate compilation is not supported in HIP') if cache_dir is None: cache_dir = get_cache_dir() # As of ROCm 3.5.0 hiprtc/hipcc can automatically pick up the # right arch without setting HCC_AMDGPU_TARGET, so we don't need # to tell the compiler which arch we are targeting. But, we still # need to know arch as part of the cache key: if arch is None: # On HIP, gcnArch is computed from ""compute capability"": # https://github.com/ROCm-Developer-Tools/HIP/blob/2080cc113a2d767352b512b9d24c0620b6dee790/rocclr/hip_device.cpp#L202 arch = device.Device().compute_capability if use_converter: source = _convert_to_hip_source(source, extra_source, is_hiprtc=(backend == 'hiprtc')) env = (arch, options, _get_nvrtc_version(), backend) base = _empty_file_preprocess_cache.get(env, None) if base is None: # This is for checking HIPRTC/HIPCC compiler internal version if backend == 'hiprtc': base = _preprocess_hiprtc('', options) else: base = _preprocess_hipcc('', options) _empty_file_preprocess_cache[env] = base key_src = '%s %s %s %s' % (env, base, source, extra_source) key_src = key_src.encode('utf-8') name = '%s.hsaco' % hashlib.md5(key_src).hexdigest() mod = function.Module() if not cache_in_memory: # Read from disk cache if not os.path.isdir(cache_dir): os.makedirs(cache_dir, exist_ok=True) # To handle conflicts in concurrent situation, we adopt lock-free # method to avoid performance degradation. # We force recompiling to retrieve C++ mangled names if so desired. 
path = os.path.join(cache_dir, name) if os.path.exists(path) and not name_expressions: with open(path, 'rb') as f: data = f.read() if len(data) >= 32: hash_value = data[:32] binary = data[32:] binary_hash = hashlib.md5(binary).hexdigest().encode('ascii') if hash_value == binary_hash: mod.load(binary) return mod else: # Enforce compiling -- the resulting kernel will be cached elsewhere, # so we do nothing pass if backend == 'hiprtc': # compile_using_nvrtc calls hiprtc for hip builds binary, mapping = compile_using_nvrtc( source, options, arch, name + '.cu', name_expressions, log_stream, cache_in_memory) mod._set_mapping(mapping) else: binary = compile_using_hipcc(source, options, arch, log_stream) if not cache_in_memory: # Write to disk cache binary_hash = hashlib.md5(binary).hexdigest().encode('ascii') # shutil.move is not atomic operation, so it could result in a # corrupted file. We detect it by appending md5 hash at the beginning # of each cache file. If the file is corrupted, it will be ignored # next time it is read. with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf: tf.write(binary_hash) tf.write(binary) temp_path = tf.name shutil.move(temp_path, path) # Save .cu source file along with .hsaco if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False): with open(path + '.cpp', 'w') as f: f.write(source) else: # we don't do any disk I/O pass mod.load(binary) return mod ","def _compile_with_cache_hip(source, options, arch, cache_dir, extra_source, backend='hiprtc', name_expressions=None, log_stream=None, cache_in_memory=False, use_converter=True): global _empty_file_preprocess_cache # TODO(leofang): this might be possible but is currently undocumented if _is_cudadevrt_needed(options): raise ValueError('separate compilation is not supported in HIP') if cache_dir is None: cache_dir = get_cache_dir() # As of ROCm 3.5.0 hiprtc/hipcc can automatically pick up the # right arch without setting HCC_AMDGPU_TARGET, so we don't need # to tell the compiler which arch we are targeting. But, we still # need to know arch as part of the cache key: if arch is None: # On HIP, gcnArch is computed from ""compute capability"": # https://github.com/ROCm-Developer-Tools/HIP/blob/rocm-4.0.0/rocclr/hip_device.cpp#L202 arch = device.Device().compute_capability if use_converter: source = _convert_to_hip_source(source, extra_source, is_hiprtc=(backend == 'hiprtc')) env = (arch, options, _get_nvrtc_version(), backend) base = _empty_file_preprocess_cache.get(env, None) if base is None: # This is for checking HIPRTC/HIPCC compiler internal version if backend == 'hiprtc': base = _preprocess_hiprtc('', options) else: base = _preprocess_hipcc('', options) _empty_file_preprocess_cache[env] = base key_src = '%s %s %s %s' % (env, base, source, extra_source) key_src = key_src.encode('utf-8') name = '%s.hsaco' % hashlib.md5(key_src).hexdigest() mod = function.Module() if not cache_in_memory: # Read from disk cache if not os.path.isdir(cache_dir): os.makedirs(cache_dir, exist_ok=True) # To handle conflicts in concurrent situation, we adopt lock-free # method to avoid performance degradation. # We force recompiling to retrieve C++ mangled names if so desired. 
path = os.path.join(cache_dir, name) if os.path.exists(path) and not name_expressions: with open(path, 'rb') as f: data = f.read() if len(data) >= 32: hash_value = data[:32] binary = data[32:] binary_hash = hashlib.md5(binary).hexdigest().encode('ascii') if hash_value == binary_hash: mod.load(binary) return mod else: # Enforce compiling -- the resulting kernel will be cached elsewhere, # so we do nothing pass if backend == 'hiprtc': # compile_using_nvrtc calls hiprtc for hip builds binary, mapping = compile_using_nvrtc( source, options, arch, name + '.cu', name_expressions, log_stream, cache_in_memory) mod._set_mapping(mapping) else: binary = compile_using_hipcc(source, options, arch, log_stream) if not cache_in_memory: # Write to disk cache binary_hash = hashlib.md5(binary).hexdigest().encode('ascii') # shutil.move is not atomic operation, so it could result in a # corrupted file. We detect it by appending md5 hash at the beginning # of each cache file. If the file is corrupted, it will be ignored # next time it is read. with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf: tf.write(binary_hash) tf.write(binary) temp_path = tf.name shutil.move(temp_path, path) # Save .cu source file along with .hsaco if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False): with open(path + '.cpp', 'w') as f: f.write(source) else: # we don't do any disk I/O pass mod.load(binary) return mod " 43694,"def bit_driver(wires, n): r""""""Returns the bit-driver cost Hamiltonian component. This Hamiltonian is defined as: .. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i where :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when constructing larger QAOA cost Hamiltonians. Args: wires (Iterable or Wires): The wires on which the returned Hamiltonian acts n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively. Returns: .Hamiltonian **Example** >>> wires = range(3) >>> hamiltonian = qaoa.pauli_driver(wires, 1) >>> print(hamiltonian) (1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2] """""" if n == 0: coeffs = [-1 for _ in wires] elif n == 1: coeffs = [1 for _ in wires] else: raise ValueError(""'state' argument must be either 0 or 1, got {}"".format(n)) ops = [qml.PauliZ(w) for w in wires] return qml.Hamiltonian(coeffs, ops) ","def bit_driver(wires, n): r""""""Returns the bit-driver cost Hamiltonian component. This Hamiltonian is defined as: .. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i where :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when constructing larger QAOA cost Hamiltonians. Args: wires (Iterable or Wires): The wires on which the returned Hamiltonian acts n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns lower energies to bitstrings with a majority of :math:`0`s or a majority of :math:`1`s, respectively. 
Returns: .Hamiltonian **Example** >>> wires = range(3) >>> hamiltonian = qaoa.pauli_driver(wires, 1) >>> print(hamiltonian) (1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2] """""" if n == 0: coeffs = [-1 for _ in wires] elif n == 1: coeffs = [1 for _ in wires] else: raise ValueError(""'state' argument must be either 0 or 1, got {}"".format(n)) ops = [qml.PauliZ(w) for w in wires] return qml.Hamiltonian(coeffs, ops) " 46052,"def imshow_bboxes(img: Union[str, np.ndarray], bboxes: List[int, float], colors: Union[str, tuple, int, np.ndarray] = 'green', top_k: int = -1, thickness: int = 1, show: bool = True, win_name: str = '', wait_time: int = 0, out_file: Optional[str] = None): """"""Draw bboxes on an image. Args: img (str or ndarray): The image to be displayed. bboxes (list or ndarray): A list of ndarray of shape (k, 4). colors (list[str or tuple or Color]): A list of colors. top_k (int): Plot the first k bboxes only if set positive. thickness (int): Thickness of lines. show (bool): Whether to show the image. win_name (str): The window name. wait_time (int): Value of waitKey param. out_file (str, optional): The filename to write the image. Returns: ndarray: The image with bboxes drawn on it. """""" img = imread(img) img = np.ascontiguousarray(img) if isinstance(bboxes, np.ndarray): bboxes = [bboxes] if not isinstance(colors, list): colors = [colors for _ in range(len(bboxes))] colors = [color_val(c) for c in colors] assert len(bboxes) == len(colors) for i, _bboxes in enumerate(bboxes): _bboxes = _bboxes.astype(np.int32) if top_k <= 0: _top_k = _bboxes.shape[0] else: _top_k = min(top_k, _bboxes.shape[0]) for j in range(_top_k): left_top = (_bboxes[j, 0], _bboxes[j, 1]) right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) cv2.rectangle( img, left_top, right_bottom, colors[i], thickness=thickness) if show: imshow(img, win_name, wait_time) if out_file is not None: imwrite(img, out_file) return img ","def imshow_bboxes(img: Union[str, np.ndarray], bboxes: Union[list, np.ndarry], colors: Union[str, tuple, int, np.ndarray] = 'green', top_k: int = -1, thickness: int = 1, show: bool = True, win_name: str = '', wait_time: int = 0, out_file: Optional[str] = None): """"""Draw bboxes on an image. Args: img (str or ndarray): The image to be displayed. bboxes (list or ndarray): A list of ndarray of shape (k, 4). colors (list[str or tuple or Color]): A list of colors. top_k (int): Plot the first k bboxes only if set positive. thickness (int): Thickness of lines. show (bool): Whether to show the image. win_name (str): The window name. wait_time (int): Value of waitKey param. out_file (str, optional): The filename to write the image. Returns: ndarray: The image with bboxes drawn on it. 
"""""" img = imread(img) img = np.ascontiguousarray(img) if isinstance(bboxes, np.ndarray): bboxes = [bboxes] if not isinstance(colors, list): colors = [colors for _ in range(len(bboxes))] colors = [color_val(c) for c in colors] assert len(bboxes) == len(colors) for i, _bboxes in enumerate(bboxes): _bboxes = _bboxes.astype(np.int32) if top_k <= 0: _top_k = _bboxes.shape[0] else: _top_k = min(top_k, _bboxes.shape[0]) for j in range(_top_k): left_top = (_bboxes[j, 0], _bboxes[j, 1]) right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) cv2.rectangle( img, left_top, right_bottom, colors[i], thickness=thickness) if show: imshow(img, win_name, wait_time) if out_file is not None: imwrite(img, out_file) return img " 30688,"def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool: """""" Checks if the time that passed since the creation of the 'lock_file' is more then 'lock_timeout'. If not- it means that the integration represented by the lock file is currently locked and is tested in another build Args: lock_file: The lock file blob object lock_timeout: The expiration timeout of the lock in seconds Returns: True if the lock has expired it's timeout, else False """""" return datetime.datetime.now(tz=pytz.utc) - lock_file.time_created >= datetime.timedelta(seconds=int(lock_timeout)) ","def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool: """""" Checks if the time that passed since the creation of the 'lock_file' is more then 'lock_timeout'. current_integrations_lock_files = get_locked_integrations(integrations, storage_client) Args: lock_file: The lock file blob object lock_timeout: The expiration timeout of the lock in seconds Returns: True if the lock has expired it's timeout, else False """""" return datetime.datetime.now(tz=pytz.utc) - lock_file.time_created >= datetime.timedelta(seconds=int(lock_timeout)) " 56455,"def average_rowwise(axs_and_cbaxs: AxesTupleList) -> AxesTupleList: """""" Take the output plots of plot_by_id and average every heatmap along its columns. Leave the line plots unchanged. Args: axs_and_cbaxs: The output of plot_by_id Returns: The same axes and colorbars, but changed to now hold line plots where once were heatmaps """""" modified_axs = [] modified_cbaxs = [] for ax, cbax in zip(*axs_and_cbaxs): mod_ax, mod_cbax = _average_heatmap(ax, cbax, avg_dim='row') modified_axs.append(mod_ax) modified_cbaxs.append(mod_cbax) return modified_axs, modified_cbaxs","def average_rowwise(axs_and_cbaxs: AxesTupleList) -> AxesTupleList: """""" Take the output plots of plot_by_id and average every heatmap along its columns. Leave the line plots unchanged. 
Args: axs_and_cbaxs: The output of plot_by_id Returns: The same axes and colorbars, but changed to now hold line plots where once were heatmaps """""" modified_axs = [] modified_cbaxs = [] for ax, cbax in zip(*axs_and_cbaxs): mod_ax, mod_cbax = _average_heatmap(ax, cbax, avg_dim='row') modified_axs.append(mod_ax) modified_cbaxs.append(mod_cbax) return modified_axs, modified_cbaxs " 23079,"def test_getitem_timestamp_str(): df = pd.DataFrame( {""A"": np.random.randn(100), ""B"": np.random.randn(100)}, index=pd.date_range(""2011-01-01"", freq=""H"", periods=100), ) ddf = dd.from_pandas(df, 10) # partial string slice assert_eq(df.loc[""2011-01-02""], ddf.loc[""2011-01-02""]) assert_eq(df.loc[""2011-01-02"":""2011-01-10""], df.loc[""2011-01-02"":""2011-01-10""]) df = pd.DataFrame( {""A"": np.random.randn(100), ""B"": np.random.randn(100)}, index=pd.date_range(""2011-01-01"", freq=""D"", periods=100), ) ddf = dd.from_pandas(df, 50) assert_eq(df.loc[""2011-01""], ddf.loc[""2011-01""]) assert_eq(df.loc[""2011""], ddf.loc[""2011""]) assert_eq(df.loc[""2011-01"":""2012-05""], ddf.loc[""2011-01"":""2012-05""]) assert_eq(df.loc[""2011"":""2015""], ddf.loc[""2011"":""2015""]) ","def test_getitem_timestamp_str(): df = pd.DataFrame( {""A"": np.random.randn(100), ""B"": np.random.randn(100)}, index=pd.date_range(""2011-01-01"", freq=""H"", periods=100), ) ddf = dd.from_pandas(df, 10) # partial string slice assert_eq(df.loc[""2011-01-02""], ddf.loc[""2011-01-02""]) assert_eq(df.loc[""2011-01-02"":""2011-01-10""], ddf.loc[""2011-01-02"":""2011-01-10""]) df = pd.DataFrame( {""A"": np.random.randn(100), ""B"": np.random.randn(100)}, index=pd.date_range(""2011-01-01"", freq=""D"", periods=100), ) ddf = dd.from_pandas(df, 50) assert_eq(df.loc[""2011-01""], ddf.loc[""2011-01""]) assert_eq(df.loc[""2011""], ddf.loc[""2011""]) assert_eq(df.loc[""2011-01"":""2012-05""], ddf.loc[""2011-01"":""2012-05""]) assert_eq(df.loc[""2011"":""2015""], ddf.loc[""2011"":""2015""]) " 55891,"def test_rule_objects(schema_obj): """"""Ensure that all objects referenced in the schema rules are defined in its object portion. This test currently fails because rules files reference object keys for some object types, including entities, columns, and metadata fields, but reference ""name"" or ""value"" elements of the object definitions for other object types, including suffixes and extensions. In the case of datatypes, the key and ""value"" field are always the same. Some other object types, such as associated_data, common_principles, formats, modalities, and top_level_files, are not checked in the rules at all. Additionally, this test only checks rules that fit the keys. """""" OBJECT_TYPE_MAPPER = { ""metadata"": ""fields"", # metadata in objects is referred to as fields in rules } not_found = [] # A list of undefined, but referenced, objects object_types = list(schema_obj[""objects""].keys()) for object_type in object_types: # Find all uses of a given object type in the schema rules type_instances_in_rules = _dict_key_lookup( schema_obj[""rules""], OBJECT_TYPE_MAPPER.get(object_type, object_type), ) if not type_instances_in_rules: continue for type_instance in type_instances_in_rules: path, instance = type_instance is_list = True if isinstance(instance, dict): instance = list(instance.keys()) is_list = False for i_use, use in enumerate(instance): if use == ""derivatives"": # Skip derivatives folders, because the folder is treated as a ""use"" instead. continue elif ""[]"" in use: # Rules may reference metadata fields with lists. 
# This test can't handle this yet, so skip. continue elif ""{}"" in use: # Rules may reference sub-dictionaries in metadata fields. # This test can't handle this yet, so skip. continue if object_type in [""extensions"", ""suffixes""]: # Some object types are referenced via their ""value"" fields in the rules object_values = [ schema_obj[""objects""][object_type][k][""value""] for k in schema_obj[""objects""][object_type].keys() ] else: # But other object types are referenced via their keys object_values = list(schema_obj[""objects""][object_type].keys()) # Build a list of items mentioned in rules, but not found in objects. if use not in object_values: temp_path = path[:] if is_list: temp_path[-1] += f""[{i_use}]"" temp_path.append(use) not_found.append(temp_path) if not_found: not_found_string = ""\n"".join( [""."".join(sublist[:-1]) + "" == "" + sublist[-1] for sublist in not_found] ) raise Exception(not_found_string) ","def test_rule_objects(schema_obj): """"""Ensure that all objects referenced in the schema rules are defined in its object portion. This test currently fails because rules files reference object keys for some object types, including entities, columns, and metadata fields, but reference ""name"" or ""value"" elements of the object definitions for other object types, including suffixes and extensions. In the case of datatypes, the key and ""value"" field are always the same. Some other object types, such as associated_data, common_principles, formats, modalities, and top_level_files, are not checked in the rules at all. Additionally, this test only checks rules that fit the keys. """""" OBJECT_TYPE_MAPPER = { ""metadata"": ""fields"", # metadata in objects is referred to as fields in rules } not_found = [] # A list of undefined, but referenced, objects object_types = list(schema_obj[""objects""].keys()) for object_type in object_types: # Find all uses of a given object type in the schema rules type_instances_in_rules = _dict_key_lookup( schema_obj[""rules""], OBJECT_TYPE_MAPPER.get(object_type, object_type), ) if not type_instances_in_rules: continue for type_instance in type_instances_in_rules: path, instance = type_instance is_list = True if isinstance(instance, Mapping): instance = list(instance) is_list = False for i_use, use in enumerate(instance): if use == ""derivatives"": # Skip derivatives folders, because the folder is treated as a ""use"" instead. continue elif ""[]"" in use: # Rules may reference metadata fields with lists. # This test can't handle this yet, so skip. continue elif ""{}"" in use: # Rules may reference sub-dictionaries in metadata fields. # This test can't handle this yet, so skip. continue if object_type in [""extensions"", ""suffixes""]: # Some object types are referenced via their ""value"" fields in the rules object_values = [ schema_obj[""objects""][object_type][k][""value""] for k in schema_obj[""objects""][object_type].keys() ] else: # But other object types are referenced via their keys object_values = list(schema_obj[""objects""][object_type].keys()) # Build a list of items mentioned in rules, but not found in objects. 
if use not in object_values: temp_path = path[:] if is_list: temp_path[-1] += f""[{i_use}]"" temp_path.append(use) not_found.append(temp_path) if not_found: not_found_string = ""\n"".join( [""."".join(sublist[:-1]) + "" == "" + sublist[-1] for sublist in not_found] ) raise Exception(not_found_string) " 15659,"def test_catch_log_exception(): """"""Test it is stilll a callback after wrapping including partial."""""" async def async_meth(): pass assert asyncio.iscoroutinefunction( logging_util.catch_log_exception(partial(async_meth)) ) @callback def callback_meth(): pass assert is_callback(logging_util.catch_log_exception(partial(callback_meth))) def sync_meth(): pass wrapped = logging_util.catch_log_exception(partial(sync_meth)) assert not is_callback(wrapped) assert not asyncio.iscoroutinefunction(wrapped) ","def test_catch_log_exception(): """"""Test it is still a callback after wrapping including partial."""""" async def async_meth(): pass assert asyncio.iscoroutinefunction( logging_util.catch_log_exception(partial(async_meth)) ) @callback def callback_meth(): pass assert is_callback(logging_util.catch_log_exception(partial(callback_meth))) def sync_meth(): pass wrapped = logging_util.catch_log_exception(partial(sync_meth)) assert not is_callback(wrapped) assert not asyncio.iscoroutinefunction(wrapped) " 30742,"def validate_args(parse_all, header, value): if parse_all.lower() != 'true' and (not header or not value): return_error(""No parse all, and no header and value to know what to parse"") ","def validate_args(parse_all, header, value): if parse_all.lower() != 'true' and (not header or not value): return_error(""Error: If parse_all is false, both header and value must be specified."") " 22094,"def _prepare_rec(spec, ignorenets, neverignore): # First of all, let's see if we are supposed to ignore this spec, # and if so, do so. if 'addr' in spec and \ spec.get('source') not in neverignore.get(spec['recontype'], []): for start, stop in ignorenets.get(spec['recontype'], ()): if start <= utils.force_ip2int(spec['addr']) <= stop: yield None # Then, let's clean up the records. # Change Symantec's random user agents (matching SYMANTEC_UA) to # the constant string 'SymantecRandomUserAgent'. if spec['recontype'] == 'HTTP_CLIENT_HEADER' and \ spec.get('source') == 'USER-AGENT': if SYMANTEC_UA.match(spec['value']): spec['value'] = 'SymantecRandomUserAgent' elif KASPERSKY_UA.match(spec['value']): spec['value'] = 'KasperskyWeirdUserAgent' else: match = SYMANTEC_SEP_UA.match(spec['value']) if match is not None: spec['value'] = '%s%s' % match.groups() # Change any Digest authorization header to remove non-constant # information. On one hand we loose the necessary information to # try to recover the passwords, but on the other hand we store # specs with different challenges but the same username, realm, # host and sensor in the same records. 
elif ( spec['recontype'] in {'HTTP_CLIENT_HEADER', 'HTTP_CLIENT_HEADER_SERVER'} and spec.get('source') in {'AUTHORIZATION', 'PROXY-AUTHORIZATION'} ): value = spec['value'] if value: authtype = value.split(None, 1)[0] if authtype.lower() == 'digest': try: # we only keep relevant info spec['value'] = '%s %s' % (authtype, ','.join( val for val in _split_digest_auth(value[6:].strip()) if DIGEST_AUTH_INFOS.match(val) )) except Exception: utils.LOGGER.warning(""Cannot parse digest error for %r"", spec, exc_info=True) elif ntlm._is_ntlm_message(value): # NTLM_NEGOTIATE and NTLM_AUTHENTICATE ret = _prepare_rec_ntlm(spec, 'NTLM_CLIENT_FLAGS') spec = ret['spec'] if 'fingerprint' in ret: yield ret['fingerprint'] elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}: spec['value'] = authtype elif ( spec['recontype'] == 'HTTP_SERVER_HEADER' and spec.get('source') in {'WWW-AUTHENTICATE', 'PROXY-AUTHENTICATE'} ): value = spec['value'] if value: authtype = value.split(None, 1)[0] if authtype.lower() == 'digest': try: # we only keep relevant info spec['value'] = '%s %s' % (authtype, ','.join( val for val in _split_digest_auth(value[6:].strip()) if DIGEST_AUTH_INFOS.match(val) )) except Exception: utils.LOGGER.warning(""Cannot parse digest error for %r"", spec, exc_info=True) elif ntlm._is_ntlm_message(value): # NTLM_CHALLENGE ret = _prepare_rec_ntlm(spec, 'NTLM_SERVER_FLAGS') spec, fingerprint = ret['spec'], ret['fingerprint'] yield fingerprint elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}: spec['value'] = authtype # TCP server banners: try to normalize data elif spec['recontype'] == 'TCP_SERVER_BANNER': newvalue = value = utils.nmap_decode_data(spec['value']) for pattern, replace in TCP_SERVER_PATTERNS: if pattern.search(newvalue): newvalue = pattern.sub(replace, newvalue) if newvalue != value: spec['value'] = utils.nmap_encode_data(newvalue) elif spec['recontype'] in {'TCP_CLIENT_BANNER', 'TCP_HONEYPOT_HIT'}: if spec['value']: data = utils.nmap_decode_data(spec['value']) if data in scanners.TCP_PROBES: scanner, probe = scanners.TCP_PROBES[data] info = { 'service_name': 'scanner', 'service_product': scanner, } if probe is not None: info['service_extrainfo'] = 'TCP probe %s' % probe spec.setdefault('infos', {}).update(info) else: probe = utils.get_nmap_probes('tcp').get(data) if probe is not None: spec.setdefault('infos', {}).update({ 'service_name': 'scanner', 'service_product': 'Nmap', 'service_extrainfo': 'TCP probe %s' % probe, }) elif spec['recontype'] == 'UDP_HONEYPOT_HIT': data = utils.nmap_decode_data(spec['value']) if data in scanners.UDP_PROBES: scanner, probe = scanners.UDP_PROBES[data] info = { 'service_name': 'scanner', 'service_product': scanner, } if probe is not None: info['service_extrainfo'] = 'UDP probe %s' % probe spec.setdefault('infos', {}).update(info) else: probe = utils.get_nmap_probes('udp').get(data) if probe is not None: spec.setdefault('infos', {}).update({ 'service_name': 'scanner', 'service_product': 'Nmap', 'service_extrainfo': 'UDP probe %s' % probe, }) else: payload = utils.get_nmap_udp_payloads().get(data) if payload is not None: spec.setdefault('infos', {}).update({ 'service_name': 'scanner', 'service_product': 'Nmap', 'service_extrainfo': 'UDP payload %s' % payload, }) # SSL_{CLIENT,SERVER} JA3 elif ((spec['recontype'] == 'SSL_CLIENT' and spec['source'] == 'ja3') or (spec['recontype'] == 'SSL_SERVER' and spec['source'].startswith('ja3-'))): value = spec['value'] spec.setdefault('infos', {})['raw'] = value spec['value'] = hashlib.new(""md5"", 
value.encode()).hexdigest() if spec['recontype'] == 'SSL_SERVER': clientvalue = spec['source'][4:] spec['infos'].setdefault('client', {})['raw'] = clientvalue spec['source'] = 'ja3-%s' % hashlib.new( ""md5"", clientvalue.encode(), ).hexdigest() # SSH_{CLIENT,SERVER}_HASSH elif spec['recontype'] in ['SSH_CLIENT_HASSH', 'SSH_SERVER_HASSH']: value = spec['value'] spec.setdefault('infos', {})['raw'] = value spec['value'] = hashlib.new(""md5"", value.encode()).hexdigest() # Check DNS Blacklist answer elif spec['recontype'] == 'DNS_ANSWER': if any((spec.get('value') or """").endswith(dnsbl) for dnsbl in config.DNS_BLACKLIST_DOMAINS): dnsbl_val = spec['value'] match = DNSBL_START.search(dnsbl_val) if match is not None: spec['recontype'] = 'DNS_BLACKLIST' spec['value'] = spec.get('addr') spec['source'] = ""%s-%s"" % (dnsbl_val[match.end():], spec['source']) addr = match.group() # IPv4 if addr.count('.') == 4: spec['addr'] = '.'.join(addr.split('.')[3::-1]) # IPv6 else: spec['addr'] = utils.int2ip6(int(addr .replace('.', '')[::-1], 16)) yield spec ","def _prepare_rec(spec, ignorenets, neverignore): # First of all, let's see if we are supposed to ignore this spec, # and if so, do so. if 'addr' in spec and \ spec.get('source') not in neverignore.get(spec['recontype'], []): for start, stop in ignorenets.get(spec['recontype'], ()): if start <= utils.force_ip2int(spec['addr']) <= stop: return # Then, let's clean up the records. # Change Symantec's random user agents (matching SYMANTEC_UA) to # the constant string 'SymantecRandomUserAgent'. if spec['recontype'] == 'HTTP_CLIENT_HEADER' and \ spec.get('source') == 'USER-AGENT': if SYMANTEC_UA.match(spec['value']): spec['value'] = 'SymantecRandomUserAgent' elif KASPERSKY_UA.match(spec['value']): spec['value'] = 'KasperskyWeirdUserAgent' else: match = SYMANTEC_SEP_UA.match(spec['value']) if match is not None: spec['value'] = '%s%s' % match.groups() # Change any Digest authorization header to remove non-constant # information. On one hand we loose the necessary information to # try to recover the passwords, but on the other hand we store # specs with different challenges but the same username, realm, # host and sensor in the same records. 
elif ( spec['recontype'] in {'HTTP_CLIENT_HEADER', 'HTTP_CLIENT_HEADER_SERVER'} and spec.get('source') in {'AUTHORIZATION', 'PROXY-AUTHORIZATION'} ): value = spec['value'] if value: authtype = value.split(None, 1)[0] if authtype.lower() == 'digest': try: # we only keep relevant info spec['value'] = '%s %s' % (authtype, ','.join( val for val in _split_digest_auth(value[6:].strip()) if DIGEST_AUTH_INFOS.match(val) )) except Exception: utils.LOGGER.warning(""Cannot parse digest error for %r"", spec, exc_info=True) elif ntlm._is_ntlm_message(value): # NTLM_NEGOTIATE and NTLM_AUTHENTICATE ret = _prepare_rec_ntlm(spec, 'NTLM_CLIENT_FLAGS') spec = ret['spec'] if 'fingerprint' in ret: yield ret['fingerprint'] elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}: spec['value'] = authtype elif ( spec['recontype'] == 'HTTP_SERVER_HEADER' and spec.get('source') in {'WWW-AUTHENTICATE', 'PROXY-AUTHENTICATE'} ): value = spec['value'] if value: authtype = value.split(None, 1)[0] if authtype.lower() == 'digest': try: # we only keep relevant info spec['value'] = '%s %s' % (authtype, ','.join( val for val in _split_digest_auth(value[6:].strip()) if DIGEST_AUTH_INFOS.match(val) )) except Exception: utils.LOGGER.warning(""Cannot parse digest error for %r"", spec, exc_info=True) elif ntlm._is_ntlm_message(value): # NTLM_CHALLENGE ret = _prepare_rec_ntlm(spec, 'NTLM_SERVER_FLAGS') spec, fingerprint = ret['spec'], ret['fingerprint'] yield fingerprint elif authtype.lower() in {'negotiate', 'kerberos', 'oauth'}: spec['value'] = authtype # TCP server banners: try to normalize data elif spec['recontype'] == 'TCP_SERVER_BANNER': newvalue = value = utils.nmap_decode_data(spec['value']) for pattern, replace in TCP_SERVER_PATTERNS: if pattern.search(newvalue): newvalue = pattern.sub(replace, newvalue) if newvalue != value: spec['value'] = utils.nmap_encode_data(newvalue) elif spec['recontype'] in {'TCP_CLIENT_BANNER', 'TCP_HONEYPOT_HIT'}: if spec['value']: data = utils.nmap_decode_data(spec['value']) if data in scanners.TCP_PROBES: scanner, probe = scanners.TCP_PROBES[data] info = { 'service_name': 'scanner', 'service_product': scanner, } if probe is not None: info['service_extrainfo'] = 'TCP probe %s' % probe spec.setdefault('infos', {}).update(info) else: probe = utils.get_nmap_probes('tcp').get(data) if probe is not None: spec.setdefault('infos', {}).update({ 'service_name': 'scanner', 'service_product': 'Nmap', 'service_extrainfo': 'TCP probe %s' % probe, }) elif spec['recontype'] == 'UDP_HONEYPOT_HIT': data = utils.nmap_decode_data(spec['value']) if data in scanners.UDP_PROBES: scanner, probe = scanners.UDP_PROBES[data] info = { 'service_name': 'scanner', 'service_product': scanner, } if probe is not None: info['service_extrainfo'] = 'UDP probe %s' % probe spec.setdefault('infos', {}).update(info) else: probe = utils.get_nmap_probes('udp').get(data) if probe is not None: spec.setdefault('infos', {}).update({ 'service_name': 'scanner', 'service_product': 'Nmap', 'service_extrainfo': 'UDP probe %s' % probe, }) else: payload = utils.get_nmap_udp_payloads().get(data) if payload is not None: spec.setdefault('infos', {}).update({ 'service_name': 'scanner', 'service_product': 'Nmap', 'service_extrainfo': 'UDP payload %s' % payload, }) # SSL_{CLIENT,SERVER} JA3 elif ((spec['recontype'] == 'SSL_CLIENT' and spec['source'] == 'ja3') or (spec['recontype'] == 'SSL_SERVER' and spec['source'].startswith('ja3-'))): value = spec['value'] spec.setdefault('infos', {})['raw'] = value spec['value'] = hashlib.new(""md5"", 
value.encode()).hexdigest() if spec['recontype'] == 'SSL_SERVER': clientvalue = spec['source'][4:] spec['infos'].setdefault('client', {})['raw'] = clientvalue spec['source'] = 'ja3-%s' % hashlib.new( ""md5"", clientvalue.encode(), ).hexdigest() # SSH_{CLIENT,SERVER}_HASSH elif spec['recontype'] in ['SSH_CLIENT_HASSH', 'SSH_SERVER_HASSH']: value = spec['value'] spec.setdefault('infos', {})['raw'] = value spec['value'] = hashlib.new(""md5"", value.encode()).hexdigest() # Check DNS Blacklist answer elif spec['recontype'] == 'DNS_ANSWER': if any((spec.get('value') or """").endswith(dnsbl) for dnsbl in config.DNS_BLACKLIST_DOMAINS): dnsbl_val = spec['value'] match = DNSBL_START.search(dnsbl_val) if match is not None: spec['recontype'] = 'DNS_BLACKLIST' spec['value'] = spec.get('addr') spec['source'] = ""%s-%s"" % (dnsbl_val[match.end():], spec['source']) addr = match.group() # IPv4 if addr.count('.') == 4: spec['addr'] = '.'.join(addr.split('.')[3::-1]) # IPv6 else: spec['addr'] = utils.int2ip6(int(addr .replace('.', '')[::-1], 16)) yield spec " 19617,"def getgccversion(chost=None): """""" rtype: C{str} return: the current in-use gcc version """""" gcc_ver_command = [""gcc"", ""-dumpversion""] gcc_ver_prefix = ""gcc-"" # accept clang as system compiler too clang_ver_command = [""clang"", ""--version""] clang_ver_prefix = ""clang-"" ubinpath = os.path.join(""/"", portage.const.EPREFIX, ""usr"", ""bin"") def getclangversion(output): version = re.search(""clang version ([0-9.]+) "", output) if version: return version.group(1) return ""unknown"" gcc_not_found_error = red( ""!!! No gcc found. You probably need to 'source /etc/profile'\n"" + ""!!! to update the environment of this terminal and possibly\n"" + ""!!! other terminals also.\n"" ) if chost: try: proc = subprocess.Popen( [ubinpath + ""/"" + ""gcc-config"", ""-c""], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK and myoutput.startswith(chost + ""-""): return myoutput.replace(chost + ""-"", gcc_ver_prefix, 1) try: proc = subprocess.Popen( [ubinpath + ""/"" + chost + ""-"" + gcc_ver_command[0]] + gcc_ver_command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK: return gcc_ver_prefix + myoutput # no GCC? 
try Clang try: proc = subprocess.Popen( [ubinpath + ""/"" + chost + ""-"" + clang_ver_command[0]] + clang_ver_command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK: return clang_ver_prefix + getclangversion(myoutput) try: proc = subprocess.Popen( gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK: return gcc_ver_prefix + myoutput portage.writemsg(gcc_not_found_error, noiselevel=-1) return ""[unavailable]"" ","def getgccversion(chost=None): """""" rtype: C{str} return: the current in-use gcc version """""" gcc_ver_command = [""gcc"", ""-dumpversion""] gcc_ver_prefix = ""gcc-"" # accept clang as system compiler too clang_ver_command = [""clang"", ""--version""] clang_ver_prefix = ""clang-"" ubinpath = os.path.join(""/"", portage.const.EPREFIX, ""usr"", ""bin"") def getclangversion(output): version = re.search(""clang version ([0-9.]+) "", output) if version: return version.group(1) return ""unknown"" gcc_not_found_error = red( ""!!! No gcc found. You probably need to 'source /etc/profile'\n"" + ""!!! to update the environment of this terminal and possibly\n"" + ""!!! other terminals also.\n"" ) if chost: try: proc = subprocess.Popen( [ubinpath + ""/"" + ""gcc-config"", ""-c""], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK and myoutput.startswith(chost + ""-""): return myoutput.replace(chost + ""-"", gcc_ver_prefix, 1) try: proc = subprocess.Popen( [ubinpath + ""/"" + chost + ""-"" + gcc_ver_command[0]] + gcc_ver_command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK: return gcc_ver_prefix + myoutput # no GCC? try Clang try: proc = subprocess.Popen( [usr_bin_path + ""/"" + chost + ""-"" + clang_ver_command[0]] + clang_ver_command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK: return clang_ver_prefix + getclangversion(myoutput) try: proc = subprocess.Popen( gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) except OSError: myoutput = None mystatus = 1 else: myoutput = _unicode_decode(proc.communicate()[0]).rstrip(""\n"") mystatus = proc.wait() if mystatus == os.EX_OK: return gcc_ver_prefix + myoutput portage.writemsg(gcc_not_found_error, noiselevel=-1) return ""[unavailable]"" " 1370,"def pairwise_kernels(X, Y=None, metric=""linear"", filter_params=False, n_jobs=None, **kwds): """"""Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. 
If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are:: ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', 'laplacian', 'sigmoid', 'cosine'] Read more in the :ref:`User Guide `. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == ""precomputed"", or, \ [n_samples_a, n_features] otherwise Array of pairwise kernels between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is ""precomputed"", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two rows from X as input and return a value indicating the distance between them. This means that callables from ``sklearn.metrics.pairwise`` are not allowed, as they operate on matrices, not single samples. Use the string identifying this metric instead. filter_params : boolean Whether to filter invalid parameters or not. n_jobs : int or None, optional (default=None) The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. **kwds : optional keyword parameters Any further parameters are passed directly to the kernel function. Returns ------- K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. """""" # import GPKernel locally to prevent circular imports from ..gaussian_process.kernels import Kernel as GPKernel if metric == ""precomputed"": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif isinstance(metric, GPKernel): func = metric.__call__ elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]} func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError(""Unknown kernel %r"" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds) ","def pairwise_kernels(X, Y=None, metric=""linear"", filter_params=False, n_jobs=None, **kwds): """"""Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. 
Valid values for metric are:: ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', 'laplacian', 'sigmoid', 'cosine'] Read more in the :ref:`User Guide `. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == ""precomputed"", or, \ [n_samples_a, n_features] otherwise Array of pairwise kernels between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is ""precomputed"", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two rows from X as input and return a value indicating the distance between them. This means that callables from ``sklearn.metrics.pairwise`` are not allowed, as they operate on matrices, not single samples. Use the string identifying the kernel instead. filter_params : boolean Whether to filter invalid parameters or not. n_jobs : int or None, optional (default=None) The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. **kwds : optional keyword parameters Any further parameters are passed directly to the kernel function. Returns ------- K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. """""" # import GPKernel locally to prevent circular imports from ..gaussian_process.kernels import Kernel as GPKernel if metric == ""precomputed"": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif isinstance(metric, GPKernel): func = metric.__call__ elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]} func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError(""Unknown kernel %r"" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds) " 7881,"def dose_coefficients(particle, geometry='AP'): """"""Return effective dose conversion coefficients from ICRP-116 This function provides fluence to dose conversion coefficients for effective dose for various types of external exposures based on values in `ICRP Publication 116 `_. Parameters ---------- particle : {'neutron', 'photon', 'electron', 'positron'} Incident particle geometry : {'AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO'} Irradiation geometry assumed. Refer to ICRP-116 for the meaning of the options here. 
Returns ------- energy : numpy.ndarray Energies at which dose conversion coefficients are given dose : numpy.ndarray Effective dose in [pSv cm^2] at provided energies """""" if not _DOSE_ICRP116: _load_dose_icrp116() # Get all data for selected particle data = _DOSE_ICRP116.get(particle) if data is None: raise ValueError(""{} has no effective dose data"".format(particle)) # Determine index for selected geometry if particle in ('neutron', 'photon', 'proton'): index = ('AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO').index(geometry) else: index = ('AP', 'PA', 'ISO').index(geometry) # Pull out energy and dose from table energy = data[:, 0].copy() dose = data[:, index + 1].copy() return energy, dose ","def dose_coefficients(particle, geometry='AP'): """"""Return effective dose conversion coefficients from ICRP-116 This function provides fluence to dose conversion coefficients for effective dose for various types of external exposures based on values in `ICRP Publication 116 `_. Parameters ---------- particle : {'neutron', 'photon', 'electron', 'positron'} Incident particle geometry : {'AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO'} Irradiation geometry assumed. Refer to ICRP-116 for the meaning of the options here. Returns ------- energy : numpy.ndarray Energies at which dose conversion coefficients are given dose_coeffs : numpy.ndarray Effective dose in [pSv cm^2] at provided energies """""" if not _DOSE_ICRP116: _load_dose_icrp116() # Get all data for selected particle data = _DOSE_ICRP116.get(particle) if data is None: raise ValueError(""{} has no effective dose data"".format(particle)) # Determine index for selected geometry if particle in ('neutron', 'photon', 'proton'): index = ('AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO').index(geometry) else: index = ('AP', 'PA', 'ISO').index(geometry) # Pull out energy and dose from table energy = data[:, 0].copy() dose = data[:, index + 1].copy() return energy, dose " 58070,"def create_tag_command(client: Client, args: Dict[str, str]) -> CommandResults: """""" create_tag command: Creates a new tag in the CTIX platform """""" name = str(args.get(""tag_name"")) color_code = str(args.get(""color_code"")) if color_code is not None: if not color_code.startswith(""#"") or len(color_code) != 7: return_warning( ""Color code is invalid, it should be a hex code: {}"".format(color_code) ) response = client.create_tag(name, color_code) data = response.get(""data"") results = CommandResults( readable_output=tableToMarkdown(""Tag Data"", data, removeNull=True), outputs_prefix=""CTIX.Tag"", outputs_key_field=""name"", outputs=data, raw_response=data, ) return results ","def create_tag_command(client: Client, args: Dict[str, str]) -> CommandResults: """""" create_tag command: Creates a new tag in the CTIX platform """""" name = args.get(""tag_name"") color_code = args.get(""color_code"") if color_code is not None: if not color_code.startswith(""#"") or len(color_code) != 7: return_warning( ""Color code is invalid, it should be a hex code: {}"".format(color_code) ) response = client.create_tag(name, color_code) data = response.get(""data"") results = CommandResults( readable_output=tableToMarkdown(""Tag Data"", data, removeNull=True), outputs_prefix=""CTIX.Tag"", outputs_key_field=""name"", outputs=data, raw_response=data, ) return results " 30898,"def test_get_indicators_command_with_no_data_to_return(mocker): """"""Tests get_indicators_command function with no data to return Given 'indicator_types': ['value', 'user-account'], 'limit': 2 When - Calling `get_indicators_command` Then 
- validate it returns that there are no indicators """""" client = Client args = { 'indicator_types': ['value', 'user-account'], 'limit': 2 } mocker.patch.object(client.stix_observable, 'list', return_value=RESPONSE_DATA_WITHOUT_INDICATORS) results: CommandResults = get_indicators_command(client, args) assert ""No indicators"" in results.readable_output ","def test_get_indicators_command_with_no_data_to_return(mocker): """"""Tests get_indicators_command function with no data to return Given 'indicator_types': ['value', 'user-account'], 'limit': 2 When - Calling `get_indicators_command` Then - validate the response to have a ""No indicators"" string """""" client = Client args = { 'indicator_types': ['value', 'user-account'], 'limit': 2 } mocker.patch.object(client.stix_observable, 'list', return_value=RESPONSE_DATA_WITHOUT_INDICATORS) results: CommandResults = get_indicators_command(client, args) assert ""No indicators"" in results.readable_output " 7432,"def stain_color_matrix(colors): """"""Creates a stain color matrix for a combination of stains. This routine knows some common stains, their colors are taken from other tools implementing stain unmixing, but will likely not exactly match the colors of the stains in your image. This is because the color of a stain depends on many factors, including the chemistry, the microscope light source, and the RGB camera capturing the image. It is always best to measure your stain colors. Known stains are: ""Hematoxylin"" ""Eosin"" ""DAB"" ""AEC"" ""Alcian Blue"" ""Aniline Blue"" ""Azocarmine"" ""FastBlue"" ""FastRed"" ""Feulgen"" ""Light Green"" ""Methyl Blue"" ""Methyl Green"" ""Orange-G"" ""PAS"" ""Ponceau Fuchsin"" See separate_stains() and combine_stains(). Parameters ---------- colors : iterable with 1 to 3 elements. Each element must be either a string for a known stain name (see below) or an RGB triplet in the form of an iterable. Returns ------- out : (..., 3) ndarray The stain color matrix, an Nx3 matrix, where N is the length of the input `colors`. Raises ------ ValueError If `colors` contains an unknown stain name or an illegal RGB triplet, or if `colors` is empty or has more than 3 elements. References ---------- .. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html """""" # Following matrices are adapted form the Java code written by G.Landini. 
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html # Similar values can be found in CellProfiler: # https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py stain_colors = { ""Hematoxylin"": (0.650, 0.704, 0.286), ""Eosin"": (0.092789, 0.954111, 0.283111), ""DAB"": (0.268, 0.570, 0.776), ""AEC"": (0.2743, 0.6796, 0.6803), ""Alcian Blue"": (0.552556, 0.7544, 0.353744), ""Aniline Blue"": (0.853033, 0.508733, 0.112656), ""Azocarmine"": (0.09289875, 0.8662008, 0.49098468), ""FastBlue"": (0.74890292, 0.60624161, 0.26731082), ""FastRed"": (0.21393921, 0.85112669, 0.47794022), ""Feulgen"": (0.46420921, 0.83008335, 0.30827187), ""Light Green"": (0.94705542, 0.25373821, 0.19650764), ""Methyl Blue"": (0.7995107, 0.5913521, 0.10528667), ""Methyl Green"": (0.98003, 0.144316, 0.133146), ""Orange-G"": (0.10732849, 0.36765403, 0.9237484), ""PAS"": (0.175411, 0.972178, 0.154589), ""Ponceau Fuchsin"": (0.09997159, 0.73738605, 0.6680326), } N = len(colors) if N < 1 or N > 3: msg = (f'the input `colors` must have between 1 and 3 elements, got {N}') raise ValueError(msg) out = np.zeros((N, 3)) for ii, val in enumerate(colors): if isinstance(val, str): if not val in stain_colors: msg = (f'the input `colors` contains {val}, which I do not recognize as a stain') raise ValueError(msg) val = stain_colors[val] else: if len(val) != 3 or not all(isinstance(v, float) for v in val): msg = (f'the input `colors` contains {val}, which is not an RGB triplet') raise ValueError(msg) norm = np.linalg.norm(val) val = [v / norm for v in val] out[ii, :] = val return out ","def stain_color_matrix(colors): """"""Creates a stain color matrix for a combination of stains. This routine knows some common stains, their colors are taken from other tools implementing stain unmixing, but will likely not exactly match the colors of the stains in your image. This is because the color of a stain depends on many factors, including the chemistry, the microscope light source, and the RGB camera capturing the image. It is always best to measure your stain colors. Known stains are: ""Hematoxylin"" ""Eosin"" ""DAB"" ""AEC"" ""Alcian Blue"" ""Aniline Blue"" ""Azocarmine"" ""FastBlue"" ""FastRed"" ""Feulgen"" ""Light Green"" ""Methyl Blue"" ""Methyl Green"" ""Orange-G"" ""PAS"" ""Ponceau Fuchsin"" See separate_stains() and combine_stains(). Parameters ---------- colors : iterable with 1 to 3 elements. Each element must be either a string for a known stain name (see below) or an RGB triplet in the form of an iterable. Returns ------- out : (..., 3) ndarray The stain color matrix, an Nx3 matrix, where N is the length of the input `colors`. Raises ------ ValueError If `colors` contains an unknown stain name or an illegal RGB triplet, or if `colors` is empty or has more than 3 elements. References ---------- .. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html """""" # Following matrices are adapted form the Java code written by G.Landini. 
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html # Similar values can be found in CellProfiler: # https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py stain_colors = { ""Hematoxylin"": (0.650, 0.704, 0.286), ""Eosin"": (0.092789, 0.954111, 0.283111), ""DAB"": (0.268, 0.570, 0.776), ""AEC"": (0.2743, 0.6796, 0.6803), ""Alcian Blue"": (0.552556, 0.7544, 0.353744), ""Aniline Blue"": (0.853033, 0.508733, 0.112656), ""Azocarmine"": (0.09289875, 0.8662008, 0.49098468), ""FastBlue"": (0.74890292, 0.60624161, 0.26731082), ""FastRed"": (0.21393921, 0.85112669, 0.47794022), ""Feulgen"": (0.46420921, 0.83008335, 0.30827187), ""Light Green"": (0.94705542, 0.25373821, 0.19650764), ""Methyl Blue"": (0.7995107, 0.5913521, 0.10528667), ""Methyl Green"": (0.98003, 0.144316, 0.133146), ""Orange-G"": (0.10732849, 0.36765403, 0.9237484), ""PAS"": (0.175411, 0.972178, 0.154589), ""Ponceau Fuchsin"": (0.09997159, 0.73738605, 0.6680326), } N = len(colors) if N < 1 or N > 3: msg = (f'the input `colors` must have between 1 and 3 elements, got {N}') raise ValueError(msg) out = np.zeros((N, 3)) for ii, val in enumerate(colors): if isinstance(val, str): if not val in stain_colors: msg = (f'the input `colors` contains {val}, which is not a known stain') raise ValueError(msg) val = stain_colors[val] else: if len(val) != 3 or not all(isinstance(v, float) for v in val): msg = (f'the input `colors` contains {val}, which is not an RGB triplet') raise ValueError(msg) norm = np.linalg.norm(val) val = [v / norm for v in val] out[ii, :] = val return out " 30283,"def createPsDocumentation(path, origin, language): isErrorPS = False with open(path, 'r') as file: ps_script = file.read() function_doc_list = list() functions_list = re.findall(r'function\s([\w_]*)\s{\s*<#\s*(.*?)#>', ps_script, re.S) for function in functions_list: function_doc = { 'language': language, 'origin': origin } function_name = function[0] function_doc['name'] = function_name parameters = function[1].split('.PARAMETER') description = parameters[0].split('.DESCRIPTION')[1].strip() if not description: isErrorPS = True print(""Missing description for in PS function {}.\n"".format(function_name)) function_doc['description'] = description arguments = [] for parameter in parameters[1:]: split_param = list(filter(None, parameter.split('\n'))) required = False param_name = split_param[0].strip() if 'required' in param_name: required = True param_name = param_name.replace(' (required)', '') param_description = split_param[1] if not param_description: isErrorPS = True print(""Missing parameter description for parameter {} for in PS function {}.\n"".format( param_name, function_name)) arguments.append({ 'name': param_name, 'description': param_description, 'required': required }) function_doc['arguments'] = arguments function_doc_list.append(function_doc) return function_doc_list, isErrorPS ","def createPsDocumentation(path, origin, language): isErrorPS = False with open(path, 'r') as file: ps_script = file.read() function_doc_list = list() functions_list = re.findall(r'function\s([\w_]*)\s{\s*<#\s*(.*?)#>', ps_script, re.S) for function in functions_list: function_doc = { 'language': language, 'origin': origin } function_name = function[0] function_doc['name'] = function_name parameters = function[1].split('.PARAMETER') description = parameters[0].split('.DESCRIPTION')[1].strip() if not description: isErrorPS = True print(""Missing description for PS function 
{}.\n"".format(function_name)) function_doc['description'] = description arguments = [] for parameter in parameters[1:]: split_param = list(filter(None, parameter.split('\n'))) required = False param_name = split_param[0].strip() if 'required' in param_name: required = True param_name = param_name.replace(' (required)', '') param_description = split_param[1] if not param_description: isErrorPS = True print(""Missing parameter description for parameter {} for in PS function {}.\n"".format( param_name, function_name)) arguments.append({ 'name': param_name, 'description': param_description, 'required': required }) function_doc['arguments'] = arguments function_doc_list.append(function_doc) return function_doc_list, isErrorPS " 42919,"def is_clique(graph: nx.Graph) -> bool: """"""Determines if the input graph is a clique. A clique of :math:`n` nodes has :math:`n*(n-1)/2` edges. Example usage: .. code-block:: >>> from strawberryfields.apps.graph import utils >>> import networkx as nx >>> graph = nx.complete_graph(10) >>> utils.is_clique(graph) True Args: graph (nx.Graph): The input graph. Returns: bool: ``True`` if input graph is a clique and ``False`` otherwise. """""" edges = graph.edges nodes = graph.order() return len(edges) == nodes * (nodes - 1) / 2 ","def is_clique(graph: nx.Graph) -> bool: """"""Determines if the input graph is a clique. A clique of :math:`n` nodes has :math:`n*(n-1)/2` edges. Example usage: .. code-block:: >>> from strawberryfields.apps.graph import utils >>> import networkx as nx >>> graph = nx.complete_graph(10) >>> utils.is_clique(graph) True Args: graph (nx.Graph): the input graph Returns: bool: ``True`` if input graph is a clique and ``False`` otherwise. """""" edges = graph.edges nodes = graph.order() return len(edges) == nodes * (nodes - 1) / 2 " 29725,"def get_device_mig_mode(device): """"""Get MIG mode for a device index or UUID Parameters ---------- device: ``int``, ``bytes`` or``str`` An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID of a CUDA (either GPU or MIG) device. Returns ------- out: ``list`` A ``list`` with two integers ``[current_mode, pending_mode]``. """""" init_once() try: device_index = int(device) handle = pynvml.nvmlDeviceGetHandleByIndex(device_index) except ValueError: uuid = device if isinstance(device, bytes) else bytes(device, ""utf-8"") handle = pynvml.nvmlDeviceGetHandleByUUID(uuid) try: return pynvml.nvmlDeviceGetMigMode(handle) except pynvml.NVMLError_NotSupported: return [0, 0] ","def get_device_mig_mode(device): """"""Get MIG mode for a device index or UUID Parameters ---------- device: ``int``, ``bytes`` or``str`` An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID of a CUDA (either GPU or MIG) device. Returns ------- out : list A ``list`` with two integers ``[current_mode, pending_mode]``. """""" init_once() try: device_index = int(device) handle = pynvml.nvmlDeviceGetHandleByIndex(device_index) except ValueError: uuid = device if isinstance(device, bytes) else bytes(device, ""utf-8"") handle = pynvml.nvmlDeviceGetHandleByUUID(uuid) try: return pynvml.nvmlDeviceGetMigMode(handle) except pynvml.NVMLError_NotSupported: return [0, 0] " 42985,"def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate=""BSgate""): """"""Applies a two-mode gate to a state Applies the two-mode gate to the state using custom tensor contractions and the numba compiler for faster application. 
Args: mat (ndarray): The BS operator to be applied to the state state (ndarray): The state that the BS is applied to pure (bool): If the state is pure or mixed modes (list[int]): A list of modes to which the BS is applied n (int): The total number of modes trunc (int): The Hilbert space truncation/cutoff gate (str): the gate which should be called (BSgate, S2gate) Returns: ndarray: State where the two-mode operation has been applied """""" if pure: t1 = modes[0] t2 = modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(n) switch_list_2 = np.arange(n) switch_list_1[[0, t1]] = switch_list_1[[t1, 0]] switch_list_2[[1, t2]] = switch_list_2[[t2, 1]] state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(switch_list_1) else: t1 = 2 * modes[0] t2 = 2 * modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(2 * n) switch_list_2 = np.arange(2 * n) switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]] switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]] # put bra-values to the left, and ket-values to the right (ignoring values not operated on) transpose_list = np.arange(2 * n) transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] state = state.transpose(transpose_list) state = state.transpose(switch_list_1) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_BS(mat.conj(), state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_S2(mat.conj(), state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(transpose_list) return ret ","def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate=""BSgate""): """"""Applies a two-mode gate to a state Applies the specified two-mode gate to the state using custom tensor contractions and the numba compiler for faster application. 
Args: mat (ndarray): The BS operator to be applied to the state state (ndarray): The state that the BS is applied to pure (bool): If the state is pure or mixed modes (list[int]): A list of modes to which the BS is applied n (int): The total number of modes trunc (int): The Hilbert space truncation/cutoff gate (str): the gate which should be called (BSgate, S2gate) Returns: ndarray: State where the two-mode operation has been applied """""" if pure: t1 = modes[0] t2 = modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(n) switch_list_2 = np.arange(n) switch_list_1[[0, t1]] = switch_list_1[[t1, 0]] switch_list_2[[1, t2]] = switch_list_2[[t2, 1]] state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(switch_list_1) else: t1 = 2 * modes[0] t2 = 2 * modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(2 * n) switch_list_2 = np.arange(2 * n) switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]] switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]] # put bra-values to the left, and ket-values to the right (ignoring values not operated on) transpose_list = np.arange(2 * n) transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] state = state.transpose(transpose_list) state = state.transpose(switch_list_1) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_BS(mat.conj(), state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_S2(mat.conj(), state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(transpose_list) return ret " 5654,"def test_bug_11886(): def opt(x): return x[0]**2+x[1]**2 with np.testing.suppress_warnings() as sup: sup.filter(PendingDeprecationWarning) A = np.matrix(np.diag([1, 1])) lin_cons = LinearConstraint(A, -1, np.inf) minimize(opt, 2*[1], constraints = lin_cons) ","def test_bug_11886(): def opt(x): return x[0]**2+x[1]**2 with np.testing.suppress_warnings() as sup: sup.filter(PendingDeprecationWarning) A = np.matrix(np.diag([1, 1])) lin_cons = LinearConstraint(A, -1, np.inf) minimize(opt, 2*[1], constraints = lin_cons) # just checking that there are no errors " 2057,"def _deprecate_positional_args(f): """"""Decorator for methods that issues warnings for positional arguments. Using the keyword-only argument syntax in pep 3102, arguments after the * will issue a warning when passed as a positional argument. Parameters ---------- f : function function to check arguments on. """""" sig = signature(f) kwonly_args = [] all_args = [] for name, param in sig.parameters.items(): if param.kind == Parameter.POSITIONAL_OR_KEYWORD: all_args.append(name) elif param.kind == Parameter.KEYWORD_ONLY: kwonly_args.append(name) @wraps(f) def inner_f(*args, **kwargs): extra_args = len(args) - len(all_args) if extra_args <= 0: return f(*args, **kwargs) # extra_args > 0 args_msg = ['{}={}'.format(name, arg) for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])] warnings.warn(""Pass {} as keyword args. 
From version 0.25 "" ""passing these as positional arguments will "" ""result in an error"".format("", "".join(args_msg)), FutureWarning) kwargs.update(zip(sig.parameters, args)) return f(**kwargs) return inner_f ","def _deprecate_positional_args(f): """"""Decorator for methods that issues warnings for positional arguments. Using the keyword-only argument syntax in pep 3102, arguments after the * will issue a warning when passed as a positional argument. Parameters ---------- f : function Function to check arguments on. """""" sig = signature(f) kwonly_args = [] all_args = [] for name, param in sig.parameters.items(): if param.kind == Parameter.POSITIONAL_OR_KEYWORD: all_args.append(name) elif param.kind == Parameter.KEYWORD_ONLY: kwonly_args.append(name) @wraps(f) def inner_f(*args, **kwargs): extra_args = len(args) - len(all_args) if extra_args <= 0: return f(*args, **kwargs) # extra_args > 0 args_msg = ['{}={}'.format(name, arg) for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])] warnings.warn(""Pass {} as keyword args. From version 0.25 "" ""passing these as positional arguments will "" ""result in an error"".format("", "".join(args_msg)), FutureWarning) kwargs.update(zip(sig.parameters, args)) return f(**kwargs) return inner_f " 20222,"def download_files(): """"""Download the latest IPEDS Institutional Characterstics file."""""" for slug in ['universe', 'data', 'services']: url = DATA_VARS['{}_url'.format(slug)] target = DATA_VARS['{}_zip'.format(slug)] target_slug = target.split('/')[-1] if download_zip_file(url, target): print(""Downloaded {}"".format(target_slug)) else: print(""Failed to download {}"".format(target_slug)) clean_csv_headings() ","def download_files(): """"""Download the latest IPEDS Institutional Characteristics file."""""" for slug in ['universe', 'data', 'services']: url = DATA_VARS['{}_url'.format(slug)] target = DATA_VARS['{}_zip'.format(slug)] target_slug = target.split('/')[-1] if download_zip_file(url, target): print(""Downloaded {}"".format(target_slug)) else: print(""Failed to download {}"".format(target_slug)) clean_csv_headings() " 24809,"def _comment(string: str) -> str: """"""return string as a comment"""""" lines = [line.strip() for line in string.splitlines()] sep = ""\n"" return ""# "" + f""{sep}# "".join(lines) ","def _comment(string: str) -> str: """"""return string as a comment"""""" lines = [line.strip() for line in string.splitlines()] return ""# "" + ""\n# "".join(lines) " 54276,"def read_tasoc_lightcurve(filename, flux_column=""FLUX_RAW"", quality_bitmask=""default""): """"""Returns a `TessLightCurve`. Parameters ---------- filename : str Local path or remote url of TASOC light curve FITS file. flux_column : 'flux_RAW' - this contains the T'DA extracted lightcurve, with no corrections applied to the raw light curves. Corrected lightcurves may be a thing in the future as there is a flux_corr column. quality_bitmask : For now this always none - as no calibration applied """""" lc = read_generic_lightcurve(filename, flux_column=flux_column.lower(), time_format='btjd', quality_column=""QUALITY"") # Filter out poor-quality data # NOTE: Unfortunately Astropy Table masking does not yet work for columns # that are Quantity objects, so for now we remove poor-quality data instead # of masking. 
Details: https://github.com/astropy/astropy/issues/10119 #quality_mask = TessQualityFlags.create_quality_mask( # quality_array=lc['dquality'], # bitmask=quality_bitmask) #lc = lc[quality_mask] lc.meta['TARGETID'] = lc.meta.get('TICID') lc.meta['QUALITY_BITMASK'] = quality_bitmask #lc.meta['QUALITY_MASK'] = quality_mask # QLP light curves are normalized by default lc.meta['NORMALIZED'] = True return TessLightCurve(data=lc) ","def read_tasoc_lightcurve(filename, flux_column=""FLUX_RAW"", quality_bitmask=""default""): """"""Returns a `TessLightCurve`. Parameters ---------- filename : str Local path or remote url of TASOC light curve FITS file. flux_column : str Column that will be used to populate the flux values. By default, ""FLUX_RAW"" is used. It contains the T'DA extracted lightcurve, with no corrections applied to the raw light curves. Corrected lightcurves may become available in the future. quality_bitmask : For now this always none - as no calibration applied """""" lc = read_generic_lightcurve(filename, flux_column=flux_column.lower(), time_format='btjd', quality_column=""QUALITY"") # Filter out poor-quality data # NOTE: Unfortunately Astropy Table masking does not yet work for columns # that are Quantity objects, so for now we remove poor-quality data instead # of masking. Details: https://github.com/astropy/astropy/issues/10119 #quality_mask = TessQualityFlags.create_quality_mask( # quality_array=lc['dquality'], # bitmask=quality_bitmask) #lc = lc[quality_mask] lc.meta['TARGETID'] = lc.meta.get('TICID') lc.meta['QUALITY_BITMASK'] = quality_bitmask #lc.meta['QUALITY_MASK'] = quality_mask # QLP light curves are normalized by default lc.meta['NORMALIZED'] = True return TessLightCurve(data=lc) " 29769,"def get_user_timeline_events_by_id(id: int) -> UserTimelineEvent: """""" Gets event by it's id Args: id: row ID of the timeline event """""" try: with db.engine.connect() as connection: result = connection.execute(sqlalchemy.text("""""" SELECT id, user_id, event_type, metadata, created FROM user_timeline_event WHERE id = :id """"""), { ""id"": id, }) r = dict(result.fetchone()) return UserTimelineEvent(**r) except Exception as e: raise DatabaseException(str(e)) ","def get_user_timeline_event_by_id(id: int) -> UserTimelineEvent: """""" Gets event by it's id Args: id: row ID of the timeline event """""" try: with db.engine.connect() as connection: result = connection.execute(sqlalchemy.text("""""" SELECT id, user_id, event_type, metadata, created FROM user_timeline_event WHERE id = :id """"""), { ""id"": id, }) r = dict(result.fetchone()) return UserTimelineEvent(**r) except Exception as e: raise DatabaseException(str(e)) " 10669,"def get_graph_kwargs(node_source, edge_source, **kwargs): if not isinstance(node_source, ColumnarDataSource): try: # try converting the source to ColumnDataSource node_source = ColumnDataSource(node_source) except ValueError as err: msg = ""Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}"".format( curr_type=str(type(node_source)), err=err.message ) raise ValueError(msg).with_traceback(sys.exc_info()[2]) if not isinstance(edge_source, ColumnarDataSource): try: # try converting the source to ColumnDataSource edge_source = ColumnDataSource(edge_source) except ValueError as err: msg = ""Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}"".format( curr_type=str(type(edge_source)), err=err.message ) raise ValueError(msg).with_traceback(sys.exc_info()[2]) marker = kwargs.pop('node_marker', None) marker_type = 
Scatter if isinstance(marker, dict) and 'field' in marker or marker in node_source.data: kwargs['node_marker'] = field(marker) else: if isinstance(marker, dict) and 'value' in marker: marker = marker['value'] if marker is None or marker == ""circle"": marker_type = Circle else: kwargs[""node_marker""] = marker ## node stuff node_visuals = pop_visuals(marker_type, kwargs, prefix=""node_"") if any(x.startswith('node_selection_') for x in kwargs): snode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_selection_"", defaults=node_visuals) else: snode_visuals = None if any(x.startswith('node_hover_') for x in kwargs): hnode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_hover_"", defaults=node_visuals) else: hnode_visuals = None #Always set muted glyph mnode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_muted_"", defaults=node_visuals, override_defaults={'alpha':0.2}) #Always set nonselection glyph nsnode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_nonselection_"", defaults=node_visuals) ## edge stuff edge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_"") if any(x.startswith('edge_selection_') for x in kwargs): sedge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_selection_"", defaults=edge_visuals) else: sedge_visuals = None if any(x.startswith('edge_hover_') for x in kwargs): hedge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_hover_"", defaults=edge_visuals) else: hedge_visuals = None #Always set muted glyph medge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_muted_"", defaults=edge_visuals, override_defaults={'alpha':0.2}) #Always set nonselection glyph nsedge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_nonselection_"", defaults=edge_visuals) ## node stuff node_kwargs = {k.lstrip('node_'): v for k, v in kwargs.copy().items() if k.lstrip('node_') in marker_type.properties()} node_glyph = make_glyph(marker_type, node_kwargs, node_visuals) nsnode_glyph = make_glyph(marker_type, node_kwargs, nsnode_visuals) snode_glyph = make_glyph(marker_type, node_kwargs, snode_visuals) hnode_glyph = make_glyph(marker_type, node_kwargs, hnode_visuals) mnode_glyph = make_glyph(marker_type, node_kwargs, mnode_visuals) node_renderer = GlyphRenderer( data_source=node_source, glyph=node_glyph, selection_glyph=snode_glyph or ""auto"", nonselection_glyph=nsnode_glyph or ""auto"", hover_glyph=hnode_glyph, muted_glyph=mnode_glyph or ""auto"", ) ## edge stuff edge_kwargs = {k.lstrip('edge_'): v for k, v in kwargs.copy().items() if k.lstrip('edge_') in MultiLine.properties()} edge_glyph = make_glyph(MultiLine, edge_kwargs, edge_visuals) nsedge_glyph = make_glyph(MultiLine, edge_kwargs, nsedge_visuals) sedge_glyph = make_glyph(MultiLine, edge_kwargs, sedge_visuals) hedge_glyph = make_glyph(MultiLine, edge_kwargs, hedge_visuals) medge_glyph = make_glyph(MultiLine, edge_kwargs, medge_visuals) edge_renderer = GlyphRenderer( data_source=edge_source, glyph=edge_glyph, selection_glyph=sedge_glyph or ""auto"", nonselection_glyph=nsedge_glyph or ""auto"", hover_glyph=hedge_glyph, muted_glyph=medge_glyph or ""auto"", ) renderer_kwargs = {attr: kwargs.pop(attr) for attr in RENDERER_ARGS if attr in kwargs} renderer_kwargs[""node_renderer""] = node_renderer renderer_kwargs[""edge_renderer""] = edge_renderer return renderer_kwargs ","def get_graph_kwargs(node_source, edge_source, **kwargs): if not isinstance(node_source, ColumnarDataSource): try: # try converting the source to ColumnDataSource node_source = ColumnDataSource(node_source) except 
ValueError as err: msg = ""Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}"".format( curr_type=str(type(node_source)), err=err.message ) raise ValueError(msg).with_traceback(sys.exc_info()[2]) if not isinstance(edge_source, ColumnarDataSource): try: # try converting the source to ColumnDataSource edge_source = ColumnDataSource(edge_source) except ValueError as err: msg = ""Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}"".format( curr_type=str(type(edge_source)), err=err.message ) raise ValueError(msg).with_traceback(sys.exc_info()[2]) marker = kwargs.pop('node_marker', None) marker_type = Scatter if isinstance(marker, dict) and 'field' in marker or marker in node_source.data: kwargs['node_marker'] = field(marker) else: if isinstance(marker, dict) and 'value' in marker: marker = marker['value'] if marker is None or marker == ""circle"": marker_type = Circle else: kwargs[""node_marker""] = marker ## node stuff node_visuals = pop_visuals(marker_type, kwargs, prefix=""node_"") if any(x.startswith('node_selection_') for x in kwargs): snode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_selection_"", defaults=node_visuals) else: snode_visuals = None if any(x.startswith('node_hover_') for x in kwargs): hnode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_hover_"", defaults=node_visuals) else: hnode_visuals = None # Always set muted glyph mnode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_muted_"", defaults=node_visuals, override_defaults={'alpha':0.2}) #Always set nonselection glyph nsnode_visuals = pop_visuals(marker_type, kwargs, prefix=""node_nonselection_"", defaults=node_visuals) ## edge stuff edge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_"") if any(x.startswith('edge_selection_') for x in kwargs): sedge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_selection_"", defaults=edge_visuals) else: sedge_visuals = None if any(x.startswith('edge_hover_') for x in kwargs): hedge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_hover_"", defaults=edge_visuals) else: hedge_visuals = None #Always set muted glyph medge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_muted_"", defaults=edge_visuals, override_defaults={'alpha':0.2}) #Always set nonselection glyph nsedge_visuals = pop_visuals(MultiLine, kwargs, prefix=""edge_nonselection_"", defaults=edge_visuals) ## node stuff node_kwargs = {k.lstrip('node_'): v for k, v in kwargs.copy().items() if k.lstrip('node_') in marker_type.properties()} node_glyph = make_glyph(marker_type, node_kwargs, node_visuals) nsnode_glyph = make_glyph(marker_type, node_kwargs, nsnode_visuals) snode_glyph = make_glyph(marker_type, node_kwargs, snode_visuals) hnode_glyph = make_glyph(marker_type, node_kwargs, hnode_visuals) mnode_glyph = make_glyph(marker_type, node_kwargs, mnode_visuals) node_renderer = GlyphRenderer( data_source=node_source, glyph=node_glyph, selection_glyph=snode_glyph or ""auto"", nonselection_glyph=nsnode_glyph or ""auto"", hover_glyph=hnode_glyph, muted_glyph=mnode_glyph or ""auto"", ) ## edge stuff edge_kwargs = {k.lstrip('edge_'): v for k, v in kwargs.copy().items() if k.lstrip('edge_') in MultiLine.properties()} edge_glyph = make_glyph(MultiLine, edge_kwargs, edge_visuals) nsedge_glyph = make_glyph(MultiLine, edge_kwargs, nsedge_visuals) sedge_glyph = make_glyph(MultiLine, edge_kwargs, sedge_visuals) hedge_glyph = make_glyph(MultiLine, edge_kwargs, hedge_visuals) medge_glyph = make_glyph(MultiLine, edge_kwargs, 
medge_visuals) edge_renderer = GlyphRenderer( data_source=edge_source, glyph=edge_glyph, selection_glyph=sedge_glyph or ""auto"", nonselection_glyph=nsedge_glyph or ""auto"", hover_glyph=hedge_glyph, muted_glyph=medge_glyph or ""auto"", ) renderer_kwargs = {attr: kwargs.pop(attr) for attr in RENDERER_ARGS if attr in kwargs} renderer_kwargs[""node_renderer""] = node_renderer renderer_kwargs[""edge_renderer""] = edge_renderer return renderer_kwargs " 36602,"def families(root=None, displayof=None): ""Get font families (as a tuple)"" if not root: root = tkinter._get_default_root('use font.families()') args = () if displayof: args = ('-displayof', displayof) return root.tk.splitlist(root.tk.call(""font"", ""families"", *args)) ","def families(root=None, displayof=None): ""Get font families (as a tuple)"" if not root: tk = tkinter._get_default_root('use font.families()').tk args = () if displayof: args = ('-displayof', displayof) return root.tk.splitlist(root.tk.call(""font"", ""families"", *args)) " 42072,"def _ensure_not_nan(value): # Ensure the value is not Nan, which is not supported by MySQL # if Nan, change it the None if isinstance(value, tuple) or isinstance(value, list): return type(value)([None if math.isnan(v) else v for v in value]) elif isinstance(value, dict): return {key: None if math.isnan(v) else v for key, v in value.iterms()} else: return None if math.isnan(value) else value ","def _ensure_not_nan(value): # Ensure the value is not Nan, which is not supported by MySQL # if Nan, change it the None if isinstance(value, (tuple, list)): return type(value)([None if math.isnan(v) else v for v in value]) elif isinstance(value, dict): return {key: None if math.isnan(v) else v for key, v in value.iterms()} else: return None if math.isnan(value) else value " 30295,"def add_tag_to_model(model_id, tags, model=""intelligence""): """""" Adds tag to specific Threat Model. By default is set to intelligence (indicators). """""" data = { 'tags': [{'name': t.strip(), 'tlp': 'red'} for t in tags.split(',')] } res = http_request(""POST"", F""v1/{model}/{model_id}/tag/"", params=CREDENTIALS, data=json.dumps(data)) if str(res.get('success', '')).lower() == 'true': return_outputs(F""Added successfully tags: {tags} to {model} with {model_id}"", None, res) else: return_outputs(F""Failed to add {tags} to {model} with {model_id}"", None, res) ","def add_tag_to_model(model_id, tags, model=""intelligence""): """""" Adds tag to specific Threat Model. By default is set to intelligence (indicators). 
"""""" data = { 'tags': [{'name': t.strip(), 'tlp': 'red'} for t in tags.split(',')] } res = http_request(""POST"", F""v1/{model}/{model_id}/tag/"", params=CREDENTIALS, json=data) if str(res.get('success', '')).lower() == 'true': return_outputs(F""Added successfully tags: {tags} to {model} with {model_id}"", None, res) else: return_outputs(F""Failed to add {tags} to {model} with {model_id}"", None, res) " 14504,"def test_attach_calendar(runner): runner = runner(days=2) result = runner.invoke(main_khal, ['printcalendars']) assert set(result.output.split('\n')[:3]) == {['one', 'two', 'three']} assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-a', 'one']) assert result.output == 'one\n' assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-d', 'one']) assert set(result.output.split('\n')[:2]) == {['two', 'three']} assert not result.exception ","def test_attach_calendar(runner): runner = runner(days=2) result = runner.invoke(main_khal, ['printcalendars']) assert set(result.output.split('\n')[:3]) == {['one', 'two', 'three']} assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-a', 'one']) assert result.output == 'one\n' assert not result.exception result = runner.invoke(main_khal, ['printcalendars', '-d', 'one']) assert set(result.output.split('\n')[:2]) == {'two', 'three'} assert not result.exception " 35968,"def delete_nodes( pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False ): """""" Delete nodes by a list of pks. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. :param pks: a list of the PKs of the nodes to delete :param bool force: do not ask for confirmation to delete nodes. :param int verbosity: 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes. :param bool create_forward: This will delete all output data created by any deleted calculation. :param bool call_calc_forward: This will also delete all calculations called by any workflow that is going to be deleted. Note that when you delete a workflow, also all parent workflows are deleted (recursively). Therefore, setting this flag to True may delete calculations that are 'unrelated' to what has been chosen to be deleted, just because they are connected at some point in the upwards provenance. Use with care, and it is advisable to never combine it with force. :param bool call_work_forward: This will also delete all calculations called by any workflow that is going to be deleted. The same disclaimer as forward_calcs applies here as well. 
:param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels. :param bool force: Do not ask for confirmation to delete nodes. """""" # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements from aiida.backends.utils import delete_nodes_and_connections from aiida.common import exceptions from aiida.common.links import LinkType from aiida.orm import Node, QueryBuilder, load_node starting_pks = [] for pk in pks: try: load_node(pk) except exceptions.NotExistent: echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk)) else: starting_pks.append(pk) # An empty set might be problematic for the queries done below. if not starting_pks: if verbosity: echo.echo('Nothing to delete') return follow_upwards = [] follow_upwards.append(LinkType.CREATE.value) follow_upwards.append(LinkType.RETURN.value) follow_upwards.append(LinkType.CALL_CALC.value) follow_upwards.append(LinkType.CALL_WORK.value) follow_downwards = [] follow_downwards.append(LinkType.INPUT_CALC.value) follow_downwards.append(LinkType.INPUT_WORK.value) if create_forward: follow_downwards.append(LinkType.CREATE.value) if call_calc_forward: follow_downwards.append(LinkType.CALL_CALC.value) if call_work_forward: follow_downwards.append(LinkType.CALL_WORK.value) links_upwards = {'type': {'in': follow_upwards}} links_downwards = {'type': {'in': follow_downwards}} operational_set = set().union(set(starting_pks)) accumulator_set = set().union(set(starting_pks)) while operational_set: new_pks_set = set() query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_downwards, with_incoming='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_upwards, with_outgoing='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) operational_set = new_pks_set.difference(accumulator_set) accumulator_set = new_pks_set.union(accumulator_set) pks_set_to_delete = accumulator_set if verbosity > 0: echo.echo( 'I {} delete {} node{}'.format( 'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else '' ) ) if verbosity > 1: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will')) for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label)) if dry_run: if verbosity > 0: echo.echo('\nThis was a dry run, exiting without deleting anything') return # Asking for user confirmation here if force: pass else: echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete))) if not click.confirm('Shall I continue?'): echo.echo('Exiting without deleting') return # Recover the list of folders to delete before actually deleting the nodes. 
I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access if verbosity > 0: echo.echo('I am starting node deletion.') delete_nodes_and_connections(pks_set_to_delete) if verbosity > 0: echo.echo('I have finished node deletion and I am starting folder deletion.') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) if verbosity > 0: echo.echo('I have finished folder deletion. Deletion completed.') ","def delete_nodes( pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False ): """""" Delete nodes by a list of pks. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. :param pks: a list of the PKs of the nodes to delete :param bool force: do not ask for confirmation to delete nodes. :param int verbosity: 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes. :param bool create_forward: This will delete all output data created by any deleted calculation. :param bool call_calc_forward: This will also delete all calculations called by any workflow that is going to be deleted. Note that when you delete a workflow, also all parent workflows are deleted (recursively). Therefore, setting this flag to True may delete calculations that are 'unrelated' to what has been chosen to be deleted, just because they are connected at some point in the upwards provenance. Use with care, and it is advisable to never combine it with force. :param bool call_work_forward: This will also delete all calculations called by any workflow that is going to be deleted. The same disclaimer as forward_calcs applies here as well. :param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels. :param bool force: Do not ask for confirmation to delete nodes. """""" # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements from aiida.backends.utils import delete_nodes_and_connections from aiida.common import exceptions from aiida.common.links import LinkType from aiida.orm import Node, QueryBuilder, load_node starting_pks = [] for pk in pks: try: load_node(pk) except exceptions.NotExistent: echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk)) else: starting_pks.append(pk) # An empty set might be problematic for the queries done below. 
if not starting_pks: if verbosity: echo.echo('Nothing to delete') return follow_upwards = [] follow_upwards.append(LinkType.CREATE.value) follow_upwards.append(LinkType.RETURN.value) follow_upwards.append(LinkType.CALL_CALC.value) follow_upwards.append(LinkType.CALL_WORK.value) follow_downwards = [] follow_downwards.append(LinkType.INPUT_CALC.value) follow_downwards.append(LinkType.INPUT_WORK.value) if create_forward: follow_downwards.append(LinkType.CREATE.value) if call_calc_forward: follow_downwards.append(LinkType.CALL_CALC.value) if call_work_forward: follow_downwards.append(LinkType.CALL_WORK.value) links_upwards = {'type': {'in': follow_upwards}} links_downwards = {'type': {'in': follow_downwards}} operational_set = set().union(set(starting_pks)) accumulator_set = set().union(set(starting_pks)) while operational_set: new_pks_set = set() query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_downwards, with_incoming='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_upwards, with_outgoing='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) operational_set = new_pks_set.difference(accumulator_set) accumulator_set.update(new_pks_set) pks_set_to_delete = accumulator_set if verbosity > 0: echo.echo( 'I {} delete {} node{}'.format( 'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else '' ) ) if verbosity > 1: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will')) for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label)) if dry_run: if verbosity > 0: echo.echo('\nThis was a dry run, exiting without deleting anything') return # Asking for user confirmation here if force: pass else: echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete))) if not click.confirm('Shall I continue?'): echo.echo('Exiting without deleting') return # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access if verbosity > 0: echo.echo('I am starting node deletion.') delete_nodes_and_connections(pks_set_to_delete) if verbosity > 0: echo.echo('I have finished node deletion and I am starting folder deletion.') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) if verbosity > 0: echo.echo('I have finished folder deletion. 
Deletion completed.') " 38500,"def compute_well_rock_matrix_intersections( gb: pp.GridBucket, cells: np.ndarray = None, tol: float = 1e-5 ): """"""Compute intersections and add edge coupling between the well and the rock matrix. To be called after the wells grids are constructed. We are assuming convex cells and one single high dimensional grid. Parameters: gb (pp.GridBucket): the grid bucket containing all the elements cells (np.ndarray, optional): a set of cells that might be considered to construct the tree. If it is not given the tree is constructed by using all the higher dimensional grid cells tol (float, optional): geometric tolerance """""" # Extract the dimension of the rock matrix, assumed to be of highest dimension dim_max: int = gb.dim_max() # We assume only one single higher dimensional grid, needed for the ADTree g_max: pp.Grid = gb.grids_of_dimension(dim_max)[0] # Construct an ADTree for fast computation tree = pp.adtree.ADTree(2 * g_max.dim, g_max.dim) tree.from_grid(g_max, cells) # Extract the grids of the wells of co-dimension 2 gs_w = gb.grids_of_dimension(dim_max - 2) # Pre-compute some well informations nodes_w = np.empty(gs_w.size, dtype=object) for idw, g_w in enumerate(gs_w): g_w_cn = g_w.cell_nodes() g_w_cells = np.arange(g_w.num_cells) # get the cells of the 0d as segments (start, end) first = g_w_cn.indptr[g_w_cells] second = g_w_cn.indptr[g_w_cells + 1] nodes_w[idw] = ( g_w_cn.indices[pp.utils.mcolon.mcolon(first, second)].reshape((-1, 2)).T ) # Operate on the rock matrix grid (faces, cells, _) = sps.find(g_max.cell_faces) faces = faces[np.argsort(cells)] nodes, _, _ = sps.find(g_max.face_nodes) indptr = g_max.face_nodes.indptr # Loop on all the well grids for g_w, n_w in zip(gs_w, nodes_w): # extract the start and end point of the segments start = g_w.nodes[:, n_w[0]] end = g_w.nodes[:, n_w[1]] # Lists for the cell_cell_map primary_to_mortar_I, primary_to_mortar_J, primary_to_mortar_data = [], [], [] # Operate on the segments for seg_id, (seg_start, seg_end) in enumerate(zip(start.T, end.T)): # Create the box for the segment by ordering its start and end box = np.sort(np.vstack((seg_start, seg_end)), axis=0).flatten() seg_cells = tree.search(pp.adtree.ADTNode(""dummy_node"", box)) # Loop on all the higher dimensional cells for c in seg_cells: # For the current cell retrieve its faces loc = slice(g_max.cell_faces.indptr[c], g_max.cell_faces.indptr[c + 1]) faces_loc = faces[loc] # Get the local nodes, face based poly = np.array( [ g_max.nodes[:, nodes[indptr[f] : indptr[f + 1]]] for f in faces_loc ] ) # Compute the intersections between the segment and the current higher # dimensional cell ratio = pp.intersections.segments_polyhedron( seg_start, seg_end, poly, tol ) # Store the requested information to build the projection operator if ratio > 0: primary_to_mortar_I += [seg_id] primary_to_mortar_J += [c] primary_to_mortar_data += ratio.tolist() primary_to_mortar_int = sps.csc_matrix( (primary_to_mortar_data, (primary_to_mortar_I, primary_to_mortar_J)), shape=(g_w.num_cells, g_max.num_cells), ) secondary_to_mortar_int = sps.diags(np.ones(g_w.num_cells), format=""csc"") # create the mortar grid and set the maps side_g = {pp.grids.mortar_grid.MortarSides.LEFT_SIDE: g_w.copy()} mg = pp.MortarGrid(g_w.dim, side_g, codim=g_max.dim - g_w.dim) mg.set_projection_to_mortar_int(primary_to_mortar_int, secondary_to_mortar_int) mg.compute_geometry() # add a new edge to the grid bucket gb.add_edge((g_max, g_w), mg._primary_to_mortar_int) d_e = gb.edge_props((g_max, g_w)) 
d_e[""mortar_grid""] = mg # Update the node number gb.assign_node_ordering() ","def compute_well_rock_matrix_intersections( gb: pp.GridBucket, cells: np.ndarray = None, tol: float = 1e-5 ): """"""Compute intersections and add edge coupling between the well and the rock matrix. To be called after the wells grids are constructed. We are assuming convex cells and one single high dimensional grid. Parameters: gb (pp.GridBucket): the grid bucket containing all the elements cells (np.ndarray, optional): a set of cells that might be considered to construct the tree. If it is not given the tree is constructed by using all the higher dimensional grid cells tol (float, optional): geometric tolerance """""" # Extract the dimension of the rock matrix, assumed to be of highest dimension dim_max: int = gb.dim_max() # We assume only one single higher dimensional grid, needed for the ADTree g_max: pp.Grid = gb.grids_of_dimension(dim_max)[0] # Construct an ADTree for fast computation tree = pp.adtree.ADTree(2 * g_max.dim, g_max.dim) tree.from_grid(g_max, cells) # Extract the grids of the wells of co-dimension 2 gs_w = gb.grids_of_dimension(dim_max - 2) # Pre-compute some well informations nodes_w = np.empty(gs_w.size, dtype=object) for idw, g_w in enumerate(gs_w): g_w_cn = g_w.cell_nodes() g_w_cells = np.arange(g_w.num_cells) # get the cells of the 0d as segments (start, end) first = g_w_cn.indptr[g_w_cells] second = g_w_cn.indptr[g_w_cells + 1] nodes_w[idw] = ( g_w_cn.indices[pp.utils.mcolon.mcolon(first, second)].reshape((-1, 2)).T ) # Operate on the rock matrix grid (faces, cells, _) = sps.find(g_max.cell_faces) faces = faces[np.argsort(cells)] nodes, *_ = sps.find(g_max.face_nodes) indptr = g_max.face_nodes.indptr # Loop on all the well grids for g_w, n_w in zip(gs_w, nodes_w): # extract the start and end point of the segments start = g_w.nodes[:, n_w[0]] end = g_w.nodes[:, n_w[1]] # Lists for the cell_cell_map primary_to_mortar_I, primary_to_mortar_J, primary_to_mortar_data = [], [], [] # Operate on the segments for seg_id, (seg_start, seg_end) in enumerate(zip(start.T, end.T)): # Create the box for the segment by ordering its start and end box = np.sort(np.vstack((seg_start, seg_end)), axis=0).flatten() seg_cells = tree.search(pp.adtree.ADTNode(""dummy_node"", box)) # Loop on all the higher dimensional cells for c in seg_cells: # For the current cell retrieve its faces loc = slice(g_max.cell_faces.indptr[c], g_max.cell_faces.indptr[c + 1]) faces_loc = faces[loc] # Get the local nodes, face based poly = np.array( [ g_max.nodes[:, nodes[indptr[f] : indptr[f + 1]]] for f in faces_loc ] ) # Compute the intersections between the segment and the current higher # dimensional cell ratio = pp.intersections.segments_polyhedron( seg_start, seg_end, poly, tol ) # Store the requested information to build the projection operator if ratio > 0: primary_to_mortar_I += [seg_id] primary_to_mortar_J += [c] primary_to_mortar_data += ratio.tolist() primary_to_mortar_int = sps.csc_matrix( (primary_to_mortar_data, (primary_to_mortar_I, primary_to_mortar_J)), shape=(g_w.num_cells, g_max.num_cells), ) secondary_to_mortar_int = sps.diags(np.ones(g_w.num_cells), format=""csc"") # create the mortar grid and set the maps side_g = {pp.grids.mortar_grid.MortarSides.LEFT_SIDE: g_w.copy()} mg = pp.MortarGrid(g_w.dim, side_g, codim=g_max.dim - g_w.dim) mg.set_projection_to_mortar_int(primary_to_mortar_int, secondary_to_mortar_int) mg.compute_geometry() # add a new edge to the grid bucket gb.add_edge((g_max, g_w), 
mg._primary_to_mortar_int) d_e = gb.edge_props((g_max, g_w)) d_e[""mortar_grid""] = mg # Update the node number gb.assign_node_ordering() " 49603,"def full(shape, fill_value, *args, **kwargs): # np.isscalar has somewhat strange behavior: # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html if np.ndim(fill_value) != 0: raise ValueError( f""fill_value must be scalar. Received {type(fill_value).__name__} instead."" ) if ""dtype"" not in kwargs: kwargs[""dtype""] = np.dtype(fill_value) return _full(shape=shape, fill_value=fill_value, *args, **kwargs) ","def full(shape, fill_value, *args, **kwargs): # np.isscalar has somewhat strange behavior: # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html if np.ndim(fill_value) != 0: raise ValueError( f""fill_value must be scalar. Received {type(fill_value).__name__} instead."" ) if ""dtype"" not in kwargs: if hasattr(fill_value, ""dtype""): kwargs[""dtype""] = fill_value.dtype else: kwargs[""dtype""] = type(fill_value) return _full(shape=shape, fill_value=fill_value, *args, **kwargs) " 29658,"def ensure_default_client(client): ""Ensures the client passed as argument is set as the default."" dask.config.set(scheduler=""dask.distributed"") _set_global_client(client) ","def ensure_default_client(client): """"""Ensures the client passed as argument is set as the default"""""" dask.config.set(scheduler=""dask.distributed"") _set_global_client(client) " 23836,"def test_change_branch_in_root_commit(): """""" https://github.com/conan-io/conan/issues/10971#issuecomment-1089316912 """""" c = TestClient() conanfile = MockConanfile({}) c.save({""root.txt"": """", ""subfolder/subfolder.txt"": """"}) c.run_command(""git init ."") c.run_command('git config user.name myname') c.run_command('git config user.email myname@mycompany.com') c.run_command(""git add ."") c.run_command('git commit -m ""initial commit""') c.run_command(""git checkout -b change_branch"") c.save({""subfolder/subfolder.txt"": ""CHANGED""}) c.run_command(""git add ."") c.run_command('git commit -m ""second commit""') c.run_command(""git checkout master"") c.run_command(""git merge --no-ff change_branch -m 'Merge branch'"") git = Git(conanfile, folder=c.current_folder) commit_conan = git.get_commit() c.run_command(""git rev-parse HEAD"") commit_real = str(c.out).splitlines()[0] assert commit_conan == commit_real ","def test_change_branch_in_root_commit(): """""" https://github.com/conan-io/conan/issues/10971#issuecomment-1089316912 """""" c = TestClient() conanfile = MockConanfile({}) c.save({""root.txt"": """", ""subfolder/subfolder.txt"": """"}) c.run_command(""git init ."") c.run_command('git config user.name myname') c.run_command('git config user.email myname@mycompany.com') c.run_command(""git add ."") c.run_command('git commit -m ""initial commit""') c.run_command(""git checkout -b change_branch"") c.save({""subfolder/subfolder.txt"": ""CHANGED""}) c.run_command(""git add ."") c.run_command('git commit -m ""second commit""') c.run_command(""git checkout master"") c.run_command('git merge --no-ff change_branch -m ""Merge branch""') git = Git(conanfile, folder=c.current_folder) commit_conan = git.get_commit() c.run_command(""git rev-parse HEAD"") commit_real = str(c.out).splitlines()[0] assert commit_conan == commit_real " 45558,"def cultural_diversity_observance(holidays): """""" Apply the nearest monday rule, and also exclude 2012 (Day of Respect for Cultural Diversity breaks the nearest monday rule in 2012). 
"""""" holidays = pd.to_datetime( holidays.map(lambda d: nearest_monday(d)) ) return holidays[holidays.year != 2012] ","def cultural_diversity_observance(holidays): """""" Apply the nearest monday rule, and also exclude 2012 (Day of Respect for Cultural Diversity breaks the nearest monday rule in 2012). """""" holidays = pd.to_datetime( holidays.map(nearest_monday) ) return holidays[holidays.year != 2012] " 39633,"def _concat_nbest_translations(translations: List[Translation], stop_ids: Set[int], length_penalty: LengthPenalty) -> Translation: """""" Combine nbest translations through concatenation. :param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length. :param translations: The EOS symbols. :return: A concatenation of the translations with a score. """""" expanded_translations = [_expand_nbest_translation(translation) for translation in translations] concatenated_translations = [] # type: List[Translation] for translations_to_concat in zip(*expanded_translations): concatenated_translations.append(_concat_translations(translations=list(translations_to_concat), stop_ids=stop_ids, length_penalty=length_penalty)) return _reduce_nbest_translations(concatenated_translations) ","def _concat_nbest_translations(translations: List[Translation], stop_ids: Set[int], length_penalty: LengthPenalty) -> Translation: """""" Combine nbest translations through concatenation. :param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length. :param translations: The EOS symbols. :return: A concatenation of the translations with a score. """""" expanded_translations = (_expand_nbest_translation(translation) for translation in translations) concatenated_translations = [] # type: List[Translation] for translations_to_concat in zip(*expanded_translations): concatenated_translations.append(_concat_translations(translations=list(translations_to_concat), stop_ids=stop_ids, length_penalty=length_penalty)) return _reduce_nbest_translations(concatenated_translations) " 5838,"def directionalmean(samples, axis=0, nan_policy='propagate'): """""" Computes the directional mean of a sample of vectors. Serves as equivalent of the sample mean for directional data whose magnitude is irrelevant, e. g. unit vectors. Parameters ---------- samples : array_like Input array. Must at least be two-dimensional. axis : int, optional Axis along which directional means are computed. Default is 0. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- directionalmean : ndarray Directional mean. Notes ----- This uses a definition of directional mean from [1]_. Essentially, the calculation is as follows. .. code-block:: python mean=samples.mean() directionalmean = mean/np.linalg.norm(mean) References ---------- .. [1] Mardia, Jupp. (2000). *Directional Statistics* (p. 163). Wiley. Examples -------- >>> data = np.array([[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0.]]) >>> directionalmean(data) array([1., 0., 0.]) The `regular`sample mean in contrast does not lie on the unit sphere. >>> data.mean(axis=0) array([0.8660254, 0., 0.]) """""" samples = np.asarray(samples) if samples.ndim < 2: raise ValueError(""samples must at least be two-dimensional. 
"" ""Instead samples has shape: %r."" % samples.shape) contains_nan, nan_policy = _contains_nan(samples, nan_policy) if contains_nan and nan_policy == 'omit': mean = np.nanmean(samples, axis = axis) else: mean = np.mean(samples, axis = axis) directional_mean = mean/np.linalg.norm(mean) return directional_mean ","def directionalmean(samples, axis=0, nan_policy='propagate'): """""" Computes the directional mean of a sample of vectors. Serves as equivalent of the sample mean for directional data whose magnitude is irrelevant, e. g. unit vectors. Parameters ---------- samples : array_like Input array. Must at least be two-dimensional. axis : int, optional Axis along which directional means are computed. Default is 0. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- directionalmean : ndarray Directional mean. Notes ----- This uses a definition of directional mean from [1]_. Essentially, the calculation is as follows. .. code-block:: python mean=samples.mean() directionalmean = mean/np.linalg.norm(mean) References ---------- .. [1] Mardia, Jupp. (2000). *Directional Statistics* (p. 163). Wiley. Examples -------- >>> data = np.array([[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0.]]) >>> directionalmean(data) array([1., 0., 0.]) The `regular`sample mean in contrast does not lie on the unit sphere. >>> data.mean(axis=0) array([0.8660254, 0., 0.]) """""" samples = np.asarray(samples) if samples.ndim < 2: raise ValueError(""samples must at least be two-dimensional. "" f""Instead samples has shape: {samples.shape!r}"") contains_nan, nan_policy = _contains_nan(samples, nan_policy) if contains_nan and nan_policy == 'omit': mean = np.nanmean(samples, axis = axis) else: mean = np.mean(samples, axis = axis) directional_mean = mean/np.linalg.norm(mean) return directional_mean " 12105,"def _set_fapl_fileobj(plist, **kwargs): """"""Set file access property list for file objects"""""" plist.set_fileobj_driver(h5fd.fileobj_driver, kwargs.get('fileobj')) ","def _set_fapl_fileobj(plist, **kwargs): """"""Set the Python file object driver in a file access property list"""""" plist.set_fileobj_driver(h5fd.fileobj_driver, kwargs.get('fileobj')) " 44863,"def channel_dropout(img, channels_to_drop, fill_value=0): if len(img.shape) == 2: raise NotImplementedError(""Only one channel. ChannelDropout is not defined."") img = img.copy() img[..., channels_to_drop] = fill_value return img ","def channel_dropout(img, channels_to_drop, fill_value=0): if len(img.shape) == 2 or img.shape[2] == 1: raise NotImplementedError(""Only one channel. ChannelDropout is not defined."") img = img.copy() img[..., channels_to_drop] = fill_value return img " 33048,"def umount(where: Path) -> None: try: run([""umount"", ""--recursive"", ""-n"", where]) except: log_umount_blockers(where) raise ","def umount(where: Path) -> None: try: run([""umount"", ""--recursive"", ""-n"", where]) except subprocess.CalledProcessError: log_umount_blockers(where) raise " 29344,"def _is_email_valid(email_address: str) -> bool: """"""Determines whether an email address is valid. Args: email_address: str. Email address to check. Returns: bool. Whether the specified email address is valid. """""" if not isinstance(email_address, str): return False stripped_address = email_address.strip() if not stripped_address: return False # Regex for a valid email. 
# Matches any characters before the ""@"" sign, a series of characters until # a ""."", and then a series of characters after the period. regex = r'^.+@[a-zA-Z0-9-.]+\.([a-zA-Z]+|[0-9]+)$' match = re.search(regex, email_address) return bool(match) ","def _is_email_valid(email_address: str) -> bool: """"""Determines whether an email address is valid. Args: email_address: str. Email address to check. Returns: bool. Whether the specified email address is valid. """""" if not isinstance(email_address, str): return False stripped_address = email_address.strip() if not stripped_address: return False # Regex for a valid email. # Matches any characters before the ""@"" sign, a series of characters until # a ""."", and then a series of characters after the period. regex = r'^.+@[a-zA-Z0-9-.]+\.([a-zA-Z]+|[0-9]+)$' return bool(re.search(regex, email_address)) " 49630,"def fuse( dsk, keys=None, dependencies=None, ave_width=_default, max_width=_default, max_height=_default, max_depth_new_edges=_default, rename_keys=_default, fuse_subgraphs=_default, ): """"""Fuse tasks that form reductions; more advanced than ``fuse_linear`` This trades parallelism opportunities for faster scheduling by making tasks less granular. It can replace ``fuse_linear`` in optimization passes. This optimization applies to all reductions--tasks that have at most one dependent--so it may be viewed as fusing ""multiple input, single output"" groups of tasks into a single task. There are many parameters to fine tune the behavior, which are described below. ``ave_width`` is the natural parameter with which to compare parallelism to granularity, so it should always be specified. Reasonable values for other parameters will be determined using ``ave_width`` if necessary. Parameters ---------- dsk: dict dask graph keys: list or set, optional Keys that must remain in the returned dask graph dependencies: dict, optional {key: [list-of-keys]}. Must be a list to provide count of each key This optional input often comes from ``cull`` ave_width: float (default 1) Upper limit for ``width = num_nodes / height``, a good measure of parallelizability. dask.config key: ``optimization.fuse.ave-width`` max_width: int (default infinite) Don't fuse if total width is greater than this. dask.config key: ``optimization.fuse.max-width`` max_height: int or None (default None) Don't fuse more than this many levels. Set to None to dynamically adjust to ``1.5 + ave_width * log(ave_width + 1)``. dask.config key: ``optimization.fuse.max-height`` max_depth_new_edges: int or None (default None) Don't fuse if new dependencies are added after this many levels. Set to None to dynamically adjust to ave_width * 1.5. dask.config key: ``optimization.fuse.max-depth-new-edges`` rename_keys: bool or func, optional (default True) Whether to rename the fused keys with ``default_fused_keys_renamer`` or not. Renaming fused keys can keep the graph more understandable and comprehensive, but it comes at the cost of additional processing. If False, then the top-most key will be used. For advanced usage, a function to create the new name is also accepted. dask.config key: ``optimization.fuse.rename-keys`` fuse_subgraphs : bool or None, optional (default None) Whether to fuse multiple tasks into ``SubgraphCallable`` objects. Set to None to let the default optimizer of individual dask collections decide. If no collection-specific default exists, None defaults to False. 
dask.config key: ``optimization.fuse.subgraphs`` Returns ------- dsk output graph with keys fused dependencies dict mapping dependencies after fusion. Useful side effect to accelerate other downstream optimizations. """""" # Perform low-level fusion unless the user has # specified False explicitly. if config.get(""optimization.fuse.active"") is False: return dsk, dependencies if keys is not None and not isinstance(keys, set): if not isinstance(keys, list): keys = [keys] keys = set(flatten(keys)) # Read defaults from dask.yaml and/or user-defined config file if ave_width is _default: ave_width = config.get(""optimization.fuse.ave-width"") assert ave_width is not _default if max_height is _default: max_height = config.get(""optimization.fuse.max-height"") assert max_height is not _default if max_depth_new_edges is _default: max_depth_new_edges = config.get(""optimization.fuse.max-depth-new-edges"") assert max_depth_new_edges is not _default if max_depth_new_edges is None: max_depth_new_edges = ave_width * 1.5 if max_width is _default: max_width = config.get(""optimization.fuse.max-width"") assert max_width is not _default if max_width is None: max_width = 1.5 + ave_width * math.log(ave_width + 1) if fuse_subgraphs is _default: fuse_subgraphs = config.get(""optimization.fuse.subgraphs"") assert fuse_subgraphs is not _default if fuse_subgraphs is None: fuse_subgraphs = False if not ave_width or not max_height: return dsk, dependencies if rename_keys is _default: rename_keys = config.get(""optimization.fuse.rename-keys"") assert rename_keys is not _default if rename_keys is True: key_renamer = default_fused_keys_renamer elif rename_keys is False: key_renamer = None elif not callable(rename_keys): raise TypeError(""rename_keys must be a boolean or callable"") else: key_renamer = rename_keys rename_keys = key_renamer is not None if dependencies is None: deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk} else: deps = dict(dependencies) rdeps = {} for k, vals in deps.items(): for v in vals: if v not in rdeps: rdeps[v] = [k] else: rdeps[v].append(k) deps[k] = set(vals) reducible = {k for k, vals in rdeps.items() if len(vals) == 1} if keys: reducible -= keys for k, v in dsk.items(): if type(v) is not tuple and not isinstance(v, (numbers.Number, str)): reducible.discard(k) if not reducible and ( not fuse_subgraphs or all(len(set(v)) != 1 for v in rdeps.values()) ): # Quick return if there's nothing to do. Only progress if there's tasks # fusible by the main `fuse`, or by `fuse_subgraphs` if enabled. 
return dsk, deps rv = dsk.copy() fused_trees = {} # These are the stacks we use to store data as we traverse the graph info_stack = [] children_stack = [] # For speed deps_pop = deps.pop reducible_add = reducible.add reducible_pop = reducible.pop reducible_remove = reducible.remove fused_trees_pop = fused_trees.pop info_stack_append = info_stack.append info_stack_pop = info_stack.pop children_stack_append = children_stack.append children_stack_extend = children_stack.extend children_stack_pop = children_stack.pop while reducible: parent = reducible_pop() reducible_add(parent) while parent in reducible: # Go to the top parent = rdeps[parent][0] children_stack_append(parent) children_stack_extend(reducible & deps[parent]) while True: child = children_stack[-1] if child != parent: children = reducible & deps[child] while children: # Depth-first search children_stack_extend(children) parent = child child = children_stack[-1] children = reducible & deps[child] children_stack_pop() # This is a leaf node in the reduction region # key, task, fused_keys, height, width, number of nodes, fudge, set of edges info_stack_append( ( child, rv[child], [child] if rename_keys else None, 1, 1, 1, 0, deps[child] - reducible, ) ) else: children_stack_pop() # Calculate metrics and fuse as appropriate deps_parent = deps[parent] edges = deps_parent - reducible children = deps_parent - edges num_children = len(children) if num_children == 1: ( child_key, child_task, child_keys, height, width, num_nodes, fudge, children_edges, ) = info_stack_pop() num_children_edges = len(children_edges) if fudge > num_children_edges - 1 >= 0: fudge = num_children_edges - 1 edges |= children_edges no_new_edges = len(edges) == num_children_edges if not no_new_edges: fudge += 1 if ( (num_nodes + fudge) / height <= ave_width and # Sanity check; don't go too deep if new levels introduce new edge dependencies (no_new_edges or height < max_depth_new_edges) ): # Perform substitutions as we go val = subs(dsk[parent], child_key, child_task) deps_parent.remove(child_key) deps_parent |= deps_pop(child_key) del rv[child_key] reducible_remove(child_key) if rename_keys: child_keys.append(parent) fused_trees[parent] = child_keys fused_trees_pop(child_key, None) if children_stack: if no_new_edges: # Linear fuse info_stack_append( ( parent, val, child_keys, height, width, num_nodes, fudge, edges, ) ) else: info_stack_append( ( parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges, ) ) else: rv[parent] = val break else: rv[child_key] = child_task reducible_remove(child_key) if children_stack: # Allow the parent to be fused, but only under strict circumstances. # Ensure that linear chains may still be fused. 
if fudge > int(ave_width - 1): fudge = int(ave_width - 1) # This task *implicitly* depends on `edges` info_stack_append( ( parent, rv[parent], [parent] if rename_keys else None, 1, width, 1, fudge, edges, ) ) else: break else: child_keys = [] height = 1 width = 0 num_single_nodes = 0 num_nodes = 0 fudge = 0 children_edges = set() max_num_edges = 0 children_info = info_stack[-num_children:] del info_stack[-num_children:] for ( _cur_key, _cur_task, _cur_keys, cur_height, cur_width, cur_num_nodes, cur_fudge, cur_edges, ) in children_info: if cur_height == 1: num_single_nodes += 1 elif cur_height > height: height = cur_height width += cur_width num_nodes += cur_num_nodes fudge += cur_fudge if len(cur_edges) > max_num_edges: max_num_edges = len(cur_edges) children_edges |= cur_edges # Fudge factor to account for possible parallelism with the boundaries num_children_edges = len(children_edges) fudge += min( num_children - 1, max(0, num_children_edges - max_num_edges) ) if fudge > num_children_edges - 1 >= 0: fudge = num_children_edges - 1 edges |= children_edges no_new_edges = len(edges) == num_children_edges if not no_new_edges: fudge += 1 if ( (num_nodes + fudge) / height <= ave_width and num_single_nodes <= ave_width and width <= max_width and height <= max_height and # Sanity check; don't go too deep if new levels introduce new edge dependencies (no_new_edges or height < max_depth_new_edges) ): # Perform substitutions as we go val = dsk[parent] children_deps = set() for child_info in children_info: cur_child = child_info[0] val = subs(val, cur_child, child_info[1]) del rv[cur_child] children_deps |= deps_pop(cur_child) reducible_remove(cur_child) if rename_keys: fused_trees_pop(cur_child, None) child_keys.extend(child_info[2]) deps_parent -= children deps_parent |= children_deps if rename_keys: child_keys.append(parent) fused_trees[parent] = child_keys if children_stack: info_stack_append( ( parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges, ) ) else: rv[parent] = val break else: for child_info in children_info: rv[child_info[0]] = child_info[1] reducible_remove(child_info[0]) if children_stack: # Allow the parent to be fused, but only under strict circumstances. # Ensure that linear chains may still be fused. if width > max_width: width = max_width if fudge > int(ave_width - 1): fudge = int(ave_width - 1) # key, task, height, width, number of nodes, fudge, set of edges # This task *implicitly* depends on `edges` info_stack_append( ( parent, rv[parent], [parent] if rename_keys else None, 1, width, 1, fudge, edges, ) ) else: break # Traverse upwards parent = rdeps[parent][0] if fuse_subgraphs: _inplace_fuse_subgraphs(rv, keys, deps, fused_trees, rename_keys) if key_renamer: for root_key, fused_keys in fused_trees.items(): alias = key_renamer(fused_keys) if alias is not None and alias not in rv: rv[alias] = rv[root_key] rv[root_key] = alias deps[alias] = deps[root_key] deps[root_key] = {alias} return rv, deps ","def fuse( dsk, keys=None, dependencies=None, ave_width=_default, max_width=_default, max_height=_default, max_depth_new_edges=_default, rename_keys=_default, fuse_subgraphs=_default, ): """"""Fuse tasks that form reductions; more advanced than ``fuse_linear`` This trades parallelism opportunities for faster scheduling by making tasks less granular. It can replace ``fuse_linear`` in optimization passes. 
This optimization applies to all reductions--tasks that have at most one dependent--so it may be viewed as fusing ""multiple input, single output"" groups of tasks into a single task. There are many parameters to fine tune the behavior, which are described below. ``ave_width`` is the natural parameter with which to compare parallelism to granularity, so it should always be specified. Reasonable values for other parameters will be determined using ``ave_width`` if necessary. Parameters ---------- dsk: dict dask graph keys: list or set, optional Keys that must remain in the returned dask graph dependencies: dict, optional {key: [list-of-keys]}. Must be a list to provide count of each key This optional input often comes from ``cull`` ave_width: float (default 1) Upper limit for ``width = num_nodes / height``, a good measure of parallelizability. dask.config key: ``optimization.fuse.ave-width`` max_width: int (default infinite) Don't fuse if total width is greater than this. dask.config key: ``optimization.fuse.max-width`` max_height: int or None (default None) Don't fuse more than this many levels. Set to None to dynamically adjust to ``1.5 + ave_width * log(ave_width + 1)``. dask.config key: ``optimization.fuse.max-height`` max_depth_new_edges: int or None (default None) Don't fuse if new dependencies are added after this many levels. Set to None to dynamically adjust to ave_width * 1.5. dask.config key: ``optimization.fuse.max-depth-new-edges`` rename_keys: bool or func, optional (default True) Whether to rename the fused keys with ``default_fused_keys_renamer`` or not. Renaming fused keys can keep the graph more understandable and comprehensive, but it comes at the cost of additional processing. If False, then the top-most key will be used. For advanced usage, a function to create the new name is also accepted. dask.config key: ``optimization.fuse.rename-keys`` fuse_subgraphs : bool or None, optional (default None) Whether to fuse multiple tasks into ``SubgraphCallable`` objects. Set to None to let the default optimizer of individual dask collections decide. If no collection-specific default exists, None defaults to False. dask.config key: ``optimization.fuse.subgraphs`` Returns ------- dsk output graph with keys fused dependencies dict mapping dependencies after fusion. Useful side effect to accelerate other downstream optimizations. """""" # Perform low-level fusion unless the user has # specified False explicitly. 
if config.get(""optimization.fuse.active"") is False: return dsk, dependencies if keys is not None and not isinstance(keys, set): if not isinstance(keys, list): keys = [keys] keys = set(flatten(keys)) # Read defaults from dask.yaml and/or user-defined config file if ave_width is _default: ave_width = config.get(""optimization.fuse.ave-width"") assert ave_width is not _default if max_height is _default: max_height = config.get(""optimization.fuse.max-height"") assert max_height is not _default if max_depth_new_edges is _default: max_depth_new_edges = config.get(""optimization.fuse.max-depth-new-edges"") assert max_depth_new_edges is not _default if max_depth_new_edges is None: max_depth_new_edges = ave_width * 1.5 if max_width is _default: max_width = config.get(""optimization.fuse.max-width"") assert max_width is not _default if max_width is None: max_width = 1.5 + ave_width * math.log(ave_width + 1) if fuse_subgraphs is _default: fuse_subgraphs = config.get(""optimization.fuse.subgraphs"") assert fuse_subgraphs is not _default if fuse_subgraphs is None: fuse_subgraphs = False if not ave_width or not max_height: return dsk, dependencies if rename_keys is _default: rename_keys = config.get(""optimization.fuse.rename-keys"") assert rename_keys is not _default if rename_keys is True: key_renamer = default_fused_keys_renamer elif rename_keys is False: key_renamer = None elif not callable(rename_keys): raise TypeError(""rename_keys must be a boolean or callable"") else: key_renamer = rename_keys rename_keys = key_renamer is not None if dependencies is None: deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk} else: deps = dict(dependencies) rdeps = {} for k, vals in deps.items(): for v in vals: if v not in rdeps: rdeps[v] = [k] else: rdeps[v].append(k) deps[k] = set(vals) reducible = {k for k, vals in rdeps.items() if len(vals) == 1} if keys: reducible -= keys for k, v in dsk.items(): if type(v) is not tuple and not isinstance(v, (numbers.Number, str)): reducible.discard(k) if not reducible and ( not fuse_subgraphs or all(len(set(v)) != 1 for v in rdeps.values()) ): # Quick return if there's nothing to do. Only progress if there's tasks # fusible by the main `fuse`, or by `fuse_subgraphs` if enabled. 
return dsk, deps rv = dsk.copy() fused_trees = {} # These are the stacks we use to store data as we traverse the graph info_stack = [] children_stack = [] # For speed deps_pop = deps.pop reducible_add = reducible.add reducible_pop = reducible.pop reducible_remove = reducible.remove fused_trees_pop = fused_trees.pop info_stack_append = info_stack.append info_stack_pop = info_stack.pop children_stack_append = children_stack.append children_stack_extend = children_stack.extend children_stack_pop = children_stack.pop while reducible: parent = reducible_pop() reducible_add(parent) while parent in reducible: # Go to the top parent = rdeps[parent][0] children_stack_append(parent) children_stack_extend(reducible & deps[parent]) while True: child = children_stack[-1] if child != parent: children = reducible & deps[child] while children: # Depth-first search children_stack_extend(children) parent = child child = children_stack[-1] children = reducible & deps[child] children_stack_pop() # This is a leaf node in the reduction region # key, task, fused_keys, height, width, number of nodes, fudge, set of edges info_stack_append( ( child, rv[child], [child] if rename_keys else None, 1, 1, 1, 0, deps[child] - reducible, ) ) else: children_stack_pop() # Calculate metrics and fuse as appropriate deps_parent = deps[parent] edges = deps_parent - reducible children = deps_parent - edges num_children = len(children) if num_children == 1: ( child_key, child_task, child_keys, height, width, num_nodes, fudge, children_edges, ) = info_stack_pop() num_children_edges = len(children_edges) if fudge > num_children_edges - 1 >= 0: fudge = num_children_edges - 1 edges |= children_edges no_new_edges = len(edges) == num_children_edges if not no_new_edges: fudge += 1 if ( (num_nodes + fudge) / height <= ave_width and # Sanity check; don't go too deep if new levels introduce new edge dependencies (no_new_edges or height < max_depth_new_edges) ): # Perform substitutions as we go val = subs(dsk[parent], child_key, child_task) deps_parent.remove(child_key) deps_parent |= deps_pop(child_key) del rv[child_key] reducible_remove(child_key) if rename_keys: child_keys.append(parent) fused_trees[parent] = child_keys fused_trees_pop(child_key, None) if children_stack: if no_new_edges: # Linear fuse info_stack_append( ( parent, val, child_keys, height, width, num_nodes, fudge, edges, ) ) else: info_stack_append( ( parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges, ) ) else: rv[parent] = val break else: rv[child_key] = child_task reducible_remove(child_key) if children_stack: # Allow the parent to be fused, but only under strict circumstances. # Ensure that linear chains may still be fused. 
if fudge > int(ave_width - 1): fudge = int(ave_width - 1) # This task *implicitly* depends on `edges` info_stack_append( ( parent, rv[parent], [parent] if rename_keys else None, 1, width, 1, fudge, edges, ) ) else: break else: child_keys = [] height = 1 width = 0 num_single_nodes = 0 num_nodes = 0 fudge = 0 children_edges = set() max_num_edges = 0 children_info = info_stack[-num_children:] del info_stack[-num_children:] for ( _, _, _, cur_height, cur_width, cur_num_nodes, cur_fudge, cur_edges, ) in children_info: if cur_height == 1: num_single_nodes += 1 elif cur_height > height: height = cur_height width += cur_width num_nodes += cur_num_nodes fudge += cur_fudge if len(cur_edges) > max_num_edges: max_num_edges = len(cur_edges) children_edges |= cur_edges # Fudge factor to account for possible parallelism with the boundaries num_children_edges = len(children_edges) fudge += min( num_children - 1, max(0, num_children_edges - max_num_edges) ) if fudge > num_children_edges - 1 >= 0: fudge = num_children_edges - 1 edges |= children_edges no_new_edges = len(edges) == num_children_edges if not no_new_edges: fudge += 1 if ( (num_nodes + fudge) / height <= ave_width and num_single_nodes <= ave_width and width <= max_width and height <= max_height and # Sanity check; don't go too deep if new levels introduce new edge dependencies (no_new_edges or height < max_depth_new_edges) ): # Perform substitutions as we go val = dsk[parent] children_deps = set() for child_info in children_info: cur_child = child_info[0] val = subs(val, cur_child, child_info[1]) del rv[cur_child] children_deps |= deps_pop(cur_child) reducible_remove(cur_child) if rename_keys: fused_trees_pop(cur_child, None) child_keys.extend(child_info[2]) deps_parent -= children deps_parent |= children_deps if rename_keys: child_keys.append(parent) fused_trees[parent] = child_keys if children_stack: info_stack_append( ( parent, val, child_keys, height + 1, width, num_nodes + 1, fudge, edges, ) ) else: rv[parent] = val break else: for child_info in children_info: rv[child_info[0]] = child_info[1] reducible_remove(child_info[0]) if children_stack: # Allow the parent to be fused, but only under strict circumstances. # Ensure that linear chains may still be fused. if width > max_width: width = max_width if fudge > int(ave_width - 1): fudge = int(ave_width - 1) # key, task, height, width, number of nodes, fudge, set of edges # This task *implicitly* depends on `edges` info_stack_append( ( parent, rv[parent], [parent] if rename_keys else None, 1, width, 1, fudge, edges, ) ) else: break # Traverse upwards parent = rdeps[parent][0] if fuse_subgraphs: _inplace_fuse_subgraphs(rv, keys, deps, fused_trees, rename_keys) if key_renamer: for root_key, fused_keys in fused_trees.items(): alias = key_renamer(fused_keys) if alias is not None and alias not in rv: rv[alias] = rv[root_key] rv[root_key] = alias deps[alias] = deps[root_key] deps[root_key] = {alias} return rv, deps " 7145,"def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, overlap=.5, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). 
min_sigma : scalar or sequence of scalars, optional the minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. exclude_border : int or bool, optional If nonzero int, `exclude_border` excludes blobs from within `exclude_border`-pixels of the border of the image. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach Examples -------- >>> from skimage import data, feature >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40) array([[ 267. , 359. , 16.777216], [ 267. , 115. , 10.48576 ], [ 263. , 302. , 16.777216], [ 263. , 245. , 16.777216], [ 261. , 173. , 16.777216], [ 260. , 46. , 16.777216], [ 198. , 155. , 10.48576 ], [ 196. , 43. , 10.48576 ], [ 195. , 102. , 16.777216], [ 194. , 277. , 16.777216], [ 193. , 213. , 16.777216], [ 185. , 347. , 16.777216], [ 128. , 154. , 10.48576 ], [ 127. , 102. , 10.48576 ], [ 125. , 208. , 10.48576 ], [ 125. , 45. , 16.777216], [ 124. , 337. , 10.48576 ], [ 120. , 272. , 16.777216], [ 58. , 100. , 10.48576 ], [ 54. , 276. , 10.48576 ], [ 54. , 42. , 16.777216], [ 52. , 216. , 16.777216], [ 52. , 155. , 16.777216], [ 45. , 336. , 16.777216]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """""" image = img_as_float(image) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. 
This broadcasts scalar kernels if isinstance(max_sigma, (int, float)): max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float) if isinstance(min_sigma, (int, float)): min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=np.float) max_sigma = np.asarray(max_sigma, dtype=np.float) # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # computing difference between two successive Gaussian blurred images # multiplying with average standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * np.mean(sigma_list[i]) for i in range(k)] image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] # if the gaussian is isotropic, the stdev across dimensions are # identical, so return only the stdev deviation of the first dimension if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,): sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) return _prune_blobs(lm, overlap) ","def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, overlap=.5, exclude_border=False): r""""""Finds blobs in the given grayscale image. Blobs are found using the Difference of Gaussian (DoG) method [1]_. For each blob found, the method returns its coordinates and the standard deviation of the Gaussian kernel that detected the blob. Parameters ---------- image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : scalar or sequence of scalars, optional the minimum standard deviation for Gaussian kernel. Keep this low to detect smaller blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. max_sigma : scalar or sequence of scalars, optional The maximum standard deviation for Gaussian kernel. Keep this high to detect larger blobs. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. sigma_ratio : float, optional The ratio between the standard deviation of Gaussian Kernels used for computing the Difference of Gaussians threshold : float, optional. The absolute lower bound for scale space maxima. Local maxima smaller than thresh are ignored. Reduce this to detect blobs with less intensities. overlap : float, optional A value between 0 and 1. If the area of two blobs overlaps by a fraction greater than `threshold`, the smaller blob is eliminated. 
exclude_border : int or bool, optional If nonzero int, `exclude_border` excludes blobs from within `exclude_border`-pixels of the border of the image. Returns ------- A : (n, image.ndim + sigma) ndarray A 2d array with each row representing 2 coordinate values for a 2D image, and 3 coordinate values for a 3D image, plus the sigma(s) used. When a single sigma is passed, outputs are: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard deviation of the Gaussian kernel which detected the blob. When an anisotropic gaussian is used (sigmas per dimension), the detected sigma is returned for each dimension. References ---------- .. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach Examples -------- >>> from skimage import data, feature >>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40) array([[ 267. , 359. , 16.777216], [ 267. , 115. , 10.48576 ], [ 263. , 302. , 16.777216], [ 263. , 245. , 16.777216], [ 261. , 173. , 16.777216], [ 260. , 46. , 16.777216], [ 198. , 155. , 10.48576 ], [ 196. , 43. , 10.48576 ], [ 195. , 102. , 16.777216], [ 194. , 277. , 16.777216], [ 193. , 213. , 16.777216], [ 185. , 347. , 16.777216], [ 128. , 154. , 10.48576 ], [ 127. , 102. , 10.48576 ], [ 125. , 208. , 10.48576 ], [ 125. , 45. , 16.777216], [ 124. , 337. , 10.48576 ], [ 120. , 272. , 16.777216], [ 58. , 100. , 10.48576 ], [ 54. , 276. , 10.48576 ], [ 54. , 42. , 16.777216], [ 52. , 216. , 16.777216], [ 52. , 155. , 16.777216], [ 45. , 336. , 16.777216]]) Notes ----- The radius of each blob is approximately :math:`\sqrt{2}\sigma` for a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """""" image = img_as_float(image) # Gaussian filter requires that sequence-type sigmas have same # dimensionality as image. 
This broadcasts scalar kernels if isinstance(max_sigma, (int, float)): max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float) if isinstance(min_sigma, (int, float)): min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float) # Convert sequence types to array min_sigma = np.asarray(min_sigma, dtype=float) max_sigma = np.asarray(max_sigma, dtype=np.float) # k such that min_sigma*(sigma_ratio**k) > max_sigma k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1)) # a geometric progression of standard deviations for gaussian kernels sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)]) gaussian_images = [gaussian_filter(image, s) for s in sigma_list] # computing difference between two successive Gaussian blurred images # multiplying with average standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * np.mean(sigma_list[i]) for i in range(k)] image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=exclude_border) # Catch no peaks if local_maxima.size == 0: return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # translate final column of lm, which contains the index of the # sigma that produced the maximum intensity value, into the sigma sigmas_of_peaks = sigma_list[local_maxima[:, -1]] # if the gaussian is isotropic, the stdev across dimensions are # identical, so return only the stdev deviation of the first dimension if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,): sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None] # Remove sigma index and replace with sigmas lm = np.hstack([lm[:, :-1], sigmas_of_peaks]) return _prune_blobs(lm, overlap) " 9466,"def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(required=True), metric_name=dict(), type=dict(default='regular'), rate_limit=dict(default=2000), state=dict(default='present', choices=['present', 'absent']), conditions=dict(type='list'), purge_conditions=dict(type='bool', default=False) ), ) module = AnsibleAWSModule(argument_spec=argument_spec) state = module.params.get('state') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) client = boto3_conn(module, conn_type='client', resource='waf', region=region, endpoint=ec2_url, **aws_connect_kwargs) if state == 'present': (changed, results) = ensure_rule_present(client, module) else: (changed, results) = ensure_rule_absent(client, module) module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results)) ","def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(required=True), metric_name=dict(), type=dict(default='regular', choices=['rate_based', 'regular']), rate_limit=dict(default=2000), state=dict(default='present', choices=['present', 'absent']), conditions=dict(type='list'), purge_conditions=dict(type='bool', default=False) ), ) module = AnsibleAWSModule(argument_spec=argument_spec) state = module.params.get('state') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) client = boto3_conn(module, conn_type='client', resource='waf', region=region, endpoint=ec2_url, **aws_connect_kwargs) if state == 'present': (changed, results) = ensure_rule_present(client, module) else: (changed, results) = 
ensure_rule_absent(client, module) module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results)) " 32377,"def fetch_incidents(args): client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration'), ) last_fetch = demisto.getLastRun() first_fetch = demisto.params().get('first_fetch') attribute_key = demisto.params().get('AttributeKey') if not attribute_key: attribute_key = 'EventName' attribute_value = demisto.params().get('AttributeValue') fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch) incidents = [] incident_created_time = fetch_start_time kwargs = { 'LookupAttributes': [{ 'AttributeKey': attribute_key, 'AttributeValue': attribute_value }] } kwargs.update({'StartTime': fetch_start_time}) client.lookup_events(**kwargs) paginator = client.get_paginator('lookup_events') for response in paginator.paginate(**kwargs): for i, event in enumerate(response['Events']): incident = { 'EventId': event.get('EventId'), 'Name': event.get('EventName'), 'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')), 'EventSource': event.get('EventSource'), 'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None, 'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None, 'CloudTrailEvent': event.get('CloudTrailEvent'), 'Username': event.get('Username'), 'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str) } incidents.append(incident) incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp() if incident_created_time > fetch_start_time: last_fetch = str(incident_created_time) demisto.setLastRun(last_fetch) demisto.incidents(incidents) ","def fetch_incidents(args: dict, params: dict): client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration'), ) last_fetch = demisto.getLastRun() first_fetch = demisto.params().get('first_fetch') attribute_key = demisto.params().get('AttributeKey') if not attribute_key: attribute_key = 'EventName' attribute_value = demisto.params().get('AttributeValue') fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch) incidents = [] incident_created_time = fetch_start_time kwargs = { 'LookupAttributes': [{ 'AttributeKey': attribute_key, 'AttributeValue': attribute_value }] } kwargs.update({'StartTime': fetch_start_time}) client.lookup_events(**kwargs) paginator = client.get_paginator('lookup_events') for response in paginator.paginate(**kwargs): for i, event in enumerate(response['Events']): incident = { 'EventId': event.get('EventId'), 'Name': event.get('EventName'), 'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')), 'EventSource': event.get('EventSource'), 'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None, 'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None, 'CloudTrailEvent': event.get('CloudTrailEvent'), 'Username': event.get('Username'), 'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str) } incidents.append(incident) incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp() if incident_created_time > fetch_start_time: last_fetch = 
str(incident_created_time) demisto.setLastRun(last_fetch) demisto.incidents(incidents) " 36633,"def main(): import getopt from platform import system from idlelib import testing # bool value from idlelib import macosx global flist, root, use_subprocess capture_warnings(True) use_subprocess = True enable_shell = False enable_edit = False debug = False cmd = None script = None startup = False try: opts, args = getopt.getopt(sys.argv[1:], ""c:deihnr:st:"") except getopt.error as msg: print(""Error: %s\n%s"" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': cmd = a enable_shell = True if o == '-d': debug = True enable_shell = True if o == '-e': enable_edit = True if o == '-h': sys.stdout.write(usage_msg) sys.exit() if o == '-i': enable_shell = True if o == '-n': print("" Warning: running IDLE without a subprocess is deprecated."", file=sys.stderr) use_subprocess = False if o == '-r': script = a if os.path.isfile(script): pass else: print(""No script file: "", script) sys.exit() enable_shell = True if o == '-s': startup = True enable_shell = True if o == '-t': PyShell.shell_title = a enable_shell = True if args and args[0] == '-': cmd = sys.stdin.read() enable_shell = True # process sys.argv and sys.path: for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) if args and args[0] == '-': sys.argv = [''] + args[1:] elif cmd: sys.argv = ['-c'] + args elif script: sys.argv = [script] + args elif args: enable_edit = True pathx = [] for filename in args: pathx.append(os.path.dirname(filename)) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) else: dir = os.getcwd() if dir not in sys.path: sys.path.insert(0, dir) # check the IDLE settings configuration (but command line overrides) edit_start = idleConf.GetOption('main', 'General', 'editor-on-startup', type='bool') enable_edit = enable_edit or edit_start enable_shell = enable_shell or not enable_edit # Setup root. Don't break user code run in IDLE process. # Don't change environment when testing. if use_subprocess and not testing: NoDefaultRoot() root = Tk(className=""Idle"") root.withdraw() from idlelib.run import fix_scaling fix_scaling(root) # set application icon icondir = os.path.join(os.path.dirname(__file__), 'Icons') if system() == 'Windows': iconfile = os.path.join(icondir, 'idle.ico') root.wm_iconbitmap(default=iconfile) elif not macosx.isAquaTk(): if TkVersion >= 8.6: ext = '.png' sizes = (16, 32, 48, 256) else: ext = '.gif' sizes = (16, 32, 48) iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext)) for size in sizes] icons = [PhotoImage(master=root, file=iconfile) for iconfile in iconfiles] root.wm_iconphoto(True, *icons) # start editor and/or shell windows: fixwordbreaks(root) fix_x11_paste(root) flist = PyShellFileList(root) macosx.setupApp(root, flist) if enable_edit: if not (cmd or script): for filename in args[:]: if flist.open(filename) is None: # filename is a directory actually, disconsider it args.remove(filename) if not args: flist.new() if enable_shell: shell = flist.open_shell() if not shell: return # couldn't open shell if macosx.isAquaTk() and flist.dict: # On OSX: when the user has double-clicked on a file that causes # IDLE to be launched the shell window will open just in front of # the file she wants to see. Lower the interpreter window when # there are open files. shell.top.lower() else: shell = flist.pyshell # Handle remaining options. 
If any of these are set, enable_shell # was set also, so shell must be true to reach here. if debug: shell.open_debugger() if startup: filename = os.environ.get(""IDLESTARTUP"") or \ os.environ.get(""PYTHONSTARTUP"") if filename and os.path.isfile(filename): shell.interp.execfile(filename) if cmd or script: shell.interp.runcommand(""""""if 1: import sys as _sys _sys.argv = %r del _sys \n"""""" % (sys.argv,)) if cmd: shell.interp.execsource(cmd) elif script: shell.interp.prepend_syspath(script) shell.interp.execfile(script) elif shell: # If there is a shell window and no cmd or script in progress, # check for problematic issues and print warning message(s) in # the IDLE shell window; this is less intrusive than always # opening a separate window. # Warn if using a problematic OS X Tk version. tkversionwarning = macosx.tkVersionWarning(root) if tkversionwarning: shell.show_warning(tkversionwarning) # Warn if the ""Prefer tabs when opening documents"" system # preference is set to ""Always"". prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning() if prefer_tabs_preference_warning: shell.show_warning(prefer_tabs_preference_warning) while flist.inversedict: # keep IDLE running while files are open. root.mainloop() root.destroy() capture_warnings(False) import _tkinter _tkinter.finalize_tcl() ","def main(): import getopt from platform import system from idlelib import testing # bool value from idlelib import macosx global flist, root, use_subprocess capture_warnings(True) use_subprocess = True enable_shell = False enable_edit = False debug = False cmd = None script = None startup = False try: opts, args = getopt.getopt(sys.argv[1:], ""c:deihnr:st:"") except getopt.error as msg: print(""Error: %s\n%s"" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': cmd = a enable_shell = True if o == '-d': debug = True enable_shell = True if o == '-e': enable_edit = True if o == '-h': sys.stdout.write(usage_msg) sys.exit() if o == '-i': enable_shell = True if o == '-n': print("" Warning: running IDLE without a subprocess is deprecated."", file=sys.stderr) use_subprocess = False if o == '-r': script = a if os.path.isfile(script): pass else: print(""No script file: "", script) sys.exit() enable_shell = True if o == '-s': startup = True enable_shell = True if o == '-t': PyShell.shell_title = a enable_shell = True if args and args[0] == '-': cmd = sys.stdin.read() enable_shell = True # process sys.argv and sys.path: for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) if args and args[0] == '-': sys.argv = [''] + args[1:] elif cmd: sys.argv = ['-c'] + args elif script: sys.argv = [script] + args elif args: enable_edit = True pathx = [] for filename in args: pathx.append(os.path.dirname(filename)) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) else: dir = os.getcwd() if dir not in sys.path: sys.path.insert(0, dir) # check the IDLE settings configuration (but command line overrides) edit_start = idleConf.GetOption('main', 'General', 'editor-on-startup', type='bool') enable_edit = enable_edit or edit_start enable_shell = enable_shell or not enable_edit # Setup root. Don't break user code run in IDLE process. # Don't change environment when testing. 
if use_subprocess and not testing: NoDefaultRoot() root = Tk(className=""Idle"") root.withdraw() from idlelib.run import fix_scaling fix_scaling(root) # set application icon icondir = os.path.join(os.path.dirname(__file__), 'Icons') if system() == 'Windows': iconfile = os.path.join(icondir, 'idle.ico') root.wm_iconbitmap(default=iconfile) elif not macosx.isAquaTk(): if TkVersion >= 8.6: ext = '.png' sizes = (16, 32, 48, 256) else: ext = '.gif' sizes = (16, 32, 48) iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext)) for size in sizes] icons = [PhotoImage(master=root, file=iconfile) for iconfile in iconfiles] root.wm_iconphoto(True, *icons) # start editor and/or shell windows: fixwordbreaks(root) fix_x11_paste(root) flist = PyShellFileList(root) macosx.setupApp(root, flist) if enable_edit: if not (cmd or script): for filename in args[:]: if flist.open(filename) is None: # filename is a directory actually, disconsider it args.remove(filename) if not args: flist.new() if enable_shell: shell = flist.open_shell() if not shell: return # couldn't open shell if macosx.isAquaTk() and flist.dict: # On OSX: when the user has double-clicked on a file that causes # IDLE to be launched the shell window will open just in front of # the file she wants to see. Lower the interpreter window when # there are open files. shell.top.lower() else: shell = flist.pyshell # Handle remaining options. If any of these are set, enable_shell # was set also, so shell must be true to reach here. if debug: shell.open_debugger() if startup: filename = os.environ.get(""IDLESTARTUP"") or \ os.environ.get(""PYTHONSTARTUP"") if filename and os.path.isfile(filename): shell.interp.execfile(filename) if cmd or script: shell.interp.runcommand(""""""if 1: import sys as _sys _sys.argv = %r del _sys \n"""""" % (sys.argv,)) if cmd: shell.interp.execsource(cmd) elif script: shell.interp.prepend_syspath(script) shell.interp.execfile(script) elif shell: # If there is a shell window and no cmd or script in progress, # check for problematic issues and print warning message(s) in # the IDLE shell window; this is less intrusive than always # opening a separate window. # Warn if using a problematic OS X Tk version. tkversionwarning = macosx.tkVersionWarning(root) if tkversionwarning: shell.show_warning(tkversionwarning) # Warn if the ""Prefer tabs when opening documents"" system # preference is set to ""Always"". prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning() if prefer_tabs_preference_warning: shell.show_warning(prefer_tabs_preference_warning) while flist.inversedict: # keep IDLE running while files are open. 
root.mainloop() root.destroy() capture_warnings(False) import _tkinter _tkinter._finalize_tcl() " 104,"def map_data(entry): """"""Maps Standard Ebooks feed entry to an Open Library import object."""""" std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '') mapped_data = { ""title"": entry.title, ""source_records"": [f""standardebooks:{std_ebooks_id}""], ""publishers"": [entry.publisher], ""publish_date"": entry.dc_issued[0:4], ""authors"": [{""name"": author.name} for author in entry.authors], ""description"": entry.content[0].value, ""subjects"": [tag.term for tag in entry.tags], ""identifiers"": { ""standard_ebooks"": [std_ebooks_id] } } return mapped_data ","def map_data(entry) -> dict: """"""Maps Standard Ebooks feed entry to an Open Library import object."""""" std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '') mapped_data = { ""title"": entry.title, ""source_records"": [f""standardebooks:{std_ebooks_id}""], ""publishers"": [entry.publisher], ""publish_date"": entry.dc_issued[0:4], ""authors"": [{""name"": author.name} for author in entry.authors], ""description"": entry.content[0].value, ""subjects"": [tag.term for tag in entry.tags], ""identifiers"": { ""standard_ebooks"": [std_ebooks_id] } } return mapped_data " 25047,"def _called_in_methods( func: nodes.FunctionDef | nodes.Module, klass: nodes.ClassDef, methods: tuple[str, str, str, str], ) -> bool: """"""Check if the func was called in any of the given methods, belonging to the *klass*. Returns True if so, False otherwise. """""" if not isinstance(func, nodes.FunctionDef): return False for method in methods: try: inferred = klass.getattr(method) except astroid.NotFoundError: continue for infer_method in inferred: for call in infer_method.nodes_of_class(nodes.Call): try: bound = next(call.func.infer()) except (astroid.InferenceError, StopIteration): continue if not isinstance(bound, astroid.BoundMethod): continue func_obj = bound._proxied if isinstance(func_obj, astroid.UnboundMethod): func_obj = func_obj._proxied if func_obj.name == func.name: return True return False ","def _called_in_methods( func: nodes.FunctionDef | nodes.Module, klass: nodes.ClassDef, methods: Sequence[str], ) -> bool: """"""Check if the func was called in any of the given methods, belonging to the *klass*. Returns True if so, False otherwise. 
"""""" if not isinstance(func, nodes.FunctionDef): return False for method in methods: try: inferred = klass.getattr(method) except astroid.NotFoundError: continue for infer_method in inferred: for call in infer_method.nodes_of_class(nodes.Call): try: bound = next(call.func.infer()) except (astroid.InferenceError, StopIteration): continue if not isinstance(bound, astroid.BoundMethod): continue func_obj = bound._proxied if isinstance(func_obj, astroid.UnboundMethod): func_obj = func_obj._proxied if func_obj.name == func.name: return True return False " 3209,"def in_icontains(column, values): """"""Operator to test if any of the given values are (case-insensitively) contained within values in the given column."""""" from operator import or_ query = u""{}__icontains"".format(column) return reduce(or_, [Q(**{query: v}) for v in values]) ","def in_icontains(column, values): """"""Operator to test if any of the given values are (case-insensitive) contained within values in the given column."""""" from operator import or_ query = u""{}__icontains"".format(column) return reduce(or_, [Q(**{query: v}) for v in values]) " 46059,"def nms_rotated(dets: torch.Tensor, scores: torch.Tensor, iou_threshold: float, labels: Optional[torch.Tensor] = None, clockwise: bool = True) -> Tuple[torch.Tensor, torch.Tensor]: """"""Performs non-maximum suppression (NMS) on the rotated boxes according to their intersection-over-union (IoU). Rotated NMS iteratively removes lower scoring rotated boxes which have an IoU greater than iou_threshold with another (higher scoring) rotated box. Args: dets (torch.Tensor): Rotated boxes in shape (N, 5). They are expected to be in (x_ctr, y_ctr, width, height, angle_radian) format. scores (torch.Tensor): scores in shape (N, ). iou_threshold (float): IoU thresh for NMS. labels (torch.Tensor): boxes' label in shape (N,). clockwise (bool): flag indicating whether the positive angular orientation is clockwise. default True. `New in version 1.4.3.` Returns: tuple: kept dets(boxes and scores) and indice, which is always the same data type as the input. """""" if dets.shape[0] == 0: return dets, None if not clockwise: flip_mat = dets.new_ones(dets.shape[-1]) flip_mat[-1] = -1 dets_cw = dets * flip_mat else: dets_cw = dets multi_label = labels is not None if multi_label: dets_wl = torch.cat((dets_cw, labels.unsqueeze(1)), 1) # type: ignore else: dets_wl = dets_cw _, order = scores.sort(0, descending=True) dets_sorted = dets_wl.index_select(0, order) if torch.__version__ == 'parrots': keep_inds = ext_module.nms_rotated( dets_wl, scores, order, dets_sorted, iou_threshold=iou_threshold, multi_label=multi_label) else: keep_inds = ext_module.nms_rotated(dets_wl, scores, order, dets_sorted, iou_threshold, multi_label) dets = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)), dim=1) return dets, keep_inds ","def nms_rotated(dets: torch.Tensor, scores: torch.Tensor, iou_threshold: float, labels: Optional[torch.Tensor] = None, clockwise: bool = True) -> Tuple[torch.Tensor, torch.Tensor]: """"""Performs non-maximum suppression (NMS) on the rotated boxes according to their intersection-over-union (IoU). Rotated NMS iteratively removes lower scoring rotated boxes which have an IoU greater than iou_threshold with another (higher scoring) rotated box. Args: dets (torch.Tensor): Rotated boxes in shape (N, 5). They are expected to be in (x_ctr, y_ctr, width, height, angle_radian) format. scores (torch.Tensor): scores in shape (N, ). iou_threshold (float): IoU thresh for NMS. 
labels (torch.Tensor, optional): boxes' label in shape (N,). clockwise (bool): flag indicating whether the positive angular orientation is clockwise. default True. `New in version 1.4.3.` Returns: tuple: kept dets(boxes and scores) and indice, which is always the same data type as the input. """""" if dets.shape[0] == 0: return dets, None if not clockwise: flip_mat = dets.new_ones(dets.shape[-1]) flip_mat[-1] = -1 dets_cw = dets * flip_mat else: dets_cw = dets multi_label = labels is not None if multi_label: dets_wl = torch.cat((dets_cw, labels.unsqueeze(1)), 1) # type: ignore else: dets_wl = dets_cw _, order = scores.sort(0, descending=True) dets_sorted = dets_wl.index_select(0, order) if torch.__version__ == 'parrots': keep_inds = ext_module.nms_rotated( dets_wl, scores, order, dets_sorted, iou_threshold=iou_threshold, multi_label=multi_label) else: keep_inds = ext_module.nms_rotated(dets_wl, scores, order, dets_sorted, iou_threshold, multi_label) dets = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)), dim=1) return dets, keep_inds " 20011,"def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False): """"""This function calculates the intensity of each pixel associated with the plant and writes the values out to a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant. Inputs: gray_img = 8- or 16-bit grayscale image data mask = Binary mask made from selected contours bins = number of classes to divide spectrum into histplot = if True plots histogram of intensity values Returns: analysis_images = NIR histogram image :param gray_img: numpy array :param mask: numpy array :param bins: int :param histplot: bool :return analysis_images: plotnine ggplot """""" # apply plant shaped mask to image mask1 = binary_threshold(mask, 0, 255, 'light') mask1 = (mask1 / 255) # masked = np.multiply(gray_img, mask1) # calculate histogram if gray_img.dtype == 'uint16': maxval = 65536 else: maxval = 256 masked_array = gray_img[np.where(mask > 0)] masked_nir_mean = np.average(masked_array) masked_nir_median = np.median(masked_array) masked_nir_std = np.std(masked_array) # Make a pseudo-RGB image rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR) # Calculate histogram hist_nir = [float(l[0]) for l in cv2.calcHist([gray_img], [0], mask, [bins], [0, maxval])] # Create list of bin labels bin_width = maxval / float(bins) b = 0 bin_labels = [float(b)] for i in range(bins - 1): b += bin_width bin_labels.append(b) # make hist percentage for plotting pixels = cv2.countNonZero(mask1) hist_percent = [(p / float(pixels)) * 100 for p in hist_nir] masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask) if params.debug is not None: params.device += 1 if params.debug == ""print"": print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + ""_masked_nir_plant.png"")) if params.debug == ""plot"": plot_image(masked1) analysis_image = None if histplot is True: hist_x = hist_percent # bin_labels = np.arange(0, bins) dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels, 'Proportion of pixels (%)': hist_x}) fig_hist = (ggplot(data=dataset, mapping=aes(x='Grayscale pixel intensity', y='Proportion of pixels (%)')) + geom_line(color='red') + scale_x_continuous(breaks=list(range(0, maxval, 25)))) analysis_image = fig_hist if params.debug == ""print"": fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png')) elif params.debug == ""plot"": print(fig_hist) outputs.add_observation(variable='nir_frequencies', 
trait='near-infrared frequencies', method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list, value=hist_nir, label=bin_labels) outputs.add_observation(variable='nir_mean', trait='near-infrared mean', method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float, value=masked_nir_mean, label='none') outputs.add_observation(variable='nir_median', trait='near-infrared median', method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=float, value=masked_nir_median, label='none') outputs.add_observation(variable='nir_stdev', trait='near-infrared standard deviation', method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=float, value=masked_nir_std, label='none') # Store images outputs.images.append(analysis_image) return analysis_image ","def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False): """"""This function calculates the intensity of each pixel associated with the plant and writes the values out to a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant. Inputs: gray_img = 8- or 16-bit grayscale image data mask = Binary mask made from selected contours bins = number of classes to divide spectrum into histplot = if True plots histogram of intensity values Returns: analysis_images = NIR histogram image :param gray_img: numpy array :param mask: numpy array :param bins: int :param histplot: bool :return analysis_images: plotnine ggplot """""" # apply plant shaped mask to image mask1 = binary_threshold(mask, 0, 255, 'light') mask1 = (mask1 / 255) # masked = np.multiply(gray_img, mask1) # calculate histogram if gray_img.dtype == 'uint16': maxval = 65536 else: maxval = 256 masked_array = gray_img[np.where(mask > 0)] masked_nir_mean = np.average(masked_array) masked_nir_median = np.median(masked_array) masked_nir_std = np.std(masked_array) # Make a pseudo-RGB image rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR) # Calculate histogram hist_nir = [float(l[0]) for l in cv2.calcHist([gray_img], [0], mask, [bins], [0, maxval])] # Create list of bin labels bin_width = maxval / float(bins) b = 0 bin_labels = [float(b)] for i in range(bins - 1): b += bin_width bin_labels.append(b) # make hist percentage for plotting pixels = cv2.countNonZero(mask1) hist_percent = [(p / float(pixels)) * 100 for p in hist_nir] masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask) if params.debug is not None: params.device += 1 if params.debug == ""print"": print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + ""_masked_nir_plant.png"")) if params.debug == ""plot"": plot_image(masked1) analysis_image = None if histplot is True: hist_x = hist_percent # bin_labels = np.arange(0, bins) dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels, 'Proportion of pixels (%)': hist_x}) fig_hist = (ggplot(data=dataset, mapping=aes(x='Grayscale pixel intensity', y='Proportion of pixels (%)')) + geom_line(color='red') + scale_x_continuous(breaks=list(range(0, maxval, 25)))) analysis_image = fig_hist if params.debug == ""print"": fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png')) elif params.debug == ""plot"": print(fig_hist) outputs.add_observation(variable='nir_frequencies', trait='near-infrared frequencies', method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list, value=hist_nir, label=bin_labels) outputs.add_observation(variable='nir_mean', trait='near-infrared mean', 
method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float, value=masked_nir_mean, label='none') outputs.add_observation(variable='nir_median', trait='near-infrared median', method='plantcv.plantcv.analyze_nir_intensity', scale='none', datatype=float, value=masked_nir_median, label='none') outputs.add_observation(variable='nir_stdev', trait='near-infrared standard deviation', method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=float, value=masked_nir_std, label='none') # Store images outputs.images.append(analysis_image) return analysis_image " 38811,"def load_config(filenames=None): ret = _SiteConfig() getlogger().debug('Loading the generic configuration') ret.add_config(settings.site_configuration, '') if filenames: getlogger().debug(f'Loading configuration files: {filenames!r}') for filename in filenames: _, ext = os.path.splitext(filename) if ext == '.py': ret.add_python_config(filename) elif ext == '.json': ret.add_json_config(filename) else: raise ConfigError(f""unknown configuration file type: "" f""'{filename}'"") return ret ","def load_config(filenames=None): ret = _SiteConfig() getlogger().debug('Loading the generic configuration') ret.add_config(settings.site_configuration, '') if filenames: getlogger().debug(f'Loading configuration files: {filenames!r}') for f in filenames: _, ext = os.path.splitext(filename) if ext == '.py': ret.add_python_config(filename) elif ext == '.json': ret.add_json_config(filename) else: raise ConfigError(f""unknown configuration file type: "" f""'{filename}'"") return ret " 13572,"def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13): """"""Approximate a few eigenvalues of an |Operator|. Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve the eigenvalue problem .. math:: A v[i] = w[i] v[i] or the generalized eigenvalue problem .. math:: A v[i] = w[i] E v[i] if `E` is not `None`. The implementation is based on Algorithm 4.2 in [RL95]_. Parameters ---------- A The real |Operator| for which the eigenvalues are to be computed. E The |Operator| which defines the generalized eigenvalue problem. k The number of eigenvalues and eigenvectors which are to be computed. which A string specifying which `k` eigenvalues and eigenvectors to compute: - `'LM'`: select eigenvalues with largest |v[i]| - `'SM'`: select eigenvalues with smallest |v[i]| - `'LR'`: select eigenvalues with largest Re(v[i]) - `'SR'`: select eigenvalues with smallest Re(v[i]) - `'LI'`: select eigenvalues with largest Im(v[i]) - `'SI'`: select eigenvalues with smallest Im(v[i]) b Initial vector for Arnoldi iteration. Default is a random vector. l The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`. maxiter The maximum number of iterations. tol The relative error tolerance for the ritz estimates. Returns ------- w A |NumPy array| which contains the computed eigenvalues. v A |VectorArray| which contains the computed eigenvectors. 
"""""" n = A.source.dim if l is None: l = np.min((n - 1, np.max((2 * k + 1, 20)))) if E is None: E = IdentityOperator(A.source) assert A.source == A.range assert E.source == A.source assert E.range == A.source assert k < n assert l > k if b is None: b = A.source.random() V, H, f = arnoldi(A, E, k, b) k0 = k i = 0 while True: i = i + 1 V, H, f = extend_arnoldi(A, E, V, H, f, l - k) ew, ev = spla.eig(H) # truncate small imaginary parts ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0 if which == 'LM': idx = np.argsort(-np.abs(ew)) elif which == 'SM': idx = np.argsort(np.abs(ew)) elif which == 'LR': idx = np.argsort(-np.real(ew)) elif which == 'SR': idx = np.argsort(np.real(ew)) elif which == 'LI': idx = np.argsort(-np.abs(np.imag(ew))) elif which == 'SI': idx = np.argsort(np.abs(np.imag(ew))) k = k0 ews = ew[idx] evs = ev[:, idx] rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews) # increase k by one in order to keep complex conjugate pairs together if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12: k = k + 1 if np.all(rres[:k] <= tol) or i >= maxiter: break # increase k in order to prevent stagnation k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2)))) # sort shifts for QR iteration based on their residual shifts = ews[k:l] srres = rres[k:l] idx = np.argsort(-srres) srres = srres[idx] shifts = shifts[idx] # don't use converged unwanted ritzvalues as shifts shifts = np.delete(shifts, np.where(srres == 0)) k = k + np.count_nonzero(srres == 0) if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12: shifts = shifts[1:] k = k + 1 H, Qs = QR_iteration(H, shifts) V = V.lincomb(Qs.T) f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1] V = V[:k] H = H[:k, :k] return ews[:k0], V.lincomb(evs[:, :k0].T) ","def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13): """"""Approximate a few eigenvalues of an |Operator|. Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve the eigenvalue problem .. math:: A v[i] = w[i] v[i] or the generalized eigenvalue problem .. math:: A v[i] = w[i] E v[i] if `E` is not `None`. The implementation is based on Algorithm 4.2 in [RL95]_. Parameters ---------- A The real |Operator| for which the eigenvalues are to be computed. E The |Operator| which defines the generalized eigenvalue problem. k The number of eigenvalues and eigenvectors which are to be computed. which A string specifying which `k` eigenvalues and eigenvectors to compute: - `'LM'`: select eigenvalues with largest |v[i]| - `'SM'`: select eigenvalues with smallest |v[i]| - `'LR'`: select eigenvalues with largest Re(v[i]) - `'SR'`: select eigenvalues with smallest Re(v[i]) - `'LI'`: select eigenvalues with largest Im(v[i]) - `'SI'`: select eigenvalues with smallest Im(v[i]) b Initial vector for Arnoldi iteration. Default is a random vector. l The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`. maxiter The maximum number of iterations. tol The relative error tolerance for the ritz estimates. Returns ------- w A |NumPy array| which contains the computed eigenvalues. v A |VectorArray| which contains the computed eigenvectors. 
"""""" n = A.source.dim if l is None: l = min(n - 1, max(2 * k + 1, 20)) if E is None: E = IdentityOperator(A.source) assert A.source == A.range assert E.source == A.source assert E.range == A.source assert k < n assert l > k if b is None: b = A.source.random() V, H, f = arnoldi(A, E, k, b) k0 = k i = 0 while True: i = i + 1 V, H, f = extend_arnoldi(A, E, V, H, f, l - k) ew, ev = spla.eig(H) # truncate small imaginary parts ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0 if which == 'LM': idx = np.argsort(-np.abs(ew)) elif which == 'SM': idx = np.argsort(np.abs(ew)) elif which == 'LR': idx = np.argsort(-np.real(ew)) elif which == 'SR': idx = np.argsort(np.real(ew)) elif which == 'LI': idx = np.argsort(-np.abs(np.imag(ew))) elif which == 'SI': idx = np.argsort(np.abs(np.imag(ew))) k = k0 ews = ew[idx] evs = ev[:, idx] rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews) # increase k by one in order to keep complex conjugate pairs together if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12: k = k + 1 if np.all(rres[:k] <= tol) or i >= maxiter: break # increase k in order to prevent stagnation k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2)))) # sort shifts for QR iteration based on their residual shifts = ews[k:l] srres = rres[k:l] idx = np.argsort(-srres) srres = srres[idx] shifts = shifts[idx] # don't use converged unwanted ritzvalues as shifts shifts = np.delete(shifts, np.where(srres == 0)) k = k + np.count_nonzero(srres == 0) if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12: shifts = shifts[1:] k = k + 1 H, Qs = QR_iteration(H, shifts) V = V.lincomb(Qs.T) f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1] V = V[:k] H = H[:k, :k] return ews[:k0], V.lincomb(evs[:, :k0].T) " 13957,"def sensemon_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None, horizon=False): """"""Approximate the inspiral sensitive distance from a GW strain PSD This method returns the distance (in megaparsecs) to which a compact binary inspiral with the given component masses would be detectable given the instrumental PSD. 
The calculation is as defined in: https://dcc.ligo.org/LIGO-T030276/public Parameters ---------- psd : `~gwpy.frequencyseries.FrequencySeries` the instrumental power-spectral-density data snr : `float`, optional the signal-to-noise ratio for which to calculate range, default: `8` mass1 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the first binary component, default: `1.4` mass2 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the second binary component, default: `1.4` fmin : `float`, optional the lower frequency cut-off of the integral, default: `psd.df` fmax : `float`, optional the maximum frequency limit of the integral, defaults to innermost stable circular orbit (ISCO) frequency horizon : `bool`, optional if `True`, return the maximal 'horizon' sensitive distance, otherwise return the angle-averaged range, default: `False` Returns ------- range : `~astropy.units.Quantity` the calculated inspiral range [Mpc] Examples -------- Grab some data for LIGO-Livingston around GW150914 and generate a PSD: >>> from gwpy.timeseries import TimeSeries >>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> hoff = hoft.psd(fftlength=4) Now, we can calculate the :func:`sensemon_range`: >>> from gwpy.astro import sensemon_range >>> r = sensemon_range(hoff, fmin=30) >>> print(r) 70.4612102889 Mpc """""" fisco = _get_isco_frequency(mass1, mass2) # format frequency limits fmin = units.Quantity(fmin or psd.df.value, 'Hz') # avoid DC value fmax = units.Quantity(fmax or fisco.value, 'Hz') if fmax > fisco: warnings.warn(""Upper frequency bound greater than %s-%s ISCO "" ""frequency of %s, using ISCO"" % (mass1, mass2, fisco)) fmax = fisco # integrate and return f = psd.frequencies.to('Hz') frange = (f >= fmin) & (f < fmax) integrand = sensemon_range_psd(psd[frange], snr=snr, mass1=mass1, mass2=mass2, horizon=horizon) return (units.Quantity( trapz(integrand.value, f.value[frange]), unit=integrand.unit * units.Hertz, ) ** (1/2.)).to('Mpc') ","def sensemon_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None, horizon=False): """"""Approximate the inspiral sensitive distance from a GW strain PSD This method returns the distance (in megaparsecs) to which a compact binary inspiral with the given component masses would be detectable given the instrumental PSD. 
The calculation is as defined in: https://dcc.ligo.org/LIGO-T030276/public Parameters ---------- psd : `~gwpy.frequencyseries.FrequencySeries` the instrumental power-spectral-density data snr : `float`, optional the signal-to-noise ratio for which to calculate range, default: `8` mass1 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the first binary component, default: `1.4` mass2 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the second binary component, default: `1.4` fmin : `float`, optional the lower frequency cut-off of the integral, default: `psd.df` fmax : `float`, optional the maximum frequency limit of the integral, defaults to innermost stable circular orbit (ISCO) frequency horizon : `bool`, optional if `True`, return the maximal 'horizon' sensitive distance, otherwise return the angle-averaged range, default: `False` Returns ------- range : `~astropy.units.Quantity` the calculated inspiral range [Mpc] Examples -------- Grab some data for LIGO-Livingston around GW150914 and generate a PSD: >>> from gwpy.timeseries import TimeSeries >>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> hoff = hoft.psd(fftlength=4) Now, we can calculate the :func:`sensemon_range`: >>> from gwpy.astro import sensemon_range >>> r = sensemon_range(hoff, fmin=30) >>> print(r) Now we can calculate the :func:`sensemon_range`: """""" fisco = _get_isco_frequency(mass1, mass2) # format frequency limits fmin = units.Quantity(fmin or psd.df.value, 'Hz') # avoid DC value fmax = units.Quantity(fmax or fisco.value, 'Hz') if fmax > fisco: warnings.warn(""Upper frequency bound greater than %s-%s ISCO "" ""frequency of %s, using ISCO"" % (mass1, mass2, fisco)) fmax = fisco # integrate and return f = psd.frequencies.to('Hz') frange = (f >= fmin) & (f < fmax) integrand = sensemon_range_psd(psd[frange], snr=snr, mass1=mass1, mass2=mass2, horizon=horizon) return (units.Quantity( trapz(integrand.value, f.value[frange]), unit=integrand.unit * units.Hertz, ) ** (1/2.)).to('Mpc') " 42028,"def _get_param_importance_plot( study: Study, evaluator: Optional[BaseImportanceEvaluator] = None, params: Optional[List[str]] = None, target: Optional[Callable[[FrozenTrial], float]] = None, target_name: str = ""Objective Value"", ) -> ""Axes"": # Set up the graph style. plt.style.use(""ggplot"") # Use ggplot style sheet for similar outputs to plotly. fig, ax = plt.subplots() ax.set_title(""Hyperparameter Importances"") ax.set_xlabel(f""Importance for {target_name}"") ax.set_ylabel(""Hyperparameter"") # Prepare data for plotting. # Importances cannot be evaluated without completed trials. # Return an empty figure for consistency with other visualization functions. trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE] if len(trials) == 0: _logger.warning(""Study instance does not contain completed trials."") return ax importances = optuna.importance.get_param_importances( study, evaluator=evaluator, params=params, target=target ) importances = OrderedDict(reversed(list(importances.items()))) importance_values = list(importances.values()) param_names = list(importances.keys()) pos = np.arange(len(param_names)) # Draw horizontal bars. 
ax.barh( pos, importance_values, align=""center"", color=cm.get_cmap(""tab20c"")(0), tick_label=param_names, ) renderer = fig.canvas.get_renderer() for idx, val in enumerate(importance_values): label = str(round(val, 2)) if val >= 0.01 else ""<0.01"" text = ax.text(val, idx, label, va=""center"") # Sometimes horizontal axis needs to be re-scaled # to avoid text going over plot area. bbox = text.get_window_extent(renderer) bbox = bbox.transformed(ax.transData.inverted()) _, plot_xmax = ax.get_xlim() bbox_xmax = bbox.x1 if bbox_xmax > plot_xmax: ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax) return ax ","def _get_param_importance_plot( study: Study, evaluator: Optional[BaseImportanceEvaluator] = None, params: Optional[List[str]] = None, target: Optional[Callable[[FrozenTrial], float]] = None, target_name: str = ""Objective Value"", ) -> ""Axes"": # Set up the graph style. plt.style.use(""ggplot"") # Use ggplot style sheet for similar outputs to plotly. fig, ax = plt.subplots() ax.set_title(""Hyperparameter Importances"") ax.set_xlabel(f""Importance for {target_name}"") ax.set_ylabel(""Hyperparameter"") # Prepare data for plotting. # Importances cannot be evaluated without completed trials. # Return an empty figure for consistency with other visualization functions. trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE] if len(trials) == 0: _logger.warning(""Study instance does not contain completed trials."") return ax importances = optuna.importance.get_param_importances( study, evaluator=evaluator, params=params, target=target ) importances = OrderedDict(reversed(list(importances.items()))) importance_values = list(importances.values()) param_names = list(importances.keys()) pos = np.arange(len(param_names)) # Draw horizontal bars. ax.barh( pos, importance_values, align=""center"", color=cm.get_cmap(""tab20c"")(0), tick_label=param_names, ) renderer = fig.canvas.get_renderer() for idx, val in enumerate(importance_values): label = f""{val:.2f}"" if val >= 0.01 else ""<0.01"" text = ax.text(val, idx, label, va=""center"") # Sometimes horizontal axis needs to be re-scaled # to avoid text going over plot area. bbox = text.get_window_extent(renderer) bbox = bbox.transformed(ax.transData.inverted()) _, plot_xmax = ax.get_xlim() bbox_xmax = bbox.x1 if bbox_xmax > plot_xmax: ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax) return ax " 4429,"def _concatenate_epochs(epochs_list, with_data=True, add_offset=True, *, on_mismatch='raise'): """"""Auxiliary function for concatenating epochs."""""" if not isinstance(epochs_list, (list, tuple)): raise TypeError('epochs_list must be a list or tuple, got %s' % (type(epochs_list),)) for ei, epochs in enumerate(epochs_list): if not isinstance(epochs, BaseEpochs): raise TypeError('epochs_list[%d] must be an instance of Epochs, ' 'got %s' % (ei, type(epochs))) if hasattr(epochs, 'annotations'): warn('Concatenation of Annotations within Epochs ' 'is not supported yet. 
Annotations within these Epochs will ' 'be dropped.') epochs.set_annotations(None) out = epochs_list[0] offsets = [0] if with_data: out.drop_bad() offsets.append(len(out)) events = [out.events] metadata = [out.metadata] baseline, tmin, tmax = out.baseline, out.tmin, out.tmax info = deepcopy(out.info) verbose = out.verbose drop_log = out.drop_log event_id = deepcopy(out.event_id) selection = out.selection # offset is the last epoch + tmax + 10 second shift = int((10 + tmax) * out.info['sfreq']) events_offset = int(np.max(events[0][:, 0])) + shift events_overflow = False for ii, epochs in enumerate(epochs_list[1:], 1): _ensure_infos_match(epochs.info, info, f'epochs[{ii}]', on_mismatch=on_mismatch) if not np.allclose(epochs.times, epochs_list[0].times): raise ValueError('Epochs must have same times') if epochs.baseline != baseline: raise ValueError('Baseline must be same for all epochs') # compare event_id common_keys = list(set(event_id).intersection(set(epochs.event_id))) for key in common_keys: if not event_id[key] == epochs.event_id[key]: msg = ('event_id values must be the same for identical keys ' 'for all concatenated epochs. Key ""{}"" maps to {} in ' 'some epochs and to {} in others.') raise ValueError(msg.format(key, event_id[key], epochs.event_id[key])) if with_data: epochs.drop_bad() offsets.append(len(epochs)) evs = epochs.events.copy() if len(epochs.events) == 0: warn('One of the Epochs objects to concatenate was empty.') elif add_offset: # We need to cast to a native Python int here to detect an # overflow of a numpy int32 (which is the default on windows) max_timestamp = int(np.max(evs[:, 0])) evs[:, 0] += events_offset events_offset += max_timestamp + shift if events_offset > INT32_MAX: warn(f'Event number greater than {INT32_MAX} created, ' 'events[:, 0] will be assigned consecutive increasing ' 'integer values') events_overflow = True add_offset = False # we no longer need to add offset events.append(evs) selection = np.concatenate((selection, epochs.selection)) drop_log = drop_log + epochs.drop_log event_id.update(epochs.event_id) metadata.append(epochs.metadata) events = np.concatenate(events, axis=0) # check to see if we exceeded our maximum event offset if events_overflow: events[:, 0] = np.arange(1, len(events) + 1) # Create metadata object (or make it None) n_have = sum(this_meta is not None for this_meta in metadata) if n_have == 0: metadata = None elif n_have != len(metadata): raise ValueError('%d of %d epochs instances have metadata, either ' 'all or none must have metadata' % (n_have, len(metadata))) else: pd = _check_pandas_installed(strict=False) if pd is not False: metadata = pd.concat(metadata) else: # dict of dicts metadata = sum(metadata, list()) assert len(offsets) == (len(epochs_list) if with_data else 0) + 1 data = None if with_data: offsets = np.cumsum(offsets) for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list): this_data = epochs.get_data() if data is None: data = np.empty( (offsets[-1], len(out.ch_names), len(out.times)), dtype=this_data.dtype) data[start:stop] = this_data return (info, data, events, event_id, tmin, tmax, metadata, baseline, selection, drop_log, verbose) ","def _concatenate_epochs(epochs_list, with_data=True, add_offset=True, *, on_mismatch='raise'): """"""Auxiliary function for concatenating epochs."""""" if not isinstance(epochs_list, (list, tuple)): raise TypeError('epochs_list must be a list or tuple, got %s' % (type(epochs_list),)) for ei, epochs in enumerate(epochs_list): if not isinstance(epochs, 
BaseEpochs): raise TypeError('epochs_list[%d] must be an instance of Epochs, ' 'got %s' % (ei, type(epochs))) if hasattr(epochs, 'annotations'): warn('Concatenation of Annotations within Epochs ' 'is not supported yet. Annotations within these Epochs will ' 'be dropped.') epochs._set_annotations(None) out = epochs_list[0] offsets = [0] if with_data: out.drop_bad() offsets.append(len(out)) events = [out.events] metadata = [out.metadata] baseline, tmin, tmax = out.baseline, out.tmin, out.tmax info = deepcopy(out.info) verbose = out.verbose drop_log = out.drop_log event_id = deepcopy(out.event_id) selection = out.selection # offset is the last epoch + tmax + 10 second shift = int((10 + tmax) * out.info['sfreq']) events_offset = int(np.max(events[0][:, 0])) + shift events_overflow = False for ii, epochs in enumerate(epochs_list[1:], 1): _ensure_infos_match(epochs.info, info, f'epochs[{ii}]', on_mismatch=on_mismatch) if not np.allclose(epochs.times, epochs_list[0].times): raise ValueError('Epochs must have same times') if epochs.baseline != baseline: raise ValueError('Baseline must be same for all epochs') # compare event_id common_keys = list(set(event_id).intersection(set(epochs.event_id))) for key in common_keys: if not event_id[key] == epochs.event_id[key]: msg = ('event_id values must be the same for identical keys ' 'for all concatenated epochs. Key ""{}"" maps to {} in ' 'some epochs and to {} in others.') raise ValueError(msg.format(key, event_id[key], epochs.event_id[key])) if with_data: epochs.drop_bad() offsets.append(len(epochs)) evs = epochs.events.copy() if len(epochs.events) == 0: warn('One of the Epochs objects to concatenate was empty.') elif add_offset: # We need to cast to a native Python int here to detect an # overflow of a numpy int32 (which is the default on windows) max_timestamp = int(np.max(evs[:, 0])) evs[:, 0] += events_offset events_offset += max_timestamp + shift if events_offset > INT32_MAX: warn(f'Event number greater than {INT32_MAX} created, ' 'events[:, 0] will be assigned consecutive increasing ' 'integer values') events_overflow = True add_offset = False # we no longer need to add offset events.append(evs) selection = np.concatenate((selection, epochs.selection)) drop_log = drop_log + epochs.drop_log event_id.update(epochs.event_id) metadata.append(epochs.metadata) events = np.concatenate(events, axis=0) # check to see if we exceeded our maximum event offset if events_overflow: events[:, 0] = np.arange(1, len(events) + 1) # Create metadata object (or make it None) n_have = sum(this_meta is not None for this_meta in metadata) if n_have == 0: metadata = None elif n_have != len(metadata): raise ValueError('%d of %d epochs instances have metadata, either ' 'all or none must have metadata' % (n_have, len(metadata))) else: pd = _check_pandas_installed(strict=False) if pd is not False: metadata = pd.concat(metadata) else: # dict of dicts metadata = sum(metadata, list()) assert len(offsets) == (len(epochs_list) if with_data else 0) + 1 data = None if with_data: offsets = np.cumsum(offsets) for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list): this_data = epochs.get_data() if data is None: data = np.empty( (offsets[-1], len(out.ch_names), len(out.times)), dtype=this_data.dtype) data[start:stop] = this_data return (info, data, events, event_id, tmin, tmax, metadata, baseline, selection, drop_log, verbose) " 31597,"def get_members(args): try: client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), 
roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration'), ) accountId_list = [] accountId_list.append(args.get('accountIds')) response = client.get_members( DetectorId=args.get('detectorId'), AccountIds=accountId_list ) members_response = response.get('Members') filtered_members = [member for member in members_response if member] ec = {""AWS.GuardDuty.Members(val.AccountId === obj.AccountId)"": filtered_members} \ if filtered_members else None return create_entry('AWS GuardDuty Members', filtered_members, ec) except Exception as e: return raise_error(e) ","def get_members(args): try: client = aws_session( region=args.get('region'), roleArn=args.get('roleArn'), roleSessionName=args.get('roleSessionName'), roleSessionDuration=args.get('roleSessionDuration'), ) accountId_list = [] accountId_list.append(args.get('accountIds')) response = client.get_members( DetectorId=args.get('detectorId'), AccountIds=accountId_list ) members_response = response.get('Members', []) filtered_members = [member for member in members_response if member] ec = {""AWS.GuardDuty.Members(val.AccountId === obj.AccountId)"": filtered_members} \ if filtered_members else None return create_entry('AWS GuardDuty Members', filtered_members, ec) except Exception as e: return raise_error(e) " 53276,"def Chandrasekhar_G(x): """""" The Chandrasekhar function G(x) goes as 2x / 3 \sqrt{\pi} at low x and 0.5 x^{-2} describes the drag on a particle by collisions with a Maxwellian background. Because it goes to zero at infinity, there always be electrons for which any applied electric field is larger than the friction force; those will be accelerated into runaway electrons. A sufficiently strong ""Dreicer"" electric field will be enough to accelerate thermal electrons."""""" erf = special.erf(x) erf_derivative = 2 * np.exp(-(x ** 2)) / np.sqrt(np.pi) return (erf - x * erf_derivative) / (2 * x ** 2) ","def Chandrasekhar_G(x): """""" The Chandrasekhar function :math:`G(x)` goes as :math:`2x / 3 \sqrt{π}` at low :math:`x` and :math:`0.5 x^{-2}` describes the drag on a particle by collisions with a Maxwellian background. Because it goes to zero at infinity, there always be electrons for which any applied electric field is larger than the friction force; those will be accelerated into runaway electrons. A sufficiently strong ""Dreicer"" electric field will be enough to accelerate thermal electrons."""""" erf = special.erf(x) erf_derivative = 2 * np.exp(-(x ** 2)) / np.sqrt(np.pi) return (erf - x * erf_derivative) / (2 * x ** 2) " 53329,"def find_floating_potential( voltage: np.ndarray, current: np.ndarray, threshold: int = 1, min_points: Union[int, float] = None, fit_type: str = ""exponential"", ): """""" Determines the floating potential (:math:`V_f`) for a given current-voltage (IV) curve obtained from a swept Langmuir probe. The floating potential is the probe bias where the collected current equals zero :math:`I = 0`. (For additional details see the **Notes** section below.) **Aliases:** `find_vf_` Parameters ---------- voltage: `numpy.ndarray` 1-D numpy array of monotonically ascending/descending probe biases (should be in volts) current: `numpy.ndarray` 1-D numpy array of probe current (should be in amperes) corresponding to the ``voltage`` array threshold: positive, non-zero `int` Max allowed index distance between crossing-points before a new crossing-island is formed. 
That is, if ``threshold=5`` then consecutive crossing-points are considered to be in the same crossing-island if they are within 5 index steps of each other. (Default: 1) min_points: positive `int` or `float` Minimum number of data points required for the fitting to be applied to. See **Notes** section below for additional details. The following list specifies the optional values: - ``min_points = None`` (Default) The largest of 5 and ``factor * array_size`` is taken, where ``array_size`` is the size of ``voltage`` and ``factor = 0.1`` for ``fit_type = ""linear""`` and ``0.2`` for ``""exponential""``. - ``min_points = numpy.inf`` The entire passed array is fitted. - ``min_points >= 1`` Exact minimum number of points. - ``0 < min_points < 0`` The minimum number of points is taken as ``min_points * array_size``. fit_type: str The type of curve to be fitted to the Langmuir trace, ``""linear""`` or ``""exponential""`` (Default). This selects which ``FitFunction`` class should be applied to the trace. +-------------+----------------------------------------------------------+ | linear | `~plasmapy.analysis.fit_functions.Linear` | +-------------+----------------------------------------------------------+ | exponential | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` | +-------------+----------------------------------------------------------+ Returns ------- vf: `float` or `numpy.nan` The calculated floating potential (same units as the ``voltage`` array). Returns `numpy.nan` if the floating potential can not be determined. How :math:`V_f` is calculated depends on the fit function. This is described in the ``root_solve()`` method of the relevant fit function (e.g. the :meth:`~plasmapy.analysis.fit_functions.ExponentialPlusOffset.root_solve` method of `~plasmapy.analysis.fit_functions.ExponentialPlusOffset`). vf_err: `float` or `numpy.nan` The uncertainty associated with the floating potential calculation (units same as ``vf``). Returns `numpy.nan` if the floating potential can not be determined. Like :math:`V_f`:, the calculation depends on the applied fit function. The ``root_solve()`` method also describes how this is calculated. rsq: `float` The coefficient of determination (r-squared) value of the fit. See the documentation of the ``rsq`` property on the associated fit function (e.g. the `~plasmapy.analysis.fit_functions.ExponentialPlusOffset.rsq` property of `~plasmapy.analysis.fit_functions.ExponentialPlusOffset`). func: sub-class of `~plasmapy.analysis.fit_functions.AbstractFitFunction` The callable function :math:`f(x)` representing the fit and its results. islands: ``List[slice]`` List of `slice` objects representing the indices of the identified crossing-islands. indices: `slice` A `slice` object representing the indices of ``voltage`` and ``current`` arrays used for the fit. Notes ----- The internal functionality works like: 1. The current array ``current`` is scanned for all points equal to zero and point pairs that straddle :math:`I = 0`. This forms an array of ""crossing-points."" 2. The crossing-points are then grouped into ""crossing-islands"" in based on the ``threshold`` keyword. - A new island is formed when a successive crossing-point is more (index) steps away from the previous crossing-point than allowed by ``threshold``. - If multiple crossing-islands are identified, then the span from the first point in the first island to the last point in the last island is compared to ``min_points``. 
If the span is less than or equal to ``min_points``, then that span is taken as one larger crossing-island for the fit; otherwise, the function is incapable of identifying :math:`V_f` and will return `numpy.nan` values. 3. To calculate the floating potential... - If the crossing-island contains fewer points than ``min_points``, then each side of the crossing-island is equally padded with the nearest neighbor points until ``min_points`` is satisfied. - A fit is then performed using `scipy.stats.linregress` for ``fit_type=""linear""`` and `scipy.optimize.curve_fit` for ``fit_type=""exponential""``. """""" rtn = FloatingPotentialResults( vf=np.nan, vf_err=np.nan, rsq=None, func=None, islands=None, indices=None )._asdict() _settings = { ""linear"": {""func"": ffuncs.Linear, ""min_point_factor"": 0.1}, ""exponential"": {""func"": ffuncs.ExponentialPlusOffset, ""min_point_factor"": 0.2}, } try: min_point_factor = _settings[fit_type][""min_point_factor""] fit_func = _settings[fit_type][""func""]() rtn[""func""] = fit_func except KeyError: raise ValueError( f""Requested fit '{fit_type}' is not a valid option. Valid options "" f""are {list(_settings.keys())}."" ) # check voltage and current arrays voltage, current = check_sweep(voltage, current, strip_units=True) if not isinstance(threshold, numbers.Integral): raise TypeError( f""Keyword 'threshold' is of type {type(threshold)}, expected an int "" f""int >= 1."" ) if threshold < 1: raise ValueError( f""Keyword 'threshold' has value ({threshold}) less than 1, "" f""value must be an int >= 1."" ) # condition min_points if min_points is None: min_points = int(np.max([5, np.around(min_point_factor * voltage.size)])) elif not isinstance(min_points, (float, np.floating, int, np.integer)): raise TypeError( f""Argument 'min_points' is wrong type '{type(min_points)}', expecting "" f""an int or float."" ) elif np.isinf(min_points): # this signals to use all points pass elif 0 < min_points < 1: min_points = int(np.round(min_points * voltage.size)) elif min_points >= 1: min_points = int(np.round(min_points)) else: raise ValueError(f""Argument 'min_points' can not be negative ({min_points})."") # find possible crossing points (cp) lower_vals = current < 0 upper_vals = current > 0 cp_exact = (current == 0.0).nonzero()[0] cp_low2high = np.logical_and(np.roll(lower_vals, 1), upper_vals).nonzero()[0] cp_high2low = np.logical_and(np.roll(lower_vals, -1), upper_vals).nonzero()[0] # adjust for array wrapping cause by np.roll cp_low2high = cp_low2high[cp_low2high != 0] cp_high2low = cp_high2low[cp_high2low != current.size - 1] # collect all candidates cp_candidates = np.concatenate( (cp_exact, cp_low2high, cp_low2high - 1, cp_high2low, cp_high2low + 1) ) cp_candidates = np.unique(cp_candidates) # sorted and unique # How many crossing-islands? cp_intervals = np.diff(cp_candidates) threshold_indices = np.where(cp_intervals > threshold)[0] n_islands = threshold_indices.size + 1 if np.isinf(min_points) or n_islands == 1: rtn[""islands""] = [slice(cp_candidates[0], cp_candidates[-1] + 1)] else: # There are multiple crossing points isl_start = np.concatenate( ([cp_candidates[0]], cp_candidates[threshold_indices + 1]) ) isl_stop = np.concatenate( (cp_candidates[threshold_indices] + 1, [cp_candidates[-1] + 1]) ) rtn[""islands""] = [ slice(start, stop) for start, stop in zip(isl_start, isl_stop) ] # do islands fall within the min_points window? 
isl_window = ( np.abs(np.r_[rtn[""islands""][-1]][-1] - np.r_[rtn[""islands""][0]][0]) + 1 ) if isl_window > min_points: warn( f""Unable to determine floating potential, Langmuir sweep has "" f""{n_islands} crossing-islands. Try adjusting keyword 'threshold' "" f""and/or smooth the current."", PlasmaPyWarning, ) return FloatingPotentialResults(**rtn) # Construct crossing-island (pad if needed) if np.isinf(min_points): # us all points istart = 0 istop = voltage.size - 1 else: istart = cp_candidates[0] istop = cp_candidates[-1] iadd = (istop - istart + 1) - min_points if iadd < 0: # pad front ipad_2_start = ipad_2_stop = int(np.ceil(-iadd / 2.0)) if istart - ipad_2_start < 0: ipad_2_stop += ipad_2_start - istart istart = 0 else: istart -= ipad_2_start ipad_2_start = 0 # pad rear if ((current.size - 1) - (istop + ipad_2_stop)) < 0: ipad_2_start += ipad_2_stop - (current.size - 1 - istop) istop = current.size - 1 else: istop += ipad_2_stop # re-pad front if possible if ipad_2_start > 0: if istart - ipad_2_start < 0: istart = 0 else: istart -= ipad_2_start if (istop - istart + 1) < min_points: warn( f""The number of elements in the current array ({istop - istart + 1}) "" f""is less than 'min_points' ({min_points})."", PlasmaPyWarning, ) # Perform Linear Regression Fit volt_sub = voltage[istart : istop + 1] curr_sub = current[istart : istop + 1] fit_func.curve_fit(volt_sub, curr_sub) rtn[""vf""], rtn[""vf_err""] = fit_func.root_solve() rtn.update({""rsq"": fit_func.rsq, ""indices"": slice(istart, istop + 1)}) return FloatingPotentialResults(**rtn) ","def find_floating_potential( voltage: np.ndarray, current: np.ndarray, threshold: int = 1, min_points: Union[int, float] = None, fit_type: str = ""exponential"", ): """""" Determines the floating potential (:math:`V_f`) for a given current-voltage (IV) curve obtained from a swept Langmuir probe. The floating potential is the probe bias where the collected current equals zero :math:`I = 0`. (For additional details see the **Notes** section below.) **Aliases:** `find_vf_` Parameters ---------- voltage: `numpy.ndarray` 1-D numpy array of monotonically ascending/descending probe biases (should be in volts) current: `numpy.ndarray` 1-D numpy array of probe current (should be in amperes) corresponding to the ``voltage`` array threshold: positive, non-zero `int` Max allowed index distance between crossing-points before a new crossing-island is formed. That is, if ``threshold=5`` then consecutive crossing-points are considered to be in the same crossing-island if they are within 5 index steps of each other. (Default: 1) min_points: positive `int` or `float` Minimum number of data points required for the fitting to be applied to. See **Notes** section below for additional details. The following list specifies the optional values: - ``min_points = None`` (Default) The largest of 5 and ``factor * array_size`` is taken, where ``array_size`` is the size of ``voltage`` and ``factor = 0.1`` for ``fit_type = ""linear""`` and ``0.2`` for ``""exponential""``. - ``min_points = numpy.inf`` The entire passed array is fitted. - ``min_points >= 1`` Exact minimum number of points. - ``0 < min_points < 0`` The minimum number of points is taken as ``min_points * array_size``. fit_type: str The type of curve to be fitted to the Langmuir trace, ``""linear""`` or ``""exponential""`` (Default). This selects which ``FitFunction`` class should be applied to the trace. 
+-------------+----------------------------------------------------------+ | linear | `~plasmapy.analysis.fit_functions.Linear` | +-------------+----------------------------------------------------------+ | exponential | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` | +-------------+----------------------------------------------------------+ Returns ------- vf: `float` or `numpy.nan` The calculated floating potential (same units as the ``voltage`` array). Returns `numpy.nan` if the floating potential can not be determined. How :math:`V_f` is calculated depends on the fit function. This is described in the ``root_solve()`` method of the relevant fit function (e.g. the :meth:`~plasmapy.analysis.fit_functions.ExponentialPlusOffset.root_solve` method of `~plasmapy.analysis.fit_functions.ExponentialPlusOffset`). vf_err: `float` or `numpy.nan` The uncertainty associated with the floating potential calculation (units same as ``vf``). Returns `numpy.nan` if the floating potential can not be determined. Like :math:`V_f`:, the calculation depends on the applied fit function. The ``root_solve()`` method also describes how this is calculated. rsq: `float` The coefficient of determination (r-squared) value of the fit. See the documentation of the ``rsq`` property on the associated fit function (e.g. the `~plasmapy.analysis.fit_functions.ExponentialPlusOffset.rsq` property of `~plasmapy.analysis.fit_functions.ExponentialPlusOffset`). func: sub-class of `~plasmapy.analysis.fit_functions.AbstractFitFunction` The callable function :math:`f(x)` representing the fit and its results. islands: ``List[slice]`` List of `slice` objects representing the indices of the identified crossing-islands. indices: `slice` A `slice` object representing the indices of ``voltage`` and ``current`` arrays used for the fit. Notes ----- The internal functionality works like: 1. The current array ``current`` is scanned for all points equal to zero and point pairs that straddle :math:`I = 0`. This forms an array of ""crossing-points."" 2. The crossing-points are then grouped into ""crossing-islands"" in based on the ``threshold`` keyword. - A new island is formed when a successive crossing-point is more (index) steps away from the previous crossing-point than allowed by ``threshold``. - If multiple crossing-islands are identified, then the span from the first point in the first island to the last point in the last island is compared to ``min_points``. If the span is less than or equal to ``min_points``, then that span is taken as one larger crossing-island for the fit; otherwise, the function is incapable of identifying :math:`V_f` and will return `numpy.nan` values. 3. To calculate the floating potential... - If the crossing-island contains fewer points than ``min_points``, then each side of the crossing-island is equally padded with the nearest neighbor points until ``min_points`` is satisfied. - A fit is then performed using `scipy.stats.linregress` for ``fit_type=""linear""`` and `scipy.optimize.curve_fit` for ``fit_type=""exponential""``. 
"""""" rtn = FloatingPotentialResults( vf=np.nan, vf_err=np.nan, rsq=None, func=None, islands=None, indices=None )._asdict() _settings = { ""linear"": {""func"": ffuncs.Linear, ""min_point_factor"": 0.1}, ""exponential"": {""func"": ffuncs.ExponentialPlusOffset, ""min_point_factor"": 0.2}, } try: min_point_factor = _settings[fit_type][""min_point_factor""] fit_func = _settings[fit_type][""func""]() rtn[""func""] = fit_func except KeyError: raise ValueError( f""Requested fit '{fit_type}' is not a valid option. Valid options "" f""are {list(_settings.keys())}."" ) # check voltage and current arrays voltage, current = check_sweep(voltage, current, strip_units=True) if not isinstance(threshold, numbers.Integral): raise TypeError( f""Keyword 'threshold' is of type {type(threshold)}, expected an int "" f""int >= 1."" ) elif threshold < 1: raise ValueError( f""Keyword 'threshold' has value ({threshold}) less than 1, "" f""value must be an int >= 1."" ) # condition min_points if min_points is None: min_points = int(np.max([5, np.around(min_point_factor * voltage.size)])) elif not isinstance(min_points, (float, np.floating, int, np.integer)): raise TypeError( f""Argument 'min_points' is wrong type '{type(min_points)}', expecting "" f""an int or float."" ) elif np.isinf(min_points): # this signals to use all points pass elif 0 < min_points < 1: min_points = int(np.round(min_points * voltage.size)) elif min_points >= 1: min_points = int(np.round(min_points)) else: raise ValueError(f""Argument 'min_points' can not be negative ({min_points})."") # find possible crossing points (cp) lower_vals = current < 0 upper_vals = current > 0 cp_exact = (current == 0.0).nonzero()[0] cp_low2high = np.logical_and(np.roll(lower_vals, 1), upper_vals).nonzero()[0] cp_high2low = np.logical_and(np.roll(lower_vals, -1), upper_vals).nonzero()[0] # adjust for array wrapping cause by np.roll cp_low2high = cp_low2high[cp_low2high != 0] cp_high2low = cp_high2low[cp_high2low != current.size - 1] # collect all candidates cp_candidates = np.concatenate( (cp_exact, cp_low2high, cp_low2high - 1, cp_high2low, cp_high2low + 1) ) cp_candidates = np.unique(cp_candidates) # sorted and unique # How many crossing-islands? cp_intervals = np.diff(cp_candidates) threshold_indices = np.where(cp_intervals > threshold)[0] n_islands = threshold_indices.size + 1 if np.isinf(min_points) or n_islands == 1: rtn[""islands""] = [slice(cp_candidates[0], cp_candidates[-1] + 1)] else: # There are multiple crossing points isl_start = np.concatenate( ([cp_candidates[0]], cp_candidates[threshold_indices + 1]) ) isl_stop = np.concatenate( (cp_candidates[threshold_indices] + 1, [cp_candidates[-1] + 1]) ) rtn[""islands""] = [ slice(start, stop) for start, stop in zip(isl_start, isl_stop) ] # do islands fall within the min_points window? isl_window = ( np.abs(np.r_[rtn[""islands""][-1]][-1] - np.r_[rtn[""islands""][0]][0]) + 1 ) if isl_window > min_points: warn( f""Unable to determine floating potential, Langmuir sweep has "" f""{n_islands} crossing-islands. 
Try adjusting keyword 'threshold' "" f""and/or smooth the current."", PlasmaPyWarning, ) return FloatingPotentialResults(**rtn) # Construct crossing-island (pad if needed) if np.isinf(min_points): # us all points istart = 0 istop = voltage.size - 1 else: istart = cp_candidates[0] istop = cp_candidates[-1] iadd = (istop - istart + 1) - min_points if iadd < 0: # pad front ipad_2_start = ipad_2_stop = int(np.ceil(-iadd / 2.0)) if istart - ipad_2_start < 0: ipad_2_stop += ipad_2_start - istart istart = 0 else: istart -= ipad_2_start ipad_2_start = 0 # pad rear if ((current.size - 1) - (istop + ipad_2_stop)) < 0: ipad_2_start += ipad_2_stop - (current.size - 1 - istop) istop = current.size - 1 else: istop += ipad_2_stop # re-pad front if possible if ipad_2_start > 0: if istart - ipad_2_start < 0: istart = 0 else: istart -= ipad_2_start if (istop - istart + 1) < min_points: warn( f""The number of elements in the current array ({istop - istart + 1}) "" f""is less than 'min_points' ({min_points})."", PlasmaPyWarning, ) # Perform Linear Regression Fit volt_sub = voltage[istart : istop + 1] curr_sub = current[istart : istop + 1] fit_func.curve_fit(volt_sub, curr_sub) rtn[""vf""], rtn[""vf_err""] = fit_func.root_solve() rtn.update({""rsq"": fit_func.rsq, ""indices"": slice(istart, istop + 1)}) return FloatingPotentialResults(**rtn) " 31235,"def get_ip_status(ip): """""" Send request with no error handling, so the error handling can be done via wrapper function """""" api_call_headers = {'Authorization': 'Bearer ' + token['access_token']} params = {'ipAddress': ip} api_call_response = requests.get(LOOKUP_URL, params=params, headers=api_call_headers, verify=False) return api_call_response.text ","def get_ip_status(ip): """""" Send request with no error handling, so the error handling can be done via wrapper function """""" api_call_headers = {'Authorization': 'Bearer ' + token['access_token']} params = {'ipAddress': ip} api_call_response = requests.get(LOOKUP_URL, params=params, headers=api_call_headers, verify=USE_SSL) return api_call_response.text " 34147,"def get_valid_config(config: Text, mandatory_keys: List[Text]) -> Text: config_path = get_validated_path(config, ""config"", FALLBACK_CONFIG_PATH) missing_keys = missing_config_keys(config_path, mandatory_keys) if missing_keys: print_warning( ""Invalid config found '{}'. Missing mandatory parameters: "" ""{}. Enrich config with fallback configuration from '{}'."" """".format(config, "", "".join(missing_keys), FALLBACK_CONFIG_PATH) ) enrich_config(config_path, missing_keys, FALLBACK_CONFIG_PATH) return config_path ","def get_valid_config(config: Text, mandatory_keys: List[Text]) -> Text: config_path = get_validated_path(config, ""config"", FALLBACK_CONFIG_PATH) missing_keys = missing_config_keys(config_path, mandatory_keys) if missing_keys: print_warning( ""Invalid config found '{}'. Missing mandatory parameters: "" ""{}. 
Filling missing parameters from fallback configuration file: '{}'."" """".format(config, "", "".join(missing_keys), FALLBACK_CONFIG_PATH) ) enrich_config(config_path, missing_keys, FALLBACK_CONFIG_PATH) return config_path " 33795,"def _test_gpu_images(image: str) -> None: if ""gpu"" not in image: return try: DOCKER_CLIENT.containers.run( image, ""stat /usr/local/cuda/bin/nvcc"", detach=False) except docker.errors.ContainerError as e: print(""NVCC not found on a GPU image!"") raise e ","def _test_gpu_images(image: str) -> None: if ""gpu"" not in image: return try: DOCKER_CLIENT.containers.run( image, ""ls /usr/local/cuda/bin && stat /usr/local/cuda/bin/nvcc"", detach=False) except docker.errors.ContainerError as e: print(""NVCC not found on a GPU image!"") raise e " 24645,"def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10): r"""""" Returns an array of null point object, representing the null points of the given vector space. Parameters ---------- vspace: array_like The vector space as constructed by the vector_space function which is A 1 by 3 array with the first element containing the coordinates, the second element containing the vector values, and the third element containing the delta values for each dimension. MAX_ITERATIONS: int The maximum iterations of the Newton-Raphson method. The default value is 500. err: float The threshold/error that determines if convergence has occured using the Newton-Raphson method. The default value is ``1e-10``. Returns ------- array_like of `~plasmapy.analysis.nullpoint.NullPoint` An array of NullPoint objects representing the nullpoints of the given vector space. """""" nullpoints = [] for i in range(len(vspace[0][0]) - 1): for j in range(len(vspace[0][0][0]) - 1): for k in range(len(vspace[0][0][0][0]) - 1): if _reduction(vspace, [i, j, k]): if _trilinear_analysis(vspace, [i, j, k]): loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err) if loc is not None: p = NullPoint(loc, ""N/A"") if p not in nullpoints: nullpoints.append(p) return nullpoints ","def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10): r"""""" Returns an array of null point object, representing the null points of the given vector space. Parameters ---------- vspace: array_like The vector space as constructed by the ``_vector_space`` function which is A 1 by 3 array with the first element containing the coordinates, the second element containing the vector values, and the third element containing the delta values for each dimension. MAX_ITERATIONS: int The maximum iterations of the Newton-Raphson method. The default value is 500. err: float The threshold/error that determines if convergence has occured using the Newton-Raphson method. The default value is ``1e-10``. Returns ------- array_like of `~plasmapy.analysis.nullpoint.NullPoint` An array of NullPoint objects representing the nullpoints of the given vector space. 
"""""" nullpoints = [] for i in range(len(vspace[0][0]) - 1): for j in range(len(vspace[0][0][0]) - 1): for k in range(len(vspace[0][0][0][0]) - 1): if _reduction(vspace, [i, j, k]): if _trilinear_analysis(vspace, [i, j, k]): loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err) if loc is not None: p = NullPoint(loc, ""N/A"") if p not in nullpoints: nullpoints.append(p) return nullpoints " 4312,"def _prepare_forward(forward, info, noise_cov, fixed, loose, rank, pca, use_cps, exp, limit_depth_chs, combine_xyz, allow_fixed_depth, limit): """"""Prepare a gain matrix and noise covariance for localization."""""" # Steps (according to MNE-C, we change the order of various steps # because our I/O is already done, and we create the objects # on the fly more easily): # # 1. Read the bad channels # 2. Read the necessary data from the forward solution matrix file # 3. Load the projection data # 4. Load the sensor noise covariance matrix and attach it to the forward # 5. Compose the depth-weighting matrix # 6. Compose the source covariance matrix # 7. Apply fMRI weighting (not done) # 8. Apply the linear projection to the forward solution # 9. Apply whitening to the forward computation matrix # 10. Exclude the source space points within the labels (not done) # 11. Do appropriate source weighting to the forward computation matrix # # make a copy immediately so we do it exactly once forward = forward.copy() # Deal with ""fixed"" and ""loose"" loose = _triage_loose(forward['src'], loose, fixed) del fixed # Deal with ""depth"" if exp is not None: exp = float(exp) if not 0 <= exp: raise ValueError('depth exponent should be a greater than or ' 'equal to 0, got %s' % (exp,)) exp = exp or None # alias 0. -> None # put the forward solution in correct orientation # (delaying for the case of fixed ori with depth weighting if # allow_fixed_depth is True) if loose.get('surface', 1.) == 0. and len(loose) == 1: if not is_fixed_orient(forward): if allow_fixed_depth: # can convert now logger.info('Converting forward solution to fixed orietnation') convert_forward_solution( forward, force_fixed=True, use_cps=True, copy=False) elif exp is not None and not allow_fixed_depth: raise ValueError( 'For a fixed orientation inverse solution with depth ' 'weighting, the forward solution must be free-orientation and ' 'in surface orientation') else: # loose or free ori if is_fixed_orient(forward): raise ValueError( 'Forward operator has fixed orientation and can only ' 'be used to make a fixed-orientation inverse ' 'operator.') if loose.get('surface', 1.) < 1. and not forward['surf_ori']: logger.info('Converting forward solution to surface orientation') convert_forward_solution( forward, surf_ori=True, use_cps=use_cps, copy=False) forward, info_picked = _select_orient_forward(forward, info, noise_cov, copy=False) logger.info(""Selected %d channels"" % (len(info_picked['ch_names'],))) if exp is None: depth_prior = None else: depth_prior = compute_depth_prior( forward, info_picked, exp=exp, limit_depth_chs=limit_depth_chs, combine_xyz=combine_xyz, limit=limit, noise_cov=noise_cov, rank=rank) # Deal with fixed orientation forward / inverse if loose.get('surface', 1.) == 0. 
and len(loose) == 1: orient_prior = None if not is_fixed_orient(forward): if depth_prior is not None: # Convert the depth prior into a fixed-orientation one logger.info(' Picked elements from a free-orientation ' 'depth-weighting prior into the fixed-orientation ' 'one') depth_prior = depth_prior[2::3] convert_forward_solution( forward, surf_ori=True, force_fixed=True, use_cps=use_cps, copy=False) else: if loose.get('surface', 1.) < 1: assert forward['surf_ori'] # In theory we could have orient_prior=None for loose=1., but # the MNE-C code does not do this orient_prior = compute_orient_prior(forward, loose=loose) logger.info('Whitening the forward solution.') noise_cov = prepare_noise_cov( noise_cov, info, info_picked['ch_names'], rank) whitener, _ = compute_whitener( noise_cov, info, info_picked['ch_names'], pca=pca, verbose=False, rank=rank) gain = np.dot(whitener, forward['sol']['data']) logger.info('Creating the source covariance matrix') source_std = np.ones(gain.shape[1], dtype=gain.dtype) if depth_prior is not None: source_std *= depth_prior if orient_prior is not None: source_std *= orient_prior np.sqrt(source_std, out=source_std) gain *= source_std # Adjusting Source Covariance matrix to make trace of G*R*G' equal # to number of sensors. logger.info('Adjusting source covariance matrix.') trace_GRGT = linalg.norm(gain, ord='fro') ** 2 n_nzero = (noise_cov['eig'] > 0).sum() scale = np.sqrt(n_nzero / trace_GRGT) source_std *= scale gain *= scale return (forward, info_picked, gain, depth_prior, orient_prior, source_std, trace_GRGT, noise_cov, whitener) ","def _prepare_forward(forward, info, noise_cov, fixed, loose, rank, pca, use_cps, exp, limit_depth_chs, combine_xyz, allow_fixed_depth, limit): """"""Prepare a gain matrix and noise covariance for localization."""""" # Steps (according to MNE-C, we change the order of various steps # because our I/O is already done, and we create the objects # on the fly more easily): # # 1. Read the bad channels # 2. Read the necessary data from the forward solution matrix file # 3. Load the projection data # 4. Load the sensor noise covariance matrix and attach it to the forward # 5. Compose the depth-weighting matrix # 6. Compose the source covariance matrix # 7. Apply fMRI weighting (not done) # 8. Apply the linear projection to the forward solution # 9. Apply whitening to the forward computation matrix # 10. Exclude the source space points within the labels (not done) # 11. Do appropriate source weighting to the forward computation matrix # # make a copy immediately so we do it exactly once forward = forward.copy() # Deal with ""fixed"" and ""loose"" loose = _triage_loose(forward['src'], loose, fixed) del fixed # Deal with ""depth"" if exp is not None: exp = float(exp) if not 0 <= exp: raise ValueError('depth exponent should be greater than or ' 'equal to 0, got %s' % (exp,)) exp = exp or None # alias 0. -> None # put the forward solution in correct orientation # (delaying for the case of fixed ori with depth weighting if # allow_fixed_depth is True) if loose.get('surface', 1.) == 0. 
and len(loose) == 1: if not is_fixed_orient(forward): if allow_fixed_depth: # can convert now logger.info('Converting forward solution to fixed orietnation') convert_forward_solution( forward, force_fixed=True, use_cps=True, copy=False) elif exp is not None and not allow_fixed_depth: raise ValueError( 'For a fixed orientation inverse solution with depth ' 'weighting, the forward solution must be free-orientation and ' 'in surface orientation') else: # loose or free ori if is_fixed_orient(forward): raise ValueError( 'Forward operator has fixed orientation and can only ' 'be used to make a fixed-orientation inverse ' 'operator.') if loose.get('surface', 1.) < 1. and not forward['surf_ori']: logger.info('Converting forward solution to surface orientation') convert_forward_solution( forward, surf_ori=True, use_cps=use_cps, copy=False) forward, info_picked = _select_orient_forward(forward, info, noise_cov, copy=False) logger.info(""Selected %d channels"" % (len(info_picked['ch_names'],))) if exp is None: depth_prior = None else: depth_prior = compute_depth_prior( forward, info_picked, exp=exp, limit_depth_chs=limit_depth_chs, combine_xyz=combine_xyz, limit=limit, noise_cov=noise_cov, rank=rank) # Deal with fixed orientation forward / inverse if loose.get('surface', 1.) == 0. and len(loose) == 1: orient_prior = None if not is_fixed_orient(forward): if depth_prior is not None: # Convert the depth prior into a fixed-orientation one logger.info(' Picked elements from a free-orientation ' 'depth-weighting prior into the fixed-orientation ' 'one') depth_prior = depth_prior[2::3] convert_forward_solution( forward, surf_ori=True, force_fixed=True, use_cps=use_cps, copy=False) else: if loose.get('surface', 1.) < 1: assert forward['surf_ori'] # In theory we could have orient_prior=None for loose=1., but # the MNE-C code does not do this orient_prior = compute_orient_prior(forward, loose=loose) logger.info('Whitening the forward solution.') noise_cov = prepare_noise_cov( noise_cov, info, info_picked['ch_names'], rank) whitener, _ = compute_whitener( noise_cov, info, info_picked['ch_names'], pca=pca, verbose=False, rank=rank) gain = np.dot(whitener, forward['sol']['data']) logger.info('Creating the source covariance matrix') source_std = np.ones(gain.shape[1], dtype=gain.dtype) if depth_prior is not None: source_std *= depth_prior if orient_prior is not None: source_std *= orient_prior np.sqrt(source_std, out=source_std) gain *= source_std # Adjusting Source Covariance matrix to make trace of G*R*G' equal # to number of sensors. 
logger.info('Adjusting source covariance matrix.') trace_GRGT = linalg.norm(gain, ord='fro') ** 2 n_nzero = (noise_cov['eig'] > 0).sum() scale = np.sqrt(n_nzero / trace_GRGT) source_std *= scale gain *= scale return (forward, info_picked, gain, depth_prior, orient_prior, source_std, trace_GRGT, noise_cov, whitener) " 30909,"def update_user_command(client, args): user_profile = args.get('user-profile') app_data = client.iam.map_user_profile_to_app_data(user_profile, OUTGOING_MAPPER) if 'id' not in app_data and args.get('create-if-not-exists'): return create_user_command(client, args) user_id = app_data.pop('id') res = client.update_user(user_id, app_data) res_json = res.json() if res.status_code == 200: active = False if res_json.get('status') == DEPROVISIONED_STATUS else True return client.iam.return_outputs(success=True, iden=res_json.get('id'), email=res_json.get('profile', {}).get('email'), username=res_json.get('profile', {}).get('login'), details=res_json, active=active) return client.iam.return_outputs(success=False, error_code=res_json.get('errorCode'), error_message=get_error_details(res_json), details=res_json) ","def update_user_command(client, args): user_profile = args.get('user-profile') app_data = client.iam.map_user_profile_to_app_data(user_profile, OUTGOING_MAPPER) if 'id' not in app_data and args.get('create-if-not-exists') == 'true': return create_user_command(client, args) user_id = app_data.pop('id') res = client.update_user(user_id, app_data) res_json = res.json() if res.status_code == 200: active = False if res_json.get('status') == DEPROVISIONED_STATUS else True return client.iam.return_outputs(success=True, iden=res_json.get('id'), email=res_json.get('profile', {}).get('email'), username=res_json.get('profile', {}).get('login'), details=res_json, active=active) return client.iam.return_outputs(success=False, error_code=res_json.get('errorCode'), error_message=get_error_details(res_json), details=res_json) " 58097,"def main() -> None: token_url = demisto.params().get('token_url') org_id = demisto.params().get('orgId') api_key = demisto.params().get('apiKey') api_secret = demisto.params().get('apiSecret') base_url = demisto.params()['url'] verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') commands = { 'umbrella-get-summary': get_summary_command, 'umbrella-list-top-threats': list_top_threats_command, } try: product_auth = UmbrellaAuthAPI(token_url, api_key, api_secret) access_token = product_auth.get_access_token()[""access_token""] headers: Dict = { ""Authorization"": f""Bearer {access_token}"" } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy, org_id=org_id) if command == 'test-module': # This is the call made when pressing the integration Test button. 
result = test_module(client) return_results(result) elif command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') ","def main() -> None: params = demisto.params() token_url = params.get('token_url') org_id = params.get('orgId') api_key = demisto.params().get('apiKey') api_secret = demisto.params().get('apiSecret') base_url = demisto.params()['url'] verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') commands = { 'umbrella-get-summary': get_summary_command, 'umbrella-list-top-threats': list_top_threats_command, } try: product_auth = UmbrellaAuthAPI(token_url, api_key, api_secret) access_token = product_auth.get_access_token()[""access_token""] headers: Dict = { ""Authorization"": f""Bearer {access_token}"" } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy, org_id=org_id) if command == 'test-module': # This is the call made when pressing the integration Test button. result = test_module(client) return_results(result) elif command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') " 14053,"def _read_parquet(path, columns=None, **kwargs): """""" Load a Parquet object from the file path, returning a GeoDataFrame. You can read a subset of columns in the file using the ``columns`` parameter. However, the structure of the returned GeoDataFrame will depend on which columns you read: * if no geometry columns are read, this will raise a ``ValueError`` - you should use the pandas `read_parquet` method instead. * if the primary geometry column saved to this file is not included in columns, the first available geometry column will be set as the geometry column of the returned GeoDataFrame. Requires 'pyarrow'. .. versionadded:: 0.8 Parameters ---------- path : str, path object columns : list-like of strings, default=None If not None, only these columns will be read from the file. If the primary geometry column is not included, the first secondary geometry read from the file will be set as the geometry column of the returned GeoDataFrame. If no geometry columns are present, a ``ValueError`` will be raised. **kwargs Any additional kwargs passed to pyarrow.parquet.read_table(). Returns ------- GeoDataFrame Examples -------- >>> df = geopandas.read_parquet(""data.parquet) # doctest: +SKIP Specifying columns to read: >>> df = geopandas.read_parquet( ... ""data.parquet, ... columns=[""geometry"", ""pop_est""] ... ) # doctest: +SKIP """""" parquet = import_optional_dependency( ""pyarrow.parquet"", extra=""pyarrow is required for Parquet support."" ) kwargs[""use_pandas_metadata""] = True table = parquet.read_table(path, columns=columns, **kwargs) return _arrow_to_geopandas(table) ","def _read_parquet(path, columns=None, **kwargs): """""" Load a Parquet object from the file path, returning a GeoDataFrame. 
You can read a subset of columns in the file using the ``columns`` parameter. However, the structure of the returned GeoDataFrame will depend on which columns you read: * if no geometry columns are read, this will raise a ``ValueError`` - you should use the pandas `read_parquet` method instead. * if the primary geometry column saved to this file is not included in columns, the first available geometry column will be set as the geometry column of the returned GeoDataFrame. Requires 'pyarrow'. .. versionadded:: 0.8 Parameters ---------- path : str, path object columns : list-like of strings, default=None If not None, only these columns will be read from the file. If the primary geometry column is not included, the first secondary geometry read from the file will be set as the geometry column of the returned GeoDataFrame. If no geometry columns are present, a ``ValueError`` will be raised. **kwargs Any additional kwargs passed to pyarrow.parquet.read_table(). Returns ------- GeoDataFrame Examples -------- >>> df = geopandas.read_parquet(""data.parquet) # doctest: +SKIP Specifying columns to read: >>> df = geopandas.read_parquet( ... ""data.parquet"", ... columns=[""geometry"", ""pop_est""] ... ) # doctest: +SKIP """""" parquet = import_optional_dependency( ""pyarrow.parquet"", extra=""pyarrow is required for Parquet support."" ) kwargs[""use_pandas_metadata""] = True table = parquet.read_table(path, columns=columns, **kwargs) return _arrow_to_geopandas(table) " 31188,"def main(): """"""main function, parses params and runs command functions :return: :rtype: """""" handle_proxy() username = demisto.params().get('username') apikey = demisto.params().get('apikey') base_url = demisto.params().get('url') if base_url.endswith('/'): base_url = base_url[:-1] proxy = demisto.params().get('proxy', False) verify_certificate = not demisto.params().get('insecure', False) first_fetch = demisto.params().get('first_fetch') headers = { 'Content-Type': 'application/json' } demisto.debug(""Command being called is {}"".format(demisto.command())) try: client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy, username=username, apikey=apikey) args = demisto.args() if demisto.command() == 'test-module': test_module(client) elif demisto.command() == 'lp-get-incidents': return_results(get_incidents_command(client, args)) elif demisto.command() == 'lp-get-incident-data': return_results(get_incident_data_command(client, args)) elif demisto.command() == 'lp-get-incident-states': return_results(get_incident_states_command(client, args)) elif demisto.command() == 'lp-add-incident-comment': return_results(add_incident_comment_command(client, args)) elif demisto.command() == 'lp-assign-incidents': return_results(assign_incidents_command(client, args)) elif demisto.command() == 'lp-resolve-incidents': return_results(resolve_incidents_command(client, args)) elif demisto.command() == 'lp-close-incidents': return_results(close_incidents_command(client, args)) elif demisto.command() == 'lp-reopen-incidents': return_results(reopen_incidents_command(client, args)) elif demisto.command() == 'lp-get-users': return_results(get_users_command(client)) elif demisto.command() == 'fetch-incidents': demisto.incidents(fetch_incidents(client, first_fetch)) except Exception as err: demisto.error(traceback.format_exc()) # print the traceback return_error(""Failed to execute {} command. 
Error: {}"".format(demisto.command(), str(err))) ","def main(): """"""main function, parses params and runs command functions :return: :rtype: """""" handle_proxy() username = demisto.params().get('username') apikey = demisto.params().get('apikey') base_url = demisto.params().get('url') if base_url.endswith('/'): base_url = base_url[:-1] proxy = demisto.params().get('proxy', False) verify_certificate = not demisto.params().get('insecure', False) first_fetch = demisto.params().get('first_fetch') headers = { 'Content-Type': 'application/json' } demisto.debug(""Command being called is {}"".format(demisto.command())) try: client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy, username=username, apikey=apikey) args = demisto.args() if demisto.command() == 'test-module': return_results(test_module(client)) elif demisto.command() == 'lp-get-incidents': return_results(get_incidents_command(client, args)) elif demisto.command() == 'lp-get-incident-data': return_results(get_incident_data_command(client, args)) elif demisto.command() == 'lp-get-incident-states': return_results(get_incident_states_command(client, args)) elif demisto.command() == 'lp-add-incident-comment': return_results(add_incident_comment_command(client, args)) elif demisto.command() == 'lp-assign-incidents': return_results(assign_incidents_command(client, args)) elif demisto.command() == 'lp-resolve-incidents': return_results(resolve_incidents_command(client, args)) elif demisto.command() == 'lp-close-incidents': return_results(close_incidents_command(client, args)) elif demisto.command() == 'lp-reopen-incidents': return_results(reopen_incidents_command(client, args)) elif demisto.command() == 'lp-get-users': return_results(get_users_command(client)) elif demisto.command() == 'fetch-incidents': demisto.incidents(fetch_incidents(client, first_fetch)) except Exception as err: demisto.error(traceback.format_exc()) # print the traceback return_error(""Failed to execute {} command. Error: {}"".format(demisto.command(), str(err))) " 11681,"def waitProcessEvents(timeout=1, event: threading.Event = None): # this is ugly, especially used without an event # there is no guarantee that the state is as expected. # but for now, don't know how else to let gui and all requests settle # for some actions def cond(): if event: return event.is_set() else: return False start = time.time() while not cond(): QApplication.processEvents() if time.time() - start > timeout: return ","def waitProcessEvents(timeout: float = 1.0, event: Optional[threading.Event] = None): # this is ugly, especially used without an event # there is no guarantee that the state is as expected. 
# but for now, don't know how else to let gui and all requests settle # for some actions def cond(): if event: return event.is_set() else: return False start = time.time() while not cond(): QApplication.processEvents() if time.time() - start > timeout: return " 4849,"def add_fancy_patch_around(ax, bb, **kwargs): fancy = FancyBboxPatch((bb.xmin, bb.ymin), bb.width, bb.height, fc=(1., 0.8, 1., .5), ec=(1., 0.5, 1., 0.5), **kwargs) ax.add_patch(fancy) return fancy ","def add_fancy_patch_around(ax, bb, **kwargs): fancy = FancyBboxPatch((bb.xmin, bb.ymin), bb.width, bb.height, fc=(1, 0.8, 1, 0.5), ec=(1, 0.5, 1, 0.5), **kwargs) ax.add_patch(fancy) return fancy " 38789,"def _expected_lsf_directives(job): num_tasks = job.num_tasks or 1 num_tasks_per_node = job.num_tasks_per_node or 1 ptile = min( num_tasks * job.num_cpus_per_task, num_tasks_per_node * job.num_cpus_per_task) return set([ f'#BSUB -J testjob', f'#BSUB -o {job.stdout}', f'#BSUB -e {job.stderr}', f'#BSUB -n {num_tasks}', f'#BSUB -W {int(job.time_limit // 60)}', f'#BSUB -R ""span[ptile={ptile}]""', '#BSUB -x', f'#BSUB --account=spam', f'#BSUB --gres=gpu:4', f'#DW jobdw capacity=100GB', f'#DW stage_in source=/foo', ]) ","def _expected_lsf_directives(job): num_tasks = job.num_tasks or 1 num_tasks_per_node = job.num_tasks_per_node or 1 ptile = min( num_tasks * job.num_cpus_per_task, num_tasks_per_node * job.num_cpus_per_task) return set([ f'#BSUB -J testjob', f'#BSUB -o {job.stdout}', f'#BSUB -e {job.stderr}', f'#BSUB -n {num_tasks}', f'#BSUB -W {int(job.time_limit // 60)}', f'#BSUB -R ""span[ptile={ptile}]""', f'#BSUB -x', f'#BSUB --account=spam', f'#BSUB --gres=gpu:4', f'#DW jobdw capacity=100GB', f'#DW stage_in source=/foo', ]) " 20016,"def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark', record_chip_size=""median""): """"""Automatically detects a color card and output info to use in create_color_card_mask function Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl. Inputs: rgb_img = Input RGB image data containing a color card. threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss') threshvalue = Thresholding value, optional (default 125) blurry = Bool (default False) if True then image sharpening applied background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram expansion applied to better detect edges, but histogram expansion will be hindered if there is a dark background record_chip_size = Optional str for choosing chip size measurement to be recorded, either ""median"", ""mean"", or None Returns: df = Dataframe containing information about the filtered contours start_coord = Two element tuple of starting coordinates, location of the top left pixel detected spacing = Two element tuple of spacing between centers of chips :param rgb_img: numpy.ndarray :param threshold: str :param threshvalue: int :param blurry: bool :param background: str :param record_chip_size: str :return df: pandas.core.frame.DataFrame :return start_coord: tuple :return spacing: tuple """""" # Imports import skimage import pandas as pd from scipy.spatial.distance import squareform, pdist # Get image attributes height, width, channels = rgb_img.shape total_pix = float(height * width) # Minimum and maximum square size based upon 12 MP image min_area = 1000. / 12000000. * total_pix max_area = 8000000. / 12000000. 
* total_pix # Create gray image for further processing gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY) # Laplacian Fourier Transform detection of blurriness blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var() # If image is blurry then try to deblur using kernel if blurry: # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening kernel = np.array([[-1, -1, -1, -1, -1], [-1, 2, 2, 2, -1], [-1, 2, 8, 2, -1], [-1, 2, 2, 2, -1], [-1, -1, -1, -1, -1]]) / 8.0 # Store result back out for further processing gray_img = cv2.filter2D(gray_img, -1, kernel) # In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu # thresholding. If your image has a bright background then apply if background == 'light': clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4)) # apply CLAHE histogram expansion to find squares better with canny edge detection gray_img = clahe.apply(gray_img) elif background != 'dark': fatal_error('Background parameter ' + str(background) + ' is not ""light"" or ""dark""!') # Thresholding if threshold_type.upper() == ""OTSU"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) elif threshold_type.upper() == ""NORMAL"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY) elif threshold_type.upper() == ""ADAPTGAUSS"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0) threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 2) else: fatal_error('Input threshold=' + str(threshold_type) + ' but should be ""otsu"", ""normal"", or ""adaptgauss""!') # Apply automatic Canny edge detection using the computed median canny_edges = skimage.feature.canny(threshold) canny_edges.dtype = 'uint8' # Compute contours to find the squares of the card contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] # Variable of which contour is which mindex = [] # Variable to store moments mu = [] # Variable to x,y coordinates in tuples mc = [] # Variable to x coordinate as integer mx = [] # Variable to y coordinate as integer my = [] # Variable to store area marea = [] # Variable to store whether something is a square (1) or not (0) msquare = [] # Variable to store square approximation coordinates msquarecoords = [] # Variable to store child hierarchy element mchild = [] # Fitted rectangle height mheight = [] # Fitted rectangle width mwidth = [] # Ratio of height/width mwhratio = [] # Extract moments from contour image for x in range(0, len(contours)): mu.append(cv2.moments(contours[x])) marea.append(cv2.contourArea(contours[x])) mchild.append(int(hierarchy[0][x][2])) mindex.append(x) # Cycle through moment data and compute location for each moment for m in mu: if m['m00'] != 0: # This is the area term for a moment mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00'])) mx.append(int(m['m10'] / m['m00'])) my.append(int(m['m01'] / m['m00'])) else: mc.append((0, 0)) mx.append((0)) my.append((0)) # Loop over our contours and extract data about them for index, c in 
enumerate(contours): # Area isn't 0, but greater than min-area and less than max-area if marea[index] != 0 and min_area < marea[index] < max_area: peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.1 * peri, True) center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle mwidth.append(wh[0]) mheight.append(wh[1]) mwhratio.append(wh[0] / wh[1]) msquare.append(len(approx)) # If the approx contour has 4 points then we can assume we have 4-sided objects if len(approx) == 4 or len(approx) == 5: msquarecoords.append(approx) else: # It's not square # msquare.append(0) msquarecoords.append(0) else: # Contour has area of 0, not interesting msquare.append(0) msquarecoords.append(0) mwidth.append(0) mheight.append(0) mwhratio.append(0) # Make a pandas df from data for filtering out junk all_contours = {'index': mindex, 'x': mx, 'y': my, 'width': mwidth, 'height': mheight, 'res_ratio': mwhratio, 'area': marea, 'square': msquare, 'child': mchild} df = pd.DataFrame(all_contours) # Add calculated blur factor to output df['blurriness'] = blurfactor # Filter df for attributes that would isolate squares of reasonable size df = df[(df['area'] > min_area) & (df['area'] < max_area) & (df['child'] != -1) & (df['square'].isin([4, 5])) & (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)] # Filter nested squares from dataframe, was having issues with median being towards smaller nested squares df = df[~(df['index'].isin(df['index'] + 1))] # Count up squares that are within a given radius, more squares = more likelihood of them being the card # Median width of square time 2.5 gives proximity radius for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 6 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements filtered_area = df['area'] # Create empty matrix for storing comparisons sizecomp = np.zeros((len(filtered_area), len(filtered_area))) # Double loop through all areas to compare to each other for p in range(0, len(filtered_area)): for o in range(0, len(filtered_area)): big = max(filtered_area.iloc[p], filtered_area.iloc[o]) small = min(filtered_area.iloc[p], filtered_area.iloc[o]) pct = 100. * (small / big) sizecomp[p][o] = pct # How many comparisons given 90% square similarity sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1) # Append sizeprox summary to dataframe df = df.assign(sizeprox=sizematrix.values) # Reorder dataframe for better printing df = df[['index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square', 'child', 'blurriness', 'distprox', 'sizeprox']] # Loosely filter for size and distance (relative size to median) minsqwidth = median_sq_width_px * 0.80 maxsqwidth = median_sq_width_px * 1.2 df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) & (df['width'] < maxsqwidth)] # Filter for proximity again to root out stragglers. Find and count up squares that are within given radius, # more squares = more likelihood of them being the card. 
Median width of square time 2.5 gives proximity radius # for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 5 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Filter results for distance proximity to other squares df = df[(df['distprox'] >= 4)] # Remove all not numeric values use to_numeric with parameter, errors='coerce' - it replace non numeric to NaNs: df['x'] = pd.to_numeric(df['x'], errors='coerce') df['y'] = pd.to_numeric(df['y'], errors='coerce') # Remove NaN df = df.dropna() if df['x'].min() is np.nan or df['y'].min() is np.nan: fatal_error('No color card found under current parameters') else: # Extract the starting coordinate start_coord = (df['x'].min(), df['y'].min()) # start_coord = (int(df['X'].min()), int(df['Y'].min())) # Calculate the range spacingx_short = (df['x'].max() - df['x'].min()) / 3 spacingy_short = (df['y'].max() - df['y'].min()) / 3 spacingx_long = (df['x'].max() - df['x'].min()) / 5 spacingy_long = (df['y'].max() - df['y'].min()) / 5 # Chip spacing since 4x6 card assumed spacing_short = min(spacingx_short, spacingy_short) spacing_long = max(spacingx_long, spacingy_long) # Smaller spacing measurement might have a chip missing spacing = int(max(spacing_short, spacing_long)) spacing = (spacing, spacing) if record_chip_size is not None: if record_chip_size.upper() == ""MEDIAN"": chip_size = df.loc[:,""area""].median() elif record_chip_size.upper() == ""MEAN"": chip_size = df.loc[:,""area""].mean() else: print(str(record_chip_size) + "" Is not a valid entry for record_chip_size."" + "" Must be either 'mean', 'median', or None."") chip_size = None # Store into global measurements outputs.add_observation(variable='color_chip_size', trait='size of color card chips identified', method='plantcv.plantcv.transform.find_color_card', scale='none', datatype=float, value=chip_size, label=str(record_chip_size)) return df, start_coord, spacing ","def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark', record_chip_size=""median""): """"""Automatically detects a color card and output info to use in create_color_card_mask function Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl. Inputs: rgb_img = Input RGB image data containing a color card. 
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss') threshvalue = Thresholding value, optional (default 125) blurry = Bool (default False) if True then image sharpening applied background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram expansion applied to better detect edges, but histogram expansion will be hindered if there is a dark background record_chip_size = Optional str for choosing chip size measurement to be recorded, either ""median"", ""mean"", or None Returns: df = Dataframe containing information about the filtered contours start_coord = Two element tuple of starting coordinates, location of the top left pixel detected spacing = Two element tuple of spacing between centers of chips :param rgb_img: numpy.ndarray :param threshold: str :param threshvalue: int :param blurry: bool :param background: str :param record_chip_size: str :return df: pandas.core.frame.DataFrame :return start_coord: tuple :return spacing: tuple """""" # Imports import skimage import pandas as pd from scipy.spatial.distance import squareform, pdist # Get image attributes height, width, channels = rgb_img.shape total_pix = float(height * width) # Minimum and maximum square size based upon 12 MP image min_area = 1000. / 12000000. * total_pix max_area = 8000000. / 12000000. * total_pix # Create gray image for further processing gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY) # Laplacian Fourier Transform detection of blurriness blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var() # If image is blurry then try to deblur using kernel if blurry: # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening kernel = np.array([[-1, -1, -1, -1, -1], [-1, 2, 2, 2, -1], [-1, 2, 8, 2, -1], [-1, 2, 2, 2, -1], [-1, -1, -1, -1, -1]]) / 8.0 # Store result back out for further processing gray_img = cv2.filter2D(gray_img, -1, kernel) # In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu # thresholding. 
If your image has a bright background then apply if background == 'light': clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4)) # apply CLAHE histogram expansion to find squares better with canny edge detection gray_img = clahe.apply(gray_img) elif background != 'dark': fatal_error('Background parameter ' + str(background) + ' is not ""light"" or ""dark""!') # Thresholding if threshold_type.upper() == ""OTSU"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) elif threshold_type.upper() == ""NORMAL"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0) ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY) elif threshold_type.upper() == ""ADAPTGAUSS"": # Blur slightly so defects on card squares and background patterns are less likely to be picked up gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0) threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 2) else: fatal_error('Input threshold_type=' + str(threshold_type) + ' but should be ""otsu"", ""normal"", or ""adaptgauss""!') # Apply automatic Canny edge detection using the computed median canny_edges = skimage.feature.canny(threshold) canny_edges.dtype = 'uint8' # Compute contours to find the squares of the card contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] # Variable of which contour is which mindex = [] # Variable to store moments mu = [] # Variable to x,y coordinates in tuples mc = [] # Variable to x coordinate as integer mx = [] # Variable to y coordinate as integer my = [] # Variable to store area marea = [] # Variable to store whether something is a square (1) or not (0) msquare = [] # Variable to store square approximation coordinates msquarecoords = [] # Variable to store child hierarchy element mchild = [] # Fitted rectangle height mheight = [] # Fitted rectangle width mwidth = [] # Ratio of height/width mwhratio = [] # Extract moments from contour image for x in range(0, len(contours)): mu.append(cv2.moments(contours[x])) marea.append(cv2.contourArea(contours[x])) mchild.append(int(hierarchy[0][x][2])) mindex.append(x) # Cycle through moment data and compute location for each moment for m in mu: if m['m00'] != 0: # This is the area term for a moment mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00'])) mx.append(int(m['m10'] / m['m00'])) my.append(int(m['m01'] / m['m00'])) else: mc.append((0, 0)) mx.append((0)) my.append((0)) # Loop over our contours and extract data about them for index, c in enumerate(contours): # Area isn't 0, but greater than min-area and less than max-area if marea[index] != 0 and min_area < marea[index] < max_area: peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.1 * peri, True) center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle mwidth.append(wh[0]) mheight.append(wh[1]) mwhratio.append(wh[0] / wh[1]) msquare.append(len(approx)) # If the approx contour has 4 points then we can assume we have 4-sided objects if len(approx) == 4 or len(approx) == 5: msquarecoords.append(approx) else: # It's not square # msquare.append(0) msquarecoords.append(0) else: # Contour has area of 0, not interesting msquare.append(0) msquarecoords.append(0) mwidth.append(0) mheight.append(0) 
mwhratio.append(0) # Make a pandas df from data for filtering out junk all_contours = {'index': mindex, 'x': mx, 'y': my, 'width': mwidth, 'height': mheight, 'res_ratio': mwhratio, 'area': marea, 'square': msquare, 'child': mchild} df = pd.DataFrame(all_contours) # Add calculated blur factor to output df['blurriness'] = blurfactor # Filter df for attributes that would isolate squares of reasonable size df = df[(df['area'] > min_area) & (df['area'] < max_area) & (df['child'] != -1) & (df['square'].isin([4, 5])) & (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)] # Filter nested squares from dataframe, was having issues with median being towards smaller nested squares df = df[~(df['index'].isin(df['index'] + 1))] # Count up squares that are within a given radius, more squares = more likelihood of them being the card # Median width of square time 2.5 gives proximity radius for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 6 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements filtered_area = df['area'] # Create empty matrix for storing comparisons sizecomp = np.zeros((len(filtered_area), len(filtered_area))) # Double loop through all areas to compare to each other for p in range(0, len(filtered_area)): for o in range(0, len(filtered_area)): big = max(filtered_area.iloc[p], filtered_area.iloc[o]) small = min(filtered_area.iloc[p], filtered_area.iloc[o]) pct = 100. * (small / big) sizecomp[p][o] = pct # How many comparisons given 90% square similarity sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1) # Append sizeprox summary to dataframe df = df.assign(sizeprox=sizematrix.values) # Reorder dataframe for better printing df = df[['index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square', 'child', 'blurriness', 'distprox', 'sizeprox']] # Loosely filter for size and distance (relative size to median) minsqwidth = median_sq_width_px * 0.80 maxsqwidth = median_sq_width_px * 1.2 df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) & (df['width'] < maxsqwidth)] # Filter for proximity again to root out stragglers. Find and count up squares that are within given radius, # more squares = more likelihood of them being the card. 
Median width of square time 2.5 gives proximity radius # for searching for similar squares median_sq_width_px = df[""width""].median() # Squares that are within 6 widths of the current square pixeldist = median_sq_width_px * 5 # Computes euclidean distance matrix for the x and y contour centroids distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']]))) # Add up distances that are less than ones have distance less than pixeldist pixels distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1) # Append distprox summary to dataframe df = df.assign(distprox=distmatrixflat.values) # Filter results for distance proximity to other squares df = df[(df['distprox'] >= 4)] # Remove all not numeric values use to_numeric with parameter, errors='coerce' - it replace non numeric to NaNs: df['x'] = pd.to_numeric(df['x'], errors='coerce') df['y'] = pd.to_numeric(df['y'], errors='coerce') # Remove NaN df = df.dropna() if df['x'].min() is np.nan or df['y'].min() is np.nan: fatal_error('No color card found under current parameters') else: # Extract the starting coordinate start_coord = (df['x'].min(), df['y'].min()) # start_coord = (int(df['X'].min()), int(df['Y'].min())) # Calculate the range spacingx_short = (df['x'].max() - df['x'].min()) / 3 spacingy_short = (df['y'].max() - df['y'].min()) / 3 spacingx_long = (df['x'].max() - df['x'].min()) / 5 spacingy_long = (df['y'].max() - df['y'].min()) / 5 # Chip spacing since 4x6 card assumed spacing_short = min(spacingx_short, spacingy_short) spacing_long = max(spacingx_long, spacingy_long) # Smaller spacing measurement might have a chip missing spacing = int(max(spacing_short, spacing_long)) spacing = (spacing, spacing) if record_chip_size is not None: if record_chip_size.upper() == ""MEDIAN"": chip_size = df.loc[:,""area""].median() elif record_chip_size.upper() == ""MEAN"": chip_size = df.loc[:,""area""].mean() else: print(str(record_chip_size) + "" Is not a valid entry for record_chip_size."" + "" Must be either 'mean', 'median', or None."") chip_size = None # Store into global measurements outputs.add_observation(variable='color_chip_size', trait='size of color card chips identified', method='plantcv.plantcv.transform.find_color_card', scale='none', datatype=float, value=chip_size, label=str(record_chip_size)) return df, start_coord, spacing " 40775,"def initialize(backend: str, **kwargs: Any) -> None: """"""Initializes distributed configuration according to provided ``backend`` Args: backend: backend: `nccl`, `gloo`, `xla-tpu`, `horovod`. kwargs: acceptable kwargs according to provided backend: - | ""nccl"" or ""gloo"" : ``timeout(=timedelta(minutes=30))``, ``init_method(=None)``, | ``rank(=None)``, ``world_size(=None)``. | By default, ``init_method`` will be ""env://"". See more info about parameters: `torch_init`_. - | ""horovod"" : comm(=None), more info: `hvd_init`_. Examples: Launch single node multi-GPU training with ``torchrun`` or ``torch.distributed.launch`` utility. .. 
code-block:: bash # >>> torchrun -nproc_per_node=4 main.py # or # >>> python -m torch.distributed.launch --nproc_per_node=4 main.py # main.py import ignite.distributed as idist def train_fn(local_rank, a, b, c): import torch.distributed as dist assert dist.is_available() and dist.is_initialized() assert dist.get_world_size() == 4 device = idist.device() assert device == torch.device(f""cuda:{local_rank}"") backend = ""nccl"" # or ""gloo"" or ""horovod"" or ""xla-tpu"" idist.initialize(backend) # or for torch native distributed on Windows: # idist.initialize(""nccl"", init_method=""file://tmp/shared"") local_rank = idist.get_local_rank() train_fn(local_rank, a, b, c) idist.finalize() .. _torch_init: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group .. _hvd_init: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.torch .. versionchanged:: 0.4.2 ``backend`` now accepts `horovod` distributed framework. .. versionchanged:: 0.4.5 ``kwargs`` now accepts ``init_method``, ``rank``, ``world_size`` for PyTorch native distributed backend. """""" if not (has_xla_support or has_native_dist_support or has_hvd_support): # nothing to do => serial model # maybe warn about this return _assert_backend(backend) for comp_model_cls in registered_computation_models: if backend not in comp_model_cls.available_backends: continue _set_model(comp_model_cls(backend, **kwargs)) ","def initialize(backend: str, **kwargs: Any) -> None: """"""Initializes distributed configuration according to provided ``backend`` Args: backend: backend: `nccl`, `gloo`, `xla-tpu`, `horovod`. kwargs: acceptable kwargs according to provided backend: - | ""nccl"" or ""gloo"" : ``timeout(=timedelta(minutes=30))``, ``init_method(=None)``, | ``rank(=None)``, ``world_size(=None)``. | By default, ``init_method`` will be ""env://"". See more info about parameters: `torch_init`_. - | ""horovod"" : comm(=None), more info: `hvd_init`_. Examples: Launch single node multi-GPU training with ``torchrun`` or ``torch.distributed.run`` utility. .. code-block:: bash # >>> torchrun -nproc_per_node=4 main.py # or # >>> python -m torch.distributed.launch --nproc_per_node=4 main.py # main.py import ignite.distributed as idist def train_fn(local_rank, a, b, c): import torch.distributed as dist assert dist.is_available() and dist.is_initialized() assert dist.get_world_size() == 4 device = idist.device() assert device == torch.device(f""cuda:{local_rank}"") backend = ""nccl"" # or ""gloo"" or ""horovod"" or ""xla-tpu"" idist.initialize(backend) # or for torch native distributed on Windows: # idist.initialize(""nccl"", init_method=""file://tmp/shared"") local_rank = idist.get_local_rank() train_fn(local_rank, a, b, c) idist.finalize() .. _torch_init: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group .. _hvd_init: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.torch .. versionchanged:: 0.4.2 ``backend`` now accepts `horovod` distributed framework. .. versionchanged:: 0.4.5 ``kwargs`` now accepts ``init_method``, ``rank``, ``world_size`` for PyTorch native distributed backend. """""" if not (has_xla_support or has_native_dist_support or has_hvd_support): # nothing to do => serial model # maybe warn about this return _assert_backend(backend) for comp_model_cls in registered_computation_models: if backend not in comp_model_cls.available_backends: continue _set_model(comp_model_cls(backend, **kwargs)) " 47497,"def main(): args = parse_args() # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment accelerator = Accelerator(log_with=""all"", logging_dir=args.output_dir) if args.with_tracking else Accelerator() logger.info(accelerator.state) # Make one log on every process with the configuration for debugging. logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, "".gitignore""), ""w+"") as gitignore: if ""step_*"" not in gitignore: gitignore.write(""step_*\n"") if ""epoch_*"" not in gitignore: gitignore.write(""epoch_*\n"") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset(args.dataset_name, task=""image-classification"") else: data_files = {} if args.train_dir is not None: data_files[""train""] = os.path.join(args.train_dir, ""**"") if args.validation_dir is not None: data_files[""validation""] = os.path.join(args.validation_dir, ""**"") dataset = load_dataset( ""imagefolder"", data_files=data_files, cache_dir=args.cache_dir, task=""image-classification"", ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder. # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if ""validation"" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset[""train""].train_test_split(args.train_val_split) dataset[""train""] = split[""train""] dataset[""validation""] = split[""test""] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
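# (Illustrative aside, not part of the original script.) For a hypothetical dataset whose
# class names are ["cat", "dog", "horse"], the two mappings built just below come out as
#   label2id = {"cat": "0", "dog": "1", "horse": "2"}
#   id2label = {"0": "cat", "1": "dog", "2": "horse"}
# i.e. the ids are kept as strings in this script.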
labels = dataset[""train""].features[""labels""].names label2id = {label: str(i) for i, label in enumerate(labels)} id2label = {str(i): label for i, label in enumerate(labels)} # Load pretrained model and feature extractor # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=len(labels), i2label=id2label, label2id=label2id, finetuning_task=""image-classification"", ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForImageClassification.from_pretrained( args.model_name_or_path, from_tf=bool("".ckpt"" in args.model_name_or_path), config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image. normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) train_transforms = Compose( [ RandomResizedCrop(feature_extractor.size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(feature_extractor.size), CenterCrop(feature_extractor.size), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """"""Apply _train_transforms across a batch."""""" example_batch[""pixel_values""] = [train_transforms(image.convert(""RGB"")) for image in example_batch[""image""]] return example_batch def preprocess_val(example_batch): """"""Apply _val_transforms across a batch."""""" example_batch[""pixel_values""] = [val_transforms(image.convert(""RGB"")) for image in example_batch[""image""]] return example_batch with accelerator.main_process_first(): if args.max_train_samples is not None: dataset[""train""] = dataset[""train""].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset[""train""].with_transform(preprocess_train) if args.max_eval_samples is not None: dataset[""validation""] = dataset[""validation""].shuffle(seed=args.seed).select(range(args.max_eval_samples)) # Set the validation transforms eval_dataset = dataset[""validation""].with_transform(preprocess_val) # DataLoaders creation: def collate_fn(examples): pixel_values = torch.stack([example[""pixel_values""] for example in examples]) labels = torch.tensor([example[""labels""] for example in examples]) return {""pixel_values"": pixel_values, ""labels"": labels} train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = [""bias"", ""LayerNorm.weight""] optimizer_grouped_parameters = [ { ""params"": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], ""weight_decay"": args.weight_decay, }, { ""params"": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], ""weight_decay"": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. 
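# (Worked example added for clarity, with hypothetical numbers.) If the train dataloader
# yields 1000 batches per epoch, gradient_accumulation_steps=4 and num_train_epochs=3, then:
#   num_update_steps_per_epoch = ceil(1000 / 4) = 250
#   max_train_steps (when not given) = 3 * 250 = 750 optimizer updates in total
# If max_train_steps *is* given, num_train_epochs is recomputed from it the other way round.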
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Figure out how many steps we should save the Accelerator states if hasattr(args.checkpointing_steps, ""isdigit""): checkpointing_steps = args.checkpointing_steps if args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config[""lr_scheduler_type""] = experiment_config[""lr_scheduler_type""].value accelerator.init_trackers(""image_classification_no_trainer"", experiment_config) # Get the metric function metric = load_metric(""accuracy"") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != """": accelerator.print(f""Resumed from checkpoint: {args.resume_from_checkpoint}"") accelerator.load_state(args.resume_from_checkpoint) resume_step = None path = args.resume_from_checkpoint else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last if ""epoch"" in path: args.num_train_epochs -= int(path.replace(""epoch_"", """")) else: resume_step = int(path.replace(""step_"", """")) args.num_train_epochs -= resume_step // len(train_dataloader) resume_step = (args.num_train_epochs * len(train_dataloader)) - resume_step for epoch in range(args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == 0 and step < resume_step: continue outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f""step_{completed_steps}"" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f""Training in progress {completed_steps} steps"", blocking=False, auto_lfs_prune=True, ) if completed_steps >= args.max_train_steps: break model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch[""labels""])) # If we are in a multiprocessing environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader): predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() logger.info(f""epoch {epoch}: {eval_metric}"") if args.with_tracking: accelerator.log( { ""accuracy"": eval_metric, ""train_loss"": total_loss, ""epoch"": epoch, ""step"": completed_steps, }, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) 
repo.push_to_hub( commit_message=f""Training in progress epoch {epoch}"", blocking=False, auto_lfs_prune=True ) if args.checkpointing_steps == ""epoch"": output_dir = f""epoch_{epoch}"" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message=""End of training"", auto_lfs_prune=True) if args.output_dir is not None: with open(os.path.join(args.output_dir, ""all_results.json""), ""w"") as f: json.dump({""eval_accuracy"": eval_metric[""accuracy""]}, f) ","def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment accelerator = Accelerator(log_with=""all"", logging_dir=args.output_dir) if args.with_tracking else Accelerator() logger.info(accelerator.state) # Make one log on every process with the configuration for debugging. logging.basicConfig( format=""%(asctime)s - %(levelname)s - %(name)s - %(message)s"", datefmt=""%m/%d/%Y %H:%M:%S"", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, "".gitignore""), ""w+"") as gitignore: if ""step_*"" not in gitignore: gitignore.write(""step_*\n"") if ""epoch_*"" not in gitignore: gitignore.write(""epoch_*\n"") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
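# (Added note, not part of the original script.) Whichever branch runs below, the loaded
# dataset is expected to expose an "image" column (PIL images) and a "labels" class-label
# column -- that is what the preprocessing and collate functions further down rely on.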
dataset = load_dataset(args.dataset_name, task=""image-classification"") else: data_files = {} if args.train_dir is not None: data_files[""train""] = os.path.join(args.train_dir, ""**"") if args.validation_dir is not None: data_files[""validation""] = os.path.join(args.validation_dir, ""**"") dataset = load_dataset( ""imagefolder"", data_files=data_files, cache_dir=args.cache_dir, task=""image-classification"", ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder. # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if ""validation"" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset[""train""].train_test_split(args.train_val_split) dataset[""train""] = split[""train""] dataset[""validation""] = split[""test""] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. labels = dataset[""train""].features[""labels""].names label2id = {label: str(i) for i, label in enumerate(labels)} id2label = {str(i): label for i, label in enumerate(labels)} # Load pretrained model and feature extractor # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=len(labels), i2label=id2label, label2id=label2id, finetuning_task=""image-classification"", ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForImageClassification.from_pretrained( args.model_name_or_path, from_tf=bool("".ckpt"" in args.model_name_or_path), config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image. 
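# (Illustrative aside, assuming a hypothetical feature_extractor.size of 224.) With the eval
# pipeline defined below, any RGB PIL image ends up as a normalized float tensor of shape
# [3, 224, 224]: Resize (shorter side to 224) -> CenterCrop(224) -> ToTensor -> Normalize.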
normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) train_transforms = Compose( [ RandomResizedCrop(feature_extractor.size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(feature_extractor.size), CenterCrop(feature_extractor.size), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """"""Apply _train_transforms across a batch."""""" example_batch[""pixel_values""] = [train_transforms(image.convert(""RGB"")) for image in example_batch[""image""]] return example_batch def preprocess_val(example_batch): """"""Apply _val_transforms across a batch."""""" example_batch[""pixel_values""] = [val_transforms(image.convert(""RGB"")) for image in example_batch[""image""]] return example_batch with accelerator.main_process_first(): if args.max_train_samples is not None: dataset[""train""] = dataset[""train""].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset[""train""].with_transform(preprocess_train) if args.max_eval_samples is not None: dataset[""validation""] = dataset[""validation""].shuffle(seed=args.seed).select(range(args.max_eval_samples)) # Set the validation transforms eval_dataset = dataset[""validation""].with_transform(preprocess_val) # DataLoaders creation: def collate_fn(examples): pixel_values = torch.stack([example[""pixel_values""] for example in examples]) labels = torch.tensor([example[""labels""] for example in examples]) return {""pixel_values"": pixel_values, ""labels"": labels} train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = [""bias"", ""LayerNorm.weight""] optimizer_grouped_parameters = [ { ""params"": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], ""weight_decay"": args.weight_decay, }, { ""params"": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], ""weight_decay"": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Figure out how many steps we should save the Accelerator states if hasattr(args.checkpointing_steps, ""isdigit""): checkpointing_steps = args.checkpointing_steps if args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config[""lr_scheduler_type""] = experiment_config[""lr_scheduler_type""].value accelerator.init_trackers(""image_classification_no_trainer"", experiment_config) # Get the metric function metric = load_metric(""accuracy"") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info(""***** Running training *****"") logger.info(f"" Num examples = {len(train_dataset)}"") logger.info(f"" Num Epochs = {args.num_train_epochs}"") logger.info(f"" Instantaneous batch size per device = {args.per_device_train_batch_size}"") logger.info(f"" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"") logger.info(f"" Gradient Accumulation steps = {args.gradient_accumulation_steps}"") logger.info(f"" Total optimization steps = {args.max_train_steps}"") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != """": accelerator.print(f""Resumed from checkpoint: {args.resume_from_checkpoint}"") accelerator.load_state(args.resume_from_checkpoint) resume_step = None path = args.resume_from_checkpoint else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last if ""epoch"" in path: args.num_train_epochs -= int(path.replace(""epoch_"", """")) else: resume_step = int(path.replace(""step_"", """")) args.num_train_epochs -= resume_step // len(train_dataloader) resume_step = (args.num_train_epochs * len(train_dataloader)) - resume_step for epoch in range(args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == 0 and step < resume_step: continue outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f""step_{completed_steps}"" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() 
unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f""Training in progress {completed_steps} steps"", blocking=False, auto_lfs_prune=True, ) if completed_steps >= args.max_train_steps: break model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch[""labels""])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader): predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() logger.info(f""epoch {epoch}: {eval_metric}"") if args.with_tracking: accelerator.log( { ""accuracy"": eval_metric, ""train_loss"": total_loss, ""epoch"": epoch, ""step"": completed_steps, }, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f""Training in progress epoch {epoch}"", blocking=False, auto_lfs_prune=True ) if args.checkpointing_steps == ""epoch"": output_dir = f""epoch_{epoch}"" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message=""End of training"", auto_lfs_prune=True) if args.output_dir is not None: with open(os.path.join(args.output_dir, ""all_results.json""), ""w"") as f: json.dump({""eval_accuracy"": eval_metric[""accuracy""]}, f) " 31459,"def decode_screenshot(r): i = BytesIO(r.content) res = fileResult('myfile', i.read()) return res ","def decode_screenshot(r): i = BytesIO(r.content) res = fileResult('myfile', i.read(), file_type=EntryTypes.ENTRY_INFO_FILE) return res " 30651,"def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" params = demisto.params() username = params.get('credentials', {}).get('identifier', '') password = params.get('credentials', {}).get('password', '') base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url'] verify_certificate = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) demisto.debug(f'Command being called is {demisto.command()}') try: client = Client( base_url=base_url, verify=verify_certificate, auth=(username, password), proxy=proxy, ok_codes=(200, 201, 204), headers={'accept': ""application/json""} ) if demisto.command() == 'test-module': result = test_module(client) return_outputs(result) elif demisto.command() == 'guardian-search': result = search_by_query(client, demisto.args()) return_outputs(result[0], result[1], result[2]) elif 
demisto.command() == 'guardian-list-all-assets': result = list_all_assets(client) return_outputs(result[0], result[1], result[2]) elif demisto.command() == 'guardian-find-ip-by-mac': result = find_ip_by_mac(client, demisto.args()) return_outputs(result[0], result[1], result[2]) except Exception as e: return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}')) ","def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" params = demisto.params() username = params.get('credentials', {}).get('identifier', '') password = params.get('credentials', {}).get('password', '') base_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url'] verify_certificate = not demisto.params().get('insecure', False) proxy = params.get('proxy', False) demisto.debug(f'Command being called is {demisto.command()}') try: client = Client( base_url=base_url, verify=verify_certificate, auth=(username, password), proxy=proxy, ok_codes=(200, 201, 204), headers={'accept': ""application/json""} ) if demisto.command() == 'test-module': result = test_module(client) return_outputs(result) elif demisto.command() == 'guardian-search': result = search_by_query(client, demisto.args()) return_outputs(result[0], result[1], result[2]) elif demisto.command() == 'guardian-list-all-assets': result = list_all_assets(client) return_outputs(result[0], result[1], result[2]) elif demisto.command() == 'guardian-find-ip-by-mac': result = find_ip_by_mac(client, demisto.args()) return_outputs(result[0], result[1], result[2]) except Exception as e: return_error(str(f'Failed to execute {demisto.command()} command. Error: {str(e)}')) " 5669,"def somersd(x, y=None): """""" Calculates Somers' D, an asymmetric measure of ordinal association Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the correspondence between two rankings. Both statistics consider the difference between the number of concordant and discordant pairs in two rankings :math:`X` and :math:`Y`, and both are normalized such that values close to 1 indicate strong agreement and values close to -1 indicate strong disagreement. They differ in how they are normalized. To show the relationship, Somers' :math:`D` can be defined in terms of Kendall's :math:`\tau_a`: .. math:: D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)} Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of :math:`n` rankings can also be viewed as an :math:`r \\times s` contingency table in which element :math:`i, j` is the number of rank pairs with rank :math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`. Accordingly, `somersd` also allows the input data to be supplied as a single, 2D contingency table instead of as two separate, 1D rankings. Note that the definition of Somers' :math:`D` is asymmetric: in general, :math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers' :math:`D(Y|X)`: the ""row"" variable :math:`X` is treated as an independent variable, and the ""column"" variable :math:`Y` is dependent. For Somers' :math:`D(X|Y)`, swap the input lists or transpose the input table. Parameters ---------- x: array_like 1D array of rankings, treated as the (row) independent variable. Alternatively, a 2D contingency table. y: array_like If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the same length, treated as the (column) dependent variable. If `x` is 2D, `y` is ignored. 
Returns ------- res : SomersDResult A `SomersDResult` object with the following fields: correlation : float The Somers' :math:`D` statistic. pvalue : float The two-sided p-value for a hypothesis test whose null hypothesis is an absence of association, :math:`D=0`. table : 2D array The contingency table formed from rankings `x` and `y` (or the provided contingency table, if `x` is a 2D array) See Also -------- kendalltau : Calculates Kendall’s tau, another correlation measure. weightedtau : Computes a weighted version of Kendall's tau. spearmanr : Calculates a Spearman rank-order correlation coefficient. pearsonr : Calculates a Pearson correlation coefficient. Notes ----- This function follows the contingency table approach of [2]_ and [3]_ rather than relying on `scipy.stats.kendalltau`, and *p*-values are computed based on an asymptotic approximation. Theoretically, the *p*-values corresponding with hypothesis tests based on :math:'tau' and :math:'D' should be identical, but the *p*-values returned by `scipy.stats.kendalltau` are based on a different approximation. Contingency tables are formatted according to the convention used by SAS and R: the first ranking supplied (``x``) is the ""row"" variable, and the second ranking supplied (``y``) is the ""column"" variable. This is opposite the convention of Somers' original paper [1]_. References ---------- .. [1] Robert H. Somers, ""A New Asymmetric Measure of Association for Ordinal Variables"", *American Sociological Review*, Vol. 27, No. 6, pp. 799--811, 1962. .. [2] Morton B. Brown and Jacqueline K. Benedetti, ""Sampling Behavior of Tests for Correlation in Two-Way Contingency Tables"", *Journal of the American Statistical Association* Vol. 72, No. 358, pp. 309--315, 1977. .. [3] SAS Institute, Inc., ""The FREQ Procedure (Book Excerpt)"", *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009. .. [4] Laerd Statistics, ""Somers' d using SPSS Statistics"", *SPSS Statistics Tutorials and Statistical Guides*, https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php, Accessed July 31, 2020. Examples -------- We calculate Somers' D for the example given in [4]_, in which a hotel chain owner seeks to determine the association between hotel room cleanliness and customer satisfaction. The independent variable, hotel room cleanliness, is ranked on an ordinal scale: ""below average (1)"", ""average (2)"", or ""above average (3)"". The dependent variable, customer satisfaction, is ranked on a second scale: ""very dissatisfied (1)"", ""moderately dissatisfied (2)"", ""neither dissatisfied nor satisfied (3)"", ""moderately satisfied (4)"", or ""very satisfied (5)"". 189 customers respond to the survey, and the results are cast into a contingency table with the hotel room cleanliness as the ""row"" variable and customer satisfaction as the ""column"" variable. +-----+-----+-----+-----+-----+-----+ | | (1) | (2) | (3) | (4) | (5) | +=====+=====+=====+=====+=====+=====+ | (1) | 27 | 25 | 14 | 7 | 0 | +-----+-----+-----+-----+-----+-----+ | (2) | 7 | 14 | 18 | 35 | 12 | +-----+-----+-----+-----+-----+-----+ | (3) | 1 | 3 | 2 | 7 | 17 | +-----+-----+-----+-----+-----+-----+ For example, 27 customers assigned their room a cleanliness ranking of ""below average (1)"" and a corresponding satisfaction of ""very dissatisfied (1)"". We perform the analysis as follows. 
>>> from scipy.stats import somersd >>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]] >>> res = somersd(table) >>> res.statistic 0.6032766111513396 >>> res.pvalue 1.0007091191074533e-27 The value of the Somers' D statistic is approximately 0.6, indicating a positive correlation between room cleanliness and customer satisfaction in the sample. The *p*-value is very small, indicating a very small probability of observing such an extreme value of the statistic under the null hypothesis that the statistic of the entire population (from which our sample of 189 customers is drawn) is zero. This supports the alternative hypothesis that the true value of Somers' D for the population is nonzero. """""" x, y = np.array(x), np.array(y) if x.ndim == 1: table = _lists_to_ct(x, y) elif x.ndim == 2: table = x else: raise ValueError(""x must be either a 1D or 2D array"") d, p = _somers_d(table) return SomersDResult(d, p, table) ","def somersd(x, y=None): r"""""" Calculates Somers' D, an asymmetric measure of ordinal association Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the correspondence between two rankings. Both statistics consider the difference between the number of concordant and discordant pairs in two rankings :math:`X` and :math:`Y`, and both are normalized such that values close to 1 indicate strong agreement and values close to -1 indicate strong disagreement. They differ in how they are normalized. To show the relationship, Somers' :math:`D` can be defined in terms of Kendall's :math:`\tau_a`: .. math:: D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)} Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of :math:`n` rankings can also be viewed as an :math:`r \\times s` contingency table in which element :math:`i, j` is the number of rank pairs with rank :math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`. Accordingly, `somersd` also allows the input data to be supplied as a single, 2D contingency table instead of as two separate, 1D rankings. Note that the definition of Somers' :math:`D` is asymmetric: in general, :math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers' :math:`D(Y|X)`: the ""row"" variable :math:`X` is treated as an independent variable, and the ""column"" variable :math:`Y` is dependent. For Somers' :math:`D(X|Y)`, swap the input lists or transpose the input table. Parameters ---------- x: array_like 1D array of rankings, treated as the (row) independent variable. Alternatively, a 2D contingency table. y: array_like If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the same length, treated as the (column) dependent variable. If `x` is 2D, `y` is ignored. Returns ------- res : SomersDResult A `SomersDResult` object with the following fields: correlation : float The Somers' :math:`D` statistic. pvalue : float The two-sided p-value for a hypothesis test whose null hypothesis is an absence of association, :math:`D=0`. table : 2D array The contingency table formed from rankings `x` and `y` (or the provided contingency table, if `x` is a 2D array) See Also -------- kendalltau : Calculates Kendall’s tau, another correlation measure. weightedtau : Computes a weighted version of Kendall's tau. spearmanr : Calculates a Spearman rank-order correlation coefficient. pearsonr : Calculates a Pearson correlation coefficient. 
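As a rough illustration of the pair-counting definition above, the following brute-force sketch (added for intuition only; the hypothetical ``somers_d_brute`` helper is not part of SciPy, which instead works through the contingency table) computes :math:`D(Y|X)` directly::

    import itertools

    def somers_d_brute(x, y):
        # D(Y|X) = (concordant - discordant) / (pairs not tied on x)
        concordant = discordant = untied_x = 0
        for (xi, yi), (xj, yj) in itertools.combinations(zip(x, y), 2):
            if xi != xj:
                untied_x += 1
            prod = (xi - xj) * (yi - yj)
            if prod > 0:
                concordant += 1
            elif prod < 0:
                discordant += 1
        return (concordant - discordant) / untied_x

    somers_d_brute([1, 2, 3, 4], [1, 3, 2, 4])  # (5 - 1) / 6 ~= 0.67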
Notes ----- This function follows the contingency table approach of [2]_ and [3]_ rather than relying on `scipy.stats.kendalltau`, and *p*-values are computed based on an asymptotic approximation. Theoretically, the *p*-values corresponding with hypothesis tests based on :math:'tau' and :math:'D' should be identical, but the *p*-values returned by `scipy.stats.kendalltau` are based on a different approximation. Contingency tables are formatted according to the convention used by SAS and R: the first ranking supplied (``x``) is the ""row"" variable, and the second ranking supplied (``y``) is the ""column"" variable. This is opposite the convention of Somers' original paper [1]_. References ---------- .. [1] Robert H. Somers, ""A New Asymmetric Measure of Association for Ordinal Variables"", *American Sociological Review*, Vol. 27, No. 6, pp. 799--811, 1962. .. [2] Morton B. Brown and Jacqueline K. Benedetti, ""Sampling Behavior of Tests for Correlation in Two-Way Contingency Tables"", *Journal of the American Statistical Association* Vol. 72, No. 358, pp. 309--315, 1977. .. [3] SAS Institute, Inc., ""The FREQ Procedure (Book Excerpt)"", *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009. .. [4] Laerd Statistics, ""Somers' d using SPSS Statistics"", *SPSS Statistics Tutorials and Statistical Guides*, https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php, Accessed July 31, 2020. Examples -------- We calculate Somers' D for the example given in [4]_, in which a hotel chain owner seeks to determine the association between hotel room cleanliness and customer satisfaction. The independent variable, hotel room cleanliness, is ranked on an ordinal scale: ""below average (1)"", ""average (2)"", or ""above average (3)"". The dependent variable, customer satisfaction, is ranked on a second scale: ""very dissatisfied (1)"", ""moderately dissatisfied (2)"", ""neither dissatisfied nor satisfied (3)"", ""moderately satisfied (4)"", or ""very satisfied (5)"". 189 customers respond to the survey, and the results are cast into a contingency table with the hotel room cleanliness as the ""row"" variable and customer satisfaction as the ""column"" variable. +-----+-----+-----+-----+-----+-----+ | | (1) | (2) | (3) | (4) | (5) | +=====+=====+=====+=====+=====+=====+ | (1) | 27 | 25 | 14 | 7 | 0 | +-----+-----+-----+-----+-----+-----+ | (2) | 7 | 14 | 18 | 35 | 12 | +-----+-----+-----+-----+-----+-----+ | (3) | 1 | 3 | 2 | 7 | 17 | +-----+-----+-----+-----+-----+-----+ For example, 27 customers assigned their room a cleanliness ranking of ""below average (1)"" and a corresponding satisfaction of ""very dissatisfied (1)"". We perform the analysis as follows. >>> from scipy.stats import somersd >>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]] >>> res = somersd(table) >>> res.statistic 0.6032766111513396 >>> res.pvalue 1.0007091191074533e-27 The value of the Somers' D statistic is approximately 0.6, indicating a positive correlation between room cleanliness and customer satisfaction in the sample. The *p*-value is very small, indicating a very small probability of observing such an extreme value of the statistic under the null hypothesis that the statistic of the entire population (from which our sample of 189 customers is drawn) is zero. This supports the alternative hypothesis that the true value of Somers' D for the population is nonzero. 
"""""" x, y = np.array(x), np.array(y) if x.ndim == 1: table = _lists_to_ct(x, y) elif x.ndim == 2: table = x else: raise ValueError(""x must be either a 1D or 2D array"") d, p = _somers_d(table) return SomersDResult(d, p, table) " 711,"def createResolver(servers=None, resolvconf=None, hosts=None): if hosts is None: hosts = (b'/etc/hosts' if platform.getType() == 'posix' else r'c:\windows\hosts') theResolver = Resolver(resolvconf, servers) hostResolver = hostsModule.Resolver(hosts) L = [hostResolver, cache.CacheResolver(), theResolver] return resolve.ResolverChain(L) ","def createResolver(servers=None, resolvconf=None, hosts=None): if hosts is None: hosts = b'/etc/hosts' if platform.getType() == 'posix' else r'c:\windows\hosts' theResolver = Resolver(resolvconf, servers) hostResolver = hostsModule.Resolver(hosts) L = [hostResolver, cache.CacheResolver(), theResolver] return resolve.ResolverChain(L) " 49561,"def test_bag_groupby_pure_hash(bagsequence): """"""Test to ensure that `groupby` is grouping properly, when using 'pure' hashes like integers. Eg., hash(False) == False, but hash('test') != 'test'. This is testing the hash(False) == False case."""""" assert isinstance(bagsequence, Bag) # Probably a cleaner way to do this (maybe make them sets instead of lists??) result = sorted(bagsequence.groupby(iseven).compute()) ordered_result = [(elem[0], sorted(elem[1])) for elem in result] assert ordered_result == [(False, [1, 3, 5, 7, 9]), (True, [0, 2, 4, 6, 8])] ","def test_bag_groupby_pure_hash(): # https://github.com/dask/dask/issues/6640 result = b.groupby(iseven).compute() assert result == [(False, [1, 3] * 3), (True, [0, 2, 4] * 3)] " 5454,"def __virtual__(): global IP global HOST # metadata server information metadata_server_host = __opts__.get(""metadata_server_host"", """") if metadata_server_host != """": IP = metadata_server_host HOST = ""http://{}/"".format(IP) if __opts__.get(""metadata_server_grains"", False) is False: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(0.1) result = sock.connect_ex((IP, 80)) if result != 0: return False if http.query(os.path.join(HOST, ""latest/""), status=True).get(""status"") != 200: return False return True ","def __virtual__(): global IP global HOST # metadata server information metadata_server_host = __opts__.get(""metadata_server_host"") if metadata_server_host != """": IP = metadata_server_host HOST = ""http://{}/"".format(IP) if __opts__.get(""metadata_server_grains"", False) is False: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(0.1) result = sock.connect_ex((IP, 80)) if result != 0: return False if http.query(os.path.join(HOST, ""latest/""), status=True).get(""status"") != 200: return False return True " 6454,"def prepare_data(data, filters): """"""Prepare consolidated Report data and Chart data"""""" material_request_map, item_qty_map = {}, {} for row in data: # item wise map for charts if not row[""item_code""] in item_qty_map: item_qty_map[row[""item_code""]] = { ""qty"" : row[""qty""], ""ordered_qty"" : row[""ordered_qty""], ""received_qty"": row[""received_qty""], ""pending_qty"": row[""pending_qty""], ""qty_to_order"" : row[""qty_to_order""], } else: item_entry = item_qty_map[row[""item_code""]] update_qty_columns(item_entry, row) if filters.get(""group_by_mr""): # consolidated material request map for group by filter if not row[""material_request""] in material_request_map: # create an entry with mr as key row_copy = copy.deepcopy(row) 
material_request_map[row[""material_request""]] = row_copy else: mr_row = material_request_map[row[""material_request""]] mr_row[""required_date""] = min(getdate(mr_row[""required_date""]), getdate(row[""required_date""])) #sum numeric columns update_qty_columns(mr_row, row) chart_data = prepare_chart_data(item_qty_map) if filters.get(""group_by_mr""): data =[] for mr in material_request_map: data.append(material_request_map[mr]) return data, chart_data return data, chart_data ","def prepare_data(data, filters): """"""Prepare consolidated Report data and Chart data"""""" material_request_map, item_qty_map = {}, {} for row in data: # item wise map for charts if not row[""item_code""] in item_qty_map: item_qty_map[row[""item_code""]] = { ""qty"" : row[""qty""], ""ordered_qty"" : row[""ordered_qty""], ""received_qty"": row[""received_qty""], ""qty_to_receive"": row[""qty_to_receive""], ""qty_to_order"" : row[""qty_to_order""], } else: item_entry = item_qty_map[row[""item_code""]] update_qty_columns(item_entry, row) if filters.get(""group_by_mr""): # consolidated material request map for group by filter if not row[""material_request""] in material_request_map: # create an entry with mr as key row_copy = copy.deepcopy(row) material_request_map[row[""material_request""]] = row_copy else: mr_row = material_request_map[row[""material_request""]] mr_row[""required_date""] = min(getdate(mr_row[""required_date""]), getdate(row[""required_date""])) #sum numeric columns update_qty_columns(mr_row, row) chart_data = prepare_chart_data(item_qty_map) if filters.get(""group_by_mr""): data =[] for mr in material_request_map: data.append(material_request_map[mr]) return data, chart_data return data, chart_data " 44594,"def prepare( ursadb: str, workdir: Path, path: Path, batch: int, max_file_size: int, mounted_as: str, ) -> None: if not workdir.exists(): workdir.mkdir() logging.info(""Prepare.1: load all indexed files into memory."") ursa = UrsaDb(ursadb) fileset = all_indexed_files(ursa) logging.info(""Prepare.2: find all new files."") tmpfile = None current_batch = 10 ** 20 # As good as infinity. new_files = 0 batch_id = 0 for f in find_new_files(fileset, path, mounted_as): if Path(f).stat().st_size > max_file_size: continue if current_batch > batch: if tmpfile is not None: tmpfile.close() current_batch = 0 tmppath = workdir / f""batch_{batch_id:010}.txt"" tmpfile = tmppath.open(mode=""w"") batch_id += 1 assert tmpfile is not None # Let mypy know the obvious. tmpfile.write(f""{f}\n"") current_batch += 1 new_files += 1 if tmpfile is not None: tmpfile.close() logging.info( ""Prepare.3: Got %s files in %s batches to index."", new_files, batch_id ) ","def prepare( ursadb: str, workdir: Path, path: Path, batch: int, max_file_size: int, mounted_as: str, ) -> None: if not workdir.exists(): workdir.mkdir() logging.info(""Prepare.1: load all indexed files into memory."") ursa = UrsaDb(ursadb) fileset = all_indexed_files(ursa) logging.info(""Prepare.2: find all new files."") tmpfile = None current_batch = 10 ** 20 # As good as infinity. new_files = 0 batch_id = 0 for f in find_new_files(fileset, path, mounted_as): if Path(f).stat().st_size > max_file_size: continue if current_batch > batch: if tmpfile is not None: tmpfile.close() current_batch = 0 if current_batch is None or current_batch > batch: tmpfile = tmppath.open(mode=""w"") batch_id += 1 assert tmpfile is not None # Let mypy know the obvious. 
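# (Added note, not part of the original code.) Net effect of the batching above: new file
# paths are spread across workdir/batch_0000000000.txt, batch_0000000001.txt, ... with
# roughly `batch` paths per file, so later stages can index the collection one batch at a time.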
tmpfile.write(f""{f}\n"") current_batch += 1 new_files += 1 if tmpfile is not None: tmpfile.close() logging.info( ""Prepare.3: Got %s files in %s batches to index."", new_files, batch_id ) " 51824,"def view_func_parser(parsed_name): # What method are we using for this view if parsed_name in (""hardlink"", ""hard""): link_fn = view_hardlink elif parsed_name in (""copy"", ""relocate""): link_fn = view_copy else: link_fn = view_symlink return link_fn ","def view_func_parser(parsed_name): # What method are we using for this view if parsed_name in (""hardlink"", ""hard""): return view_hardlink elif parsed_name in (""copy"", ""relocate""): return view_copy elif parsed_name == ""symlink"": return view_symlink else: raise ValueError(""invalid link type for view: '%s'"" % parsed_name) " 25795,"def value_length(length, null=False): """""" Return a value check function which raises a ValueError if the supplied value has a length greater than 'length' If null is not True raises a ValueError if the supplied value is None. """""" def checker(v): if null and v is None: return checker if len(v) > length: raise ValueError(v) return checker ","def value_length(length, allow_null=False): """""" Return a value check function which raises a ValueError if the supplied value has a length greater than 'length' If null is not True raises a ValueError if the supplied value is None. """""" def checker(v): if null and v is None: return checker if len(v) > length: raise ValueError(v) return checker " 39469,"def parse_device(svdfile): tree = ET.parse(svdfile) temp = os.stat(svdfile).st_mtime dname = tree.findtext('name') peripherals = {} device_fields_total = 0 device_fields_documented = 0 for ptag in tree.iter('peripheral'): registers = {} peripheral_fields_total = 0 peripheral_fields_documented = 0 pname = ptag.findtext('name') pbase = ptag.findtext('baseAddress') if 'derivedFrom' in ptag.attrib: dfname = ptag.attrib['derivedFrom'] dffrom = tree.findall("".//peripheral/[name='"" + dfname + ""']"") if dffrom: ptag = dffrom[0] else: print(""Can't find derivedFrom={} for {}"" .format(dfname, pname)) continue pdesc = ptag.findtext('description') for rtag in ptag.iter('register'): fields = {} register_fields_total = 0 register_fields_documented = 0 rname = rtag.findtext('name') rdesc = rtag.findtext('description') rrstv = rtag.findtext('resetValue') raccs = rtag.findtext('access') or ""Unspecified"" addr_offset = rtag.findtext('addressOffset') roffset = int(addr_offset, 16 if addr_offset.startswith('0x') else 10) for ftag in rtag.iter('field'): register_fields_total += 1 fname = ftag.findtext('name') fdesc = ftag.findtext('description') # Some svd files will specify a bitRange rather than # bitOffset and bitWidth frange = ftag.findtext('bitRange') if frange: parts = frange[1:-1].split(':') end = int(parts[0]) start = int(parts[1]) foffset = start fwidth = end - start + 1 else: foffset = int(ftag.findtext('bitOffset')) fwidth = int(ftag.findtext('bitWidth')) faccs = ftag.findtext('access') or raccs enum = ftag.find('enumeratedValues') wc = ftag.find('writeConstraint') doc = False if enum is not None or wc is not None or faccs == ""read-only"": register_fields_documented += 1 if enum is not None: doc = ""Allowed values:
"" if 'derivedFrom' in enum.attrib: dfname = enum.attrib['derivedFrom'] dffrom = rtag.findall( "".//enumeratedValues/[name='"" + dfname + ""']"") if dffrom: enum = dffrom[0] for value in enum.iter('enumeratedValue'): doc += """" doc += value.findtext('value') doc += "": "" doc += value.findtext('name') doc += "": "" doc += value.findtext('description') doc += ""
"" elif wc is not None: wcrange = wc.find('range') if wcrange is not None: mn = wcrange.findtext('minimum') mx = wcrange.findtext('maximum') doc = ""Allowed values: {}-{}"".format(mn, mx) fields[foffset] = {""name"": fname, ""offset"": foffset, ""width"": fwidth, ""description"": fdesc, ""doc"": doc, ""access"": faccs} table = [[{""name"": """", ""width"": 1, ""doc"": False} for _ in range(16)] for _ in range(2)] for foffset in reversed(sorted(fields.keys())): fwidth = fields[foffset]['width'] fname = fields[foffset]['name'] fdoc = bool(fields[foffset]['doc']) faccs = fields[foffset]['access'] for idx in range(foffset, foffset + fwidth): trowidx = (31 - idx)//16 tcolidx = 15 - (idx % 16) tcell = table[trowidx][tcolidx] tcell['name'] = fname tcell['doc'] = fdoc tcell['access'] = short_access(faccs) tcell['separated'] = foffset < 16 and foffset + fwidth > 16 for trow in table: idx = 0 while idx < len(trow)-1: if trow[idx]['name'] == trow[idx+1]['name']: trow[idx]['width'] += 1 del trow[idx+1] continue idx += 1 table = [ {""headers"": reversed(list(range(16, 32))), ""fields"": table[0]}, {""headers"": reversed(list(range(0, 16))), ""fields"": table[1]}] # Bodge to prevent /0 when there are no fields in a register if register_fields_total == 0: register_fields_total = 1 registers[rname] = {""name"": rname, ""offset"": ""0x{:X}"".format(roffset), ""description"": rdesc, ""resetValue"": rrstv, ""access"": raccs, ""fields"": fields, ""table"": table, ""fields_total"": register_fields_total, ""fields_documented"": register_fields_documented} peripheral_fields_total += register_fields_total peripheral_fields_documented += register_fields_documented peripherals[pname] = {""name"": pname, ""base"": pbase, ""description"": pdesc, ""registers"": registers, ""fields_total"": peripheral_fields_total, ""fields_documented"": peripheral_fields_documented} device_fields_total += peripheral_fields_total device_fields_documented += peripheral_fields_documented return {""name"": dname, ""peripherals"": peripherals, ""fields_total"": device_fields_total, ""fields_documented"": device_fields_documented, ""last-modified"": temp, ""svdfile"": svdfile} ","def parse_device(svdfile): tree = ET.parse(svdfile) temp = os.stat(svdfile).st_mtime dname = tree.findtext('name') peripherals = {} device_fields_total = 0 device_fields_documented = 0 for ptag in tree.iter('peripheral'): registers = {} peripheral_fields_total = 0 peripheral_fields_documented = 0 pname = ptag.findtext('name') pbase = ptag.findtext('baseAddress') if 'derivedFrom' in ptag.attrib: dfname = ptag.attrib['derivedFrom'] dffrom = tree.findall("".//peripheral/[name='"" + dfname + ""']"") if dffrom: ptag = dffrom[0] else: print(""Can't find derivedFrom={} for {}"" .format(dfname, pname)) continue pdesc = ptag.findtext('description') for rtag in ptag.iter('register'): fields = {} register_fields_total = 0 register_fields_documented = 0 rname = rtag.findtext('name') rdesc = rtag.findtext('description') rrstv = rtag.findtext('resetValue') raccs = rtag.findtext('access') or ""Unspecified"" roffset = int(rtag.findtext('addressOffset'), 0) for ftag in rtag.iter('field'): register_fields_total += 1 fname = ftag.findtext('name') fdesc = ftag.findtext('description') # Some svd files will specify a bitRange rather than # bitOffset and bitWidth frange = ftag.findtext('bitRange') if frange: parts = frange[1:-1].split(':') end = int(parts[0]) start = int(parts[1]) foffset = start fwidth = end - start + 1 else: foffset = int(ftag.findtext('bitOffset')) fwidth = 
int(ftag.findtext('bitWidth')) faccs = ftag.findtext('access') or raccs enum = ftag.find('enumeratedValues') wc = ftag.find('writeConstraint') doc = False if enum is not None or wc is not None or faccs == ""read-only"": register_fields_documented += 1 if enum is not None: doc = ""Allowed values:
"" if 'derivedFrom' in enum.attrib: dfname = enum.attrib['derivedFrom'] dffrom = rtag.findall( "".//enumeratedValues/[name='"" + dfname + ""']"") if dffrom: enum = dffrom[0] for value in enum.iter('enumeratedValue'): doc += """" doc += value.findtext('value') doc += "": "" doc += value.findtext('name') doc += "": "" doc += value.findtext('description') doc += ""
"" elif wc is not None: wcrange = wc.find('range') if wcrange is not None: mn = wcrange.findtext('minimum') mx = wcrange.findtext('maximum') doc = ""Allowed values: {}-{}"".format(mn, mx) fields[foffset] = {""name"": fname, ""offset"": foffset, ""width"": fwidth, ""description"": fdesc, ""doc"": doc, ""access"": faccs} table = [[{""name"": """", ""width"": 1, ""doc"": False} for _ in range(16)] for _ in range(2)] for foffset in reversed(sorted(fields.keys())): fwidth = fields[foffset]['width'] fname = fields[foffset]['name'] fdoc = bool(fields[foffset]['doc']) faccs = fields[foffset]['access'] for idx in range(foffset, foffset + fwidth): trowidx = (31 - idx)//16 tcolidx = 15 - (idx % 16) tcell = table[trowidx][tcolidx] tcell['name'] = fname tcell['doc'] = fdoc tcell['access'] = short_access(faccs) tcell['separated'] = foffset < 16 and foffset + fwidth > 16 for trow in table: idx = 0 while idx < len(trow)-1: if trow[idx]['name'] == trow[idx+1]['name']: trow[idx]['width'] += 1 del trow[idx+1] continue idx += 1 table = [ {""headers"": reversed(list(range(16, 32))), ""fields"": table[0]}, {""headers"": reversed(list(range(0, 16))), ""fields"": table[1]}] # Bodge to prevent /0 when there are no fields in a register if register_fields_total == 0: register_fields_total = 1 registers[rname] = {""name"": rname, ""offset"": ""0x{:X}"".format(roffset), ""description"": rdesc, ""resetValue"": rrstv, ""access"": raccs, ""fields"": fields, ""table"": table, ""fields_total"": register_fields_total, ""fields_documented"": register_fields_documented} peripheral_fields_total += register_fields_total peripheral_fields_documented += register_fields_documented peripherals[pname] = {""name"": pname, ""base"": pbase, ""description"": pdesc, ""registers"": registers, ""fields_total"": peripheral_fields_total, ""fields_documented"": peripheral_fields_documented} device_fields_total += peripheral_fields_total device_fields_documented += peripheral_fields_documented return {""name"": dname, ""peripherals"": peripherals, ""fields_total"": device_fields_total, ""fields_documented"": device_fields_documented, ""last-modified"": temp, ""svdfile"": svdfile} " 30427,"def re_create_id_set(): start_time = time.time() scripts_list = [] playbooks_list = [] integration_list = [] testplaybooks_list = [] pool = Pool(processes=cpu_count() * 2) print_color(""Starting the creation of the id_set"", LOG_COLORS.GREEN) print_color(""Starting iterating over Integrations"", LOG_COLORS.GREEN) for arr in pool.map(process_integration, get_integrations_paths()): integration_list.extend(arr) print_color(""Starting iterating over Playbooks"", LOG_COLORS.GREEN) for arr in pool.map(process_playbook, get_playbooks_paths()): playbooks_list.extend(arr) print_color(""Starting iterating over Scripts"", LOG_COLORS.GREEN) for arr in pool.map(process_script, get_scripts_paths()): scripts_list.extend(arr) print_color(""Starting iterating over TestPlaybooks"", LOG_COLORS.GREEN) for pair in pool.map(process_testplaybook_path, get_test_playbooks_paths()): if pair[0]: testplaybooks_list.append(pair[0]) if pair[1]: scripts_list.append(pair[1]) new_ids_dict = OrderedDict() # we sort each time the whole set in case someone manually changed something # it shouldn't take too much time new_ids_dict['scripts'] = sort(scripts_list) new_ids_dict['playbooks'] = sort(playbooks_list) new_ids_dict['integrations'] = sort(integration_list) new_ids_dict['TestPlaybooks'] = sort(testplaybooks_list) with open('./Tests/id_set.json', 'w') as id_set_file: json.dump(new_ids_dict, 
id_set_file, indent=4) exec_time = time.time() - start_time print_color(""Finished the creation of the id_set. Total time: {} seconds"".format(exec_time), LOG_COLORS.GREEN) duplicates = find_duplicates(new_ids_dict) if duplicates: print_error('The following duplicates were found: {}'.format(duplicates)) sys.exit(1) ","def re_create_id_set(): start_time = time.time() scripts_list = [] playbooks_list = [] integration_list = [] testplaybooks_list = [] pool = Pool(processes=cpu_count() * 2) print_color(""Starting the creation of the id_set"", LOG_COLORS.GREEN) print_color(""Starting iterating over Integrations"", LOG_COLORS.GREEN) for arr in pool.map(process_integration, get_integrations_paths()): integration_list.extend(arr) print_color(""Starting iterating over Playbooks"", LOG_COLORS.GREEN) for arr in pool.map(process_playbook, get_playbooks_paths()): playbooks_list.extend(arr) print_color(""Starting iterating over Scripts"", LOG_COLORS.GREEN) for arr in pool.map(process_script, get_scripts_paths()): scripts_list.extend(arr) print_color(""Starting iterating over TestPlaybooks"", LOG_COLORS.GREEN) for pair in pool.map(process_testplaybook_path, get_test_playbooks_paths()): if pair[0]: testplaybooks_list.append(pair[0]) if pair[1]: scripts_list.append(pair[1]) new_ids_dict = OrderedDict() # we sort each time the whole set in case someone manually changed something # it shouldn't take too much time new_ids_dict['scripts'] = sort(scripts_list) new_ids_dict['playbooks'] = sort(playbooks_list) new_ids_dict['integrations'] = sort(integration_list) new_ids_dict['TestPlaybooks'] = sort(testplaybooks_list) with open('./Tests/id_set.json', 'w') as id_set_file: json.dump(new_ids_dict, id_set_file, indent=4) exec_time = time.time() - start_time print_color(""Finished the creation of the id_set. Total time: {} seconds"".format(exec_time), LOG_COLORS.GREEN) duplicates = find_duplicates(new_ids_dict) if any(duplicates): print_error('The following duplicates were found: {}'.format(duplicates)) sys.exit(1) " 34252,"def add_server_arguments(parser: argparse.ArgumentParser): parser.add_argument( ""--log-file"", type=str, # Rasa should not log to a file by default, otherwise there will be problems # when running on OpenShift default=None, help=""Store logs in specified file."", ) add_endpoint_param( parser, help_text=""Configuration file for the model server and the connectors as a "" ""yml file."", ) server_arguments = parser.add_argument_group(""Server Settings"") server_arguments.add_argument( ""-p"", ""--port"", default=constants.DEFAULT_SERVER_PORT, type=int, help=""Port to run the server at."", ) server_arguments.add_argument( ""-t"", ""--auth-token"", type=str, help=""Enable token based authentication. Requests need to provide "" ""the token to be accepted."", ) server_arguments.add_argument( ""--cors"", nargs=""*"", type=str, help=""Enable CORS for the passed origin. Use * to whitelist all origins."", ) server_arguments.add_argument( ""--enable-api"", action=""store_true"", help=""Start the web server API in addition to the input channel."", ) server_arguments.add_argument( ""--remote-storage"", help=""Set the remote location where your Rasa model is stored, e.g. 
on AWS."", ) server_arguments.add_argument( ""--ssl-certificate"", help=""Set the SSL Certificate to create a TLS secured server."", ) server_arguments.add_argument( ""--ssl-keyfile"", help=""Set the SSL Keyfile to create a TLS secured server."" ) server_arguments.add_argument( ""--ssl-ca-file"", help=""If your ssl certificate needs to be verified, you can specify the CA file "" ""using this parameter."", ) server_arguments.add_argument( ""--ssl-password"", help=""If your ssl-keyfile is protected by a password, you can specify it "" ""using this paramer."", ) channel_arguments = parser.add_argument_group(""Channels"") channel_arguments.add_argument( ""--credentials"", default=None, help=""Authentication credentials for the connector as a yml file."", ) channel_arguments.add_argument( ""--connector"", type=str, help=""Service to connect to."" ) jwt_auth = parser.add_argument_group(""JWT Authentication"") jwt_auth.add_argument( ""--jwt-secret"", type=str, help=""Public key for asymmetric JWT methods or shared secret"" ""for symmetric methods. Please also make sure to use "" ""--jwt-method to select the method of the signature, "" ""otherwise this argument will be ignored."", ) jwt_auth.add_argument( ""--jwt-method"", type=str, default=""HS256"", help=""Method used for the signature of the JWT authentication payload."", ) ","def add_server_arguments(parser: argparse.ArgumentParser): parser.add_argument( ""--log-file"", type=str, # Rasa should not log to a file by default, otherwise there will be problems # when running on OpenShift default=None, help=""Store logs in specified file."", ) add_endpoint_param( parser, help_text=""Configuration file for the model server and the connectors as a "" ""yml file."", ) server_arguments = parser.add_argument_group(""Server Settings"") server_arguments.add_argument( ""-p"", ""--port"", default=constants.DEFAULT_SERVER_PORT, type=int, help=""Port to run the server at."", ) server_arguments.add_argument( ""-t"", ""--auth-token"", type=str, help=""Enable token based authentication. Requests need to provide "" ""the token to be accepted."", ) server_arguments.add_argument( ""--cors"", nargs=""*"", type=str, help=""Enable CORS for the passed origin. Use * to whitelist all origins."", ) server_arguments.add_argument( ""--enable-api"", action=""store_true"", help=""Start the web server API in addition to the input channel."", ) server_arguments.add_argument( ""--remote-storage"", help=""Set the remote location where your Rasa model is stored, e.g. 
on AWS."", ) server_arguments.add_argument( ""--ssl-certificate"", help=""Set the SSL Certificate to create a TLS secured server."", ) server_arguments.add_argument( ""--ssl-keyfile"", help=""Set the SSL Keyfile to create a TLS secured server."" ) server_arguments.add_argument( ""--ssl-ca-file"", help=""If your SSL certificate needs to be verified, you can specify the CA file "" ""using this parameter."", ) server_arguments.add_argument( ""--ssl-password"", help=""If your ssl-keyfile is protected by a password, you can specify it "" ""using this paramer."", ) channel_arguments = parser.add_argument_group(""Channels"") channel_arguments.add_argument( ""--credentials"", default=None, help=""Authentication credentials for the connector as a yml file."", ) channel_arguments.add_argument( ""--connector"", type=str, help=""Service to connect to."" ) jwt_auth = parser.add_argument_group(""JWT Authentication"") jwt_auth.add_argument( ""--jwt-secret"", type=str, help=""Public key for asymmetric JWT methods or shared secret"" ""for symmetric methods. Please also make sure to use "" ""--jwt-method to select the method of the signature, "" ""otherwise this argument will be ignored."", ) jwt_auth.add_argument( ""--jwt-method"", type=str, default=""HS256"", help=""Method used for the signature of the JWT authentication payload."", ) " 8886,"def handle_list(options): """"""List Sopel plugins :param options: parsed arguments :type options: ``argparse.Namespace`` :return: Return 0 if everything went fine. """""" settings = utils.load_settings(options) no_color = options.no_color name_only = options.name_only enabled_only = options.enabled_only disabled_only = options.disabled_only # get usable plugins items = ( (name, info[0], info[1]) for name, info in plugins.get_usable_plugins(settings).items() ) items = ( (name, plugin, is_enabled) for name, plugin, is_enabled in items ) # filter on enabled/disabled if required if enabled_only: items = ( (name, plugin, is_enabled) for name, plugin, is_enabled in items if is_enabled ) elif disabled_only: items = ( (name, plugin, is_enabled) for name, plugin, is_enabled in items if not is_enabled ) # sort plugins items = sorted(items, key=operator.itemgetter(0)) for name, plugin, is_enabled in items: description = { 'name': name, 'status': 'enabled' if is_enabled else 'disabled', } # optional meta description from the plugin itself try: plugin.load() description.update(plugin.get_meta_description()) # colorize name for display purpose if not no_color: if is_enabled: description['name'] = utils.green(name) else: description['name'] = utils.red(name) except Exception as error: label = ('%s' % error) or 'unknown loading exception' error_status = 'error' description.update({ 'label': 'Error: %s' % label, 'type': 'unknown', 'source': 'unknown', 'status': error_status, }) if not no_color: if is_enabled: # yellow instead of green description['name'] = utils.yellow(name) else: # keep it red for disabled plugins description['name'] = utils.red(name) description['status'] = utils.red(error_status) template = '{name}/{type} {label} ({source}) [{status}]' if name_only: template = '{name}' print(template.format(**description)) return 0 # successful operation ","def handle_list(options): """"""List Sopel plugins :param options: parsed arguments :type options: ``argparse.Namespace`` :return: 0 if everything went fine """""" settings = utils.load_settings(options) no_color = options.no_color name_only = options.name_only enabled_only = options.enabled_only disabled_only = 
options.disabled_only # get usable plugins items = ( (name, info[0], info[1]) for name, info in plugins.get_usable_plugins(settings).items() ) items = ( (name, plugin, is_enabled) for name, plugin, is_enabled in items ) # filter on enabled/disabled if required if enabled_only: items = ( (name, plugin, is_enabled) for name, plugin, is_enabled in items if is_enabled ) elif disabled_only: items = ( (name, plugin, is_enabled) for name, plugin, is_enabled in items if not is_enabled ) # sort plugins items = sorted(items, key=operator.itemgetter(0)) for name, plugin, is_enabled in items: description = { 'name': name, 'status': 'enabled' if is_enabled else 'disabled', } # optional meta description from the plugin itself try: plugin.load() description.update(plugin.get_meta_description()) # colorize name for display purpose if not no_color: if is_enabled: description['name'] = utils.green(name) else: description['name'] = utils.red(name) except Exception as error: label = ('%s' % error) or 'unknown loading exception' error_status = 'error' description.update({ 'label': 'Error: %s' % label, 'type': 'unknown', 'source': 'unknown', 'status': error_status, }) if not no_color: if is_enabled: # yellow instead of green description['name'] = utils.yellow(name) else: # keep it red for disabled plugins description['name'] = utils.red(name) description['status'] = utils.red(error_status) template = '{name}/{type} {label} ({source}) [{status}]' if name_only: template = '{name}' print(template.format(**description)) return 0 # successful operation " 31998,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" params = demisto.params() # if your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor verify_certificate = not params.get('insecure', False) # if your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor proxy = params.get('proxy', False) app_id = params.get('app_id') or params.get('_app_id') base_url = params.get('base_url') tenant_id = params.get('tenant_id') or params.get('_tenant_id') client_credentials = params.get('client_credentials', False) enc_key = params.get('enc_key') or (demisto.getParam('credentials') or {}).get('password') if not app_id: raise Exception('Aplication ID must be provided.') first_fetch_time = params.get('first_fetch', '3 days').strip() fetch_limit = arg_to_number(params.get('max_fetch', 10)) fetch_timeout = arg_to_number(params.get('fetch_timeout', TIMEOUT)) demisto.debug(f'Command being called is {demisto.command()}') command = demisto.command() args = demisto.args() try: client = Client( app_id=app_id, verify=verify_certificate, base_url=base_url, proxy=proxy, tenant_id=tenant_id, enc_key=enc_key, client_credentials=client_credentials, ) if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. 
return_results(test_module(client)) elif command == 'microsoft-365-defender-auth-start': return_results(start_auth(client)) elif command == 'microsoft-365-defender-auth-complete': return_results(complete_auth(client)) elif command == 'microsoft-365-defender-auth-reset': return_results(reset_auth()) elif command == 'microsoft-365-defender-auth-test': return_results(test_connection(client)) elif command == 'microsoft-365-defender-incidents-list': test_context_for_token(client) return_results(microsoft_365_defender_incidents_list_command(client, args)) elif command == 'microsoft-365-defender-incident-update': test_context_for_token(client) return_results(microsoft_365_defender_incident_update_command(client, args)) elif command == 'microsoft-365-defender-advanced-hunting': test_context_for_token(client) return_results(microsoft_365_defender_advanced_hunting_command(client, args)) elif command == 'fetch-incidents': fetch_limit = arg_to_number(fetch_limit) fetch_timeout = arg_to_number(fetch_timeout) if fetch_timeout else None incidents = fetch_incidents(client, first_fetch_time, fetch_limit, fetch_timeout) demisto.incidents(incidents) else: raise NotImplementedError # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" params = demisto.params() # if your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor verify_certificate = not params.get('insecure', False) # if your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor proxy = params.get('proxy', False) app_id = params.get('app_id') or params.get('_app_id') base_url = params.get('base_url') tenant_id = params.get('tenant_id') or params.get('_tenant_id') client_credentials = params.get('client_credentials', False) enc_key = params.get('enc_key') or (params.get('credentials') or {}).get('password') if not app_id: raise Exception('Aplication ID must be provided.') first_fetch_time = params.get('first_fetch', '3 days').strip() fetch_limit = arg_to_number(params.get('max_fetch', 10)) fetch_timeout = arg_to_number(params.get('fetch_timeout', TIMEOUT)) demisto.debug(f'Command being called is {demisto.command()}') command = demisto.command() args = demisto.args() try: client = Client( app_id=app_id, verify=verify_certificate, base_url=base_url, proxy=proxy, tenant_id=tenant_id, enc_key=enc_key, client_credentials=client_credentials, ) if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. 
return_results(test_module(client)) elif command == 'microsoft-365-defender-auth-start': return_results(start_auth(client)) elif command == 'microsoft-365-defender-auth-complete': return_results(complete_auth(client)) elif command == 'microsoft-365-defender-auth-reset': return_results(reset_auth()) elif command == 'microsoft-365-defender-auth-test': return_results(test_connection(client)) elif command == 'microsoft-365-defender-incidents-list': test_context_for_token(client) return_results(microsoft_365_defender_incidents_list_command(client, args)) elif command == 'microsoft-365-defender-incident-update': test_context_for_token(client) return_results(microsoft_365_defender_incident_update_command(client, args)) elif command == 'microsoft-365-defender-advanced-hunting': test_context_for_token(client) return_results(microsoft_365_defender_advanced_hunting_command(client, args)) elif command == 'fetch-incidents': fetch_limit = arg_to_number(fetch_limit) fetch_timeout = arg_to_number(fetch_timeout) if fetch_timeout else None incidents = fetch_incidents(client, first_fetch_time, fetch_limit, fetch_timeout) demisto.incidents(incidents) else: raise NotImplementedError # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 14777,"def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the sensor."""""" name = config.get(CONF_NAME) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) try: client = AtomeClient(username, password) except PyAtomeError as exp: _LOGGER.error(exp) return False # finally: # client.close_session() add_entities([AtomeSensor(name, client)]) return True ","def setup_platform(hass, config, add_entities, discovery_info=None): """"""Set up the sensor."""""" name = config[CONF_NAME] username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) try: client = AtomeClient(username, password) except PyAtomeError as exp: _LOGGER.error(exp) return False # finally: # client.close_session() add_entities([AtomeSensor(name, client)]) return True " 55264,"def test_notsharing(): # This test will fail if operators alias some underlying arrays upon copy(). a = nk.operator.spin.sigmax(nk.hilbert.Spin(0.5, 2), 0) * nk.operator.spin.sigmax( nk.hilbert.Spin(0.5, 2), 1, dtype=complex ) b = nk.operator.spin.sigmay(nk.hilbert.Spin(0.5, 2), 0) * nk.operator.spin.sigmaz( nk.hilbert.Spin(0.5, 2), 1 ) delta = b - a a_orig = a.to_dense() a_copy = a.copy() a_copy += delta np.testing.assert_allclose(a_orig, a.to_dense()) np.testing.assert_allclose(a_copy.to_dense(), b.to_dense()) ","def test_notsharing(): # This test will fail if operators alias some underlying arrays upon copy(). hi = nk.hilbert.Spin(0.5, 2) a = nk.operator.spin.sigmax(hi, 0) * nk.operator.spin.sigmax(hi, 1, dtype=complex) b = nk.operator.spin.sigmay(hi, 0) * nk.operator.spin.sigmaz(hi, 1) delta = b - a a_orig = a.to_dense() a_copy = a.copy() a_copy += delta np.testing.assert_allclose(a_orig, a.to_dense()) np.testing.assert_allclose(a_copy.to_dense(), b.to_dense()) " 27991,"def add_arguments_to_parser(parser): """""" Add the subcommand's arguments to the given argparse.ArgumentParser. """""" parser.add_argument('logfile', type=str, help=""Path to the JSON compilation command database "" ""files which were created during the build. 
"" ""The analyzers will check only the files "" ""registered in these build databases."") parser.add_argument('-j', '--jobs', type=int, dest=""jobs"", required=False, default=1, help=""Number of threads to use in analysis. More "" ""threads mean faster analysis at the cost of "" ""using more memory."") skip_mode = parser.add_mutually_exclusive_group() skip_mode.add_argument('-i', '--ignore', '--skip', dest=""skipfile"", required=False, default=argparse.SUPPRESS, help=""Path to the Skipfile dictating which project "" ""files should be omitted from analysis. "" ""Please consult the User guide on how a "" ""Skipfile should be laid out."") skip_mode.add_argument('--file', nargs='+', dest=""files"", metavar='FILE', required=False, default=argparse.SUPPRESS, help=""Analyze only the given file(s) not the whole "" ""compilation database. Absolute directory "" ""paths should start with '/', relative "" ""directory paths should start with '*' and "" ""it can contain path glob pattern. "" ""Example: '/path/to/main.cpp', 'lib/*.cpp', "" ""*/test*'."") parser.add_argument('-o', '--output', dest=""output_path"", required=True, default=argparse.SUPPRESS, help=""Store the analysis output in the given folder."") parser.add_argument('--compiler-info-file', dest=""compiler_info_file"", required=False, default=argparse.SUPPRESS, help=""Read the compiler includes and target from the "" ""specified file rather than invoke the compiler "" ""executable."") parser.add_argument('--keep-gcc-include-fixed', dest=""keep_gcc_include_fixed"", required=False, action='store_true', default=False, help=""There are some implicit include paths which are "" ""only used by GCC (include-fixed). This flag "" ""determines whether these should be kept among "" ""the implicit include paths."") parser.add_argument('--keep-gcc-intrin', dest=""keep_gcc_intrin"", required=False, action='store_true', default=False, help=""There are some implicit include paths which "" ""contain GCC-specific header files (those "" ""which end with intrin.h). This flag determines "" ""whether these should be kept among the implicit "" ""include paths. Use this flag if Clang analysis "" ""fails with error message related to __builtin "" ""symbols."") parser.add_argument('-t', '--type', '--output-format', dest=""output_format"", required=False, choices=['plist'], default='plist', help=""Specify the format the analysis results should "" ""use."") parser.add_argument('-q', '--quiet', dest=""quiet"", action='store_true', default=argparse.SUPPRESS, required=False, help=""Do not print the output or error of the "" ""analyzers to the standard output of "" ""CodeChecker."") parser.add_argument('-c', '--clean', dest=""clean"", required=False, action='store_true', default=argparse.SUPPRESS, help=""Delete analysis reports stored in the output "" ""directory. (By default, CodeChecker would keep "" ""reports and overwrites only those files that "" ""were update by the current build command)."") parser.add_argument('--compile-uniqueing', type=str, dest=""compile_uniqueing"", default=""none"", required=False, help=""Specify the method the compilation "" ""actions in the compilation database are "" ""uniqued before analysis. "" ""CTU analysis works properly only if "" ""there is exactly one "" ""compilation action per source file. "" ""none(default in non CTU mode): "" ""no uniqueing is done. "" ""strict: no uniqueing is done, "" ""and an error is given if "" ""there is more than one compilation "" ""action for a source file. 
"" ""alpha(default in CTU mode): If there is more "" ""than one compilation action for a source "" ""file, only the one is kept that belongs to the "" ""alphabetically first "" ""compilation target. "" ""If none of the above given, "" ""this parameter should "" ""be a python regular expression."" ""If there is more than one compilation action "" ""for a source, "" ""only the one is kept which matches the "" ""given python regex. If more than one "" ""matches an error is given. "" ""The whole compilation "" ""action text is searched for match."") parser.add_argument('--report-hash', dest=""report_hash"", default=argparse.SUPPRESS, required=False, choices=['context-free', 'context-free-v2'], help=""R|Specify the hash calculation method for "" ""reports. By default the calculation method for "" ""Clang Static Analyzer is context sensitive and "" ""for Clang Tidy it is context insensitive.\n"" ""You can use the following calculation methods:\n"" ""- context-free: there was a bug and for Clang "" ""Tidy not the context free hash was generated "" ""(kept for backward compatibility).\n"" ""- context-free-v2: context free hash is used "" ""for ClangSA and Clang Tidy.\n"" ""See the 'issue hashes' section of the help "" ""message of this command below for more "" ""information.\n"" ""USE WISELY AND AT YOUR OWN RISK!"") parser.add_argument('-n', '--name', dest=""name"", required=False, default=argparse.SUPPRESS, help=""Annotate the run analysis with a custom name in "" ""the created metadata file."") analyzer_opts = parser.add_argument_group(""analyzer arguments"") analyzer_opts.add_argument('--analyzers', nargs='+', dest='analyzers', metavar='ANALYZER', required=False, choices=analyzer_types.supported_analyzers, default=argparse.SUPPRESS, help=""Run analysis only with the analyzers "" ""specified. Currently supported analyzers "" ""are: "" + ', '.join(analyzer_types. supported_analyzers) + ""."") analyzer_opts.add_argument('--capture-analysis-output', dest='capture_analysis_output', action='store_true', default=argparse.SUPPRESS, required=False, help=""Store standard output and standard error "" ""of successful analyzer invocations "" ""into the '/success' "" ""directory."") analyzer_opts.add_argument('--config', dest='config_file', required=False, help=""R|Allow the configuration from an "" ""explicit JSON based configuration file. "" ""The value of the 'analyzer' key in the "" ""config file will be emplaced as command "" ""line arguments. The format of "" ""configuration file is:\n"" ""{\n"" "" \""analyzer\"": [\n"" "" \""--enable=core.DivideZero\"",\n"" "" \""--enable=core.CallAndMessage\"",\n"" "" \""--report-hash=context-free-v2\"",\n"" "" \""--verbose=debug\"",\n"" "" \""--clean\""\n"" "" ]\n"" ""}"") analyzer_opts.add_argument('--saargs', dest=""clangsa_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for the Clang Static "" ""Analyzer."") analyzer_opts.add_argument('--tidyargs', dest=""tidy_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for Clang-Tidy."") analyzer_opts.add_argument('--tidy-config', dest='tidy_config', required=False, default=argparse.SUPPRESS, help=""A file in YAML format containing the "" ""configuration of clang-tidy checkers. 
"" ""The file can be dumped by "" ""'CodeChecker analyzers --dump-config "" ""clang-tidy' command."") analyzer_opts.add_argument('--analyzer-config', dest='analyzer_config', nargs='*', default=[""clang-tidy:HeaderFilterRegex=.*""], help=""Analyzer configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker analyzers "" ""--analyzer-config'. To disable the "" ""default behaviour of this option you can "" ""use the "" ""'clang-tidy:take-config-from-directory="" ""true' option."") analyzer_opts.add_argument('--checker-config', dest='checker_config', nargs='*', default=argparse.SUPPRESS, help=""Checker configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker checkers --checker-config'."") analyzer_opts.add_argument('--timeout', type=int, dest='timeout', required=False, default=argparse.SUPPRESS, help=""The amount of time (in seconds) that "" ""each analyzer can spend, individually, "" ""to analyze the project. If the analysis "" ""of a particular file takes longer than "" ""this time, the analyzer is killed and "" ""the analysis is considered as a failed "" ""one."") context = analyzer_context.get_context() clang_has_z3 = analyzer_types.is_z3_capable(context) if clang_has_z3: analyzer_opts.add_argument('--z3', dest='enable_z3', choices=['on', 'off'], default='off', help=""Enable the z3 solver backend. This "" ""allows reasoning over more complex "" ""queries, but performance is worse "" ""than the default range-based "" ""constraint solver."") clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context) if clang_has_z3_refutation: analyzer_opts.add_argument('--z3-refutation', dest='enable_z3_refutation', choices=['on', 'off'], default='on' if clang_has_z3_refutation else 'off', help=""Switch on/off the Z3 SMT Solver "" ""backend to "" ""reduce false positives. The results "" ""of the ranged based constraint "" ""solver in the Clang Static Analyzer "" ""will be cross checked with the Z3 "" ""SMT solver. This should not cause "" ""that much of a slowdown compared to "" ""using the Z3 solver only."") if analyzer_types.is_ctu_capable(context): ctu_opts = parser.add_argument_group( ""cross translation unit analysis arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Cross-TU analysis. By default, no CTU analysis is run when 'CodeChecker analyze' is called."""""") ctu_modes = ctu_opts.add_mutually_exclusive_group() ctu_modes.add_argument('--ctu', '--ctu-all', action='store_const', const=[True, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform Cross Translation Unit (CTU) "" ""analysis, both 'collect' and 'analyze' "" ""phases. In this mode, the extra files "" ""created by 'collect' are cleaned up "" ""after the analysis."") ctu_modes.add_argument('--ctu-collect', action='store_const', const=[True, False], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the first, 'collect' phase of "" ""Cross-TU analysis. This phase generates "" ""extra files needed by CTU analysis, and "" ""puts them into '/ctu-dir'. 
"" ""NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") ctu_modes.add_argument('--ctu-analyze', action='store_const', const=[False, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the second, 'analyze' phase of "" ""Cross-TU analysis, using already "" ""available extra files in "" ""'/ctu-dir'. (These files "" ""will not be cleaned up in this mode.)"") ctu_opts.add_argument('--ctu-reanalyze-on-failure', action='store_true', dest='ctu_reanalyze_on_failure', default=argparse.SUPPRESS, help=""If Cross-TU analysis is enabled and fails "" ""for some reason, try to re analyze the "" ""same translation unit without "" ""Cross-TU enabled."") if analyzer_types.is_statistics_capable(context): stat_opts = parser.add_argument_group( ""Statistics analysis feature arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck, statisticsCollector.SpecialReturnValue checkers are available)."""""") stat_opts.add_argument('--stats-collect', '--stats-collect', action='store', default=argparse.SUPPRESS, dest='stats_output', help=""Perform the first, 'collect' phase of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis, and "" ""puts them into "" ""''."" "" NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") stat_opts.add_argument('--stats-use', '--stats-use', action='store', default=argparse.SUPPRESS, dest='stats_dir', help=""Use the previously generated statistics "" ""results for the analysis from the given "" ""''."") stat_opts.add_argument('--stats', action='store_true', default=argparse.SUPPRESS, dest='stats_enabled', help=""Perform both phases of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis and enables "" ""the statistical checkers. "" ""No need to enable them explicitly."") stat_opts.add_argument('--stats-min-sample-count', action='store', default=""10"", type=int, dest='stats_min_sample_count', help=""Minimum number of samples (function call"" "" occurrences) to be collected"" "" for a statistics to be relevant "" ""''."") stat_opts.add_argument('--stats-relevance-threshold', action='store', default=""0.85"", type=float, dest='stats_relevance_threshold', help=""The minimum ratio of calls of function "" ""f that must have a certain property "" ""property to consider it true for that "" ""function (calculated as calls "" ""with a property/all calls)."" "" CodeChecker will warn for"" "" calls of f do not have that property."" ""''."") checkers_opts = parser.add_argument_group( ""checker configuration"", """""" Checkers ------------------------------------------------ The analyzer performs checks that are categorized into families or ""checkers"". See 'CodeChecker checkers' for the list of available checkers. You can fine-tune which checkers to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every 'core' checker, but only 'core.uninitialized.Assign' from the 'core.uninitialized' group. Please consult the manual for details. Disabling certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang community, and thus discouraged. 
Compiler warnings and errors ------------------------------------------------ Compiler warnings are diagnostic messages that report constructions that are not inherently erroneous but that are risky or suggest there may have been an error. Compiler warnings are named 'clang-diagnostic-', e.g. Clang warning controlled by '-Wliteral-conversion' will be reported with check name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will enable every 'unused' warnings except 'unused-parameter'. These flags should start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.: '-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and '-Wextra' warnings are enabled. For more information see: https://clang.llvm.org/docs/DiagnosticsReference.html. Sometimes GCC is more permissive than Clang, so it is possible that a specific construction doesn't compile with Clang but compiles with GCC. These compiler errors are also collected as CodeChecker reports as 'clang-diagnostic-error'. Note that compiler errors and warnings are captured by CodeChecker only if it was emitted by clang-tidy."""""") checkers_opts.add_argument('-e', '--enable', dest=""enable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group) "" ""to BE USED in the analysis."") checkers_opts.add_argument('-d', '--disable', dest=""disable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group) "" ""to BE PROHIBITED from use in the "" ""analysis."") checkers_opts.add_argument('--enable-all', dest=""enable_all"", action='store_true', required=False, default=argparse.SUPPRESS, help=""Force the running analyzers to use "" ""almost every checker available. The "" ""checker groups 'alpha.', 'debug.' and "" ""'osx.' (on Linux) are NOT enabled "" ""automatically and must be EXPLICITLY "" ""specified. WARNING! Enabling all "" ""checkers might result in the analysis "" ""losing precision and stability, and "" ""could even result in a total failure of "" ""the analysis. USE WISELY AND AT YOUR "" ""OWN RISK!"") logger.add_verbose_arguments(parser) parser.set_defaults(func=main, func_process_config_file=process_config_file) ","def add_arguments_to_parser(parser): """""" Add the subcommand's arguments to the given argparse.ArgumentParser. """""" parser.add_argument('logfile', type=str, help=""Path to the JSON compilation command database "" ""files which were created during the build. "" ""The analyzers will check only the files "" ""registered in these build databases."") parser.add_argument('-j', '--jobs', type=int, dest=""jobs"", required=False, default=1, help=""Number of threads to use in analysis. More "" ""threads mean faster analysis at the cost of "" ""using more memory."") skip_mode = parser.add_mutually_exclusive_group() skip_mode.add_argument('-i', '--ignore', '--skip', dest=""skipfile"", required=False, default=argparse.SUPPRESS, help=""Path to the Skipfile dictating which project "" ""files should be omitted from analysis. 
"" ""Please consult the User guide on how a "" ""Skipfile should be laid out."") skip_mode.add_argument('--file', nargs='+', dest=""files"", metavar='FILE', required=False, default=argparse.SUPPRESS, help=""Analyze only the given file(s) not the whole "" ""compilation database. Absolute directory "" ""paths should start with '/', relative "" ""directory paths should start with '*' and "" ""it can contain path glob pattern. "" ""Example: '/path/to/main.cpp', 'lib/*.cpp', "" ""*/test*'."") parser.add_argument('-o', '--output', dest=""output_path"", required=True, default=argparse.SUPPRESS, help=""Store the analysis output in the given folder."") parser.add_argument('--compiler-info-file', dest=""compiler_info_file"", required=False, default=argparse.SUPPRESS, help=""Read the compiler includes and target from the "" ""specified file rather than invoke the compiler "" ""executable."") parser.add_argument('--keep-gcc-include-fixed', dest=""keep_gcc_include_fixed"", required=False, action='store_true', default=False, help=""There are some implicit include paths which are "" ""only used by GCC (include-fixed). This flag "" ""determines whether these should be kept among "" ""the implicit include paths."") parser.add_argument('--keep-gcc-intrin', dest=""keep_gcc_intrin"", required=False, action='store_true', default=False, help=""There are some implicit include paths which "" ""contain GCC-specific header files (those "" ""which end with intrin.h). This flag determines "" ""whether these should be kept among the implicit "" ""include paths. Use this flag if Clang analysis "" ""fails with error message related to __builtin "" ""symbols."") parser.add_argument('-t', '--type', '--output-format', dest=""output_format"", required=False, choices=['plist'], default='plist', help=""Specify the format the analysis results should "" ""use."") parser.add_argument('-q', '--quiet', dest=""quiet"", action='store_true', default=argparse.SUPPRESS, required=False, help=""Do not print the output or error of the "" ""analyzers to the standard output of "" ""CodeChecker."") parser.add_argument('-c', '--clean', dest=""clean"", required=False, action='store_true', default=argparse.SUPPRESS, help=""Delete analysis reports stored in the output "" ""directory. (By default, CodeChecker would keep "" ""reports and overwrites only those files that "" ""were update by the current build command)."") parser.add_argument('--compile-uniqueing', type=str, dest=""compile_uniqueing"", default=""none"", required=False, help=""Specify the method the compilation "" ""actions in the compilation database are "" ""uniqued before analysis. "" ""CTU analysis works properly only if "" ""there is exactly one "" ""compilation action per source file. "" ""none(default in non CTU mode): "" ""no uniqueing is done. "" ""strict: no uniqueing is done, "" ""and an error is given if "" ""there is more than one compilation "" ""action for a source file. "" ""alpha(default in CTU mode): If there is more "" ""than one compilation action for a source "" ""file, only the one is kept that belongs to the "" ""alphabetically first "" ""compilation target. "" ""If none of the above given, "" ""this parameter should "" ""be a python regular expression."" ""If there is more than one compilation action "" ""for a source, "" ""only the one is kept which matches the "" ""given python regex. If more than one "" ""matches an error is given. 
"" ""The whole compilation "" ""action text is searched for match."") parser.add_argument('--report-hash', dest=""report_hash"", default=argparse.SUPPRESS, required=False, choices=['context-free', 'context-free-v2'], help=""R|Specify the hash calculation method for "" ""reports. By default the calculation method for "" ""Clang Static Analyzer is context sensitive and "" ""for Clang Tidy it is context insensitive.\n"" ""You can use the following calculation methods:\n"" ""- context-free: there was a bug and for Clang "" ""Tidy not the context free hash was generated "" ""(kept for backward compatibility).\n"" ""- context-free-v2: context free hash is used "" ""for ClangSA and Clang Tidy.\n"" ""See the 'issue hashes' section of the help "" ""message of this command below for more "" ""information.\n"" ""USE WISELY AND AT YOUR OWN RISK!"") parser.add_argument('-n', '--name', dest=""name"", required=False, default=argparse.SUPPRESS, help=""Annotate the run analysis with a custom name in "" ""the created metadata file."") analyzer_opts = parser.add_argument_group(""analyzer arguments"") analyzer_opts.add_argument('--analyzers', nargs='+', dest='analyzers', metavar='ANALYZER', required=False, choices=analyzer_types.supported_analyzers, default=argparse.SUPPRESS, help=""Run analysis only with the analyzers "" ""specified. Currently supported analyzers "" ""are: "" + ', '.join(analyzer_types. supported_analyzers) + ""."") analyzer_opts.add_argument('--capture-analysis-output', dest='capture_analysis_output', action='store_true', default=argparse.SUPPRESS, required=False, help=""Store standard output and standard error "" ""of successful analyzer invocations "" ""into the '/success' "" ""directory."") analyzer_opts.add_argument('--config', dest='config_file', required=False, help=""Allow the configuration from an "" ""explicit JSON based configuration file. "" ""The value of the 'analyzer' key in the "" ""config file will be emplaced as command "" ""line arguments. The format of "" ""configuration file is:\n"" ""{\n"" "" \""analyzer\"": [\n"" "" \""--enable=core.DivideZero\"",\n"" "" \""--enable=core.CallAndMessage\"",\n"" "" \""--report-hash=context-free-v2\"",\n"" "" \""--verbose=debug\"",\n"" "" \""--clean\""\n"" "" ]\n"" ""}"") analyzer_opts.add_argument('--saargs', dest=""clangsa_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for the Clang Static "" ""Analyzer."") analyzer_opts.add_argument('--tidyargs', dest=""tidy_args_cfg_file"", required=False, default=argparse.SUPPRESS, help=""File containing argument which will be "" ""forwarded verbatim for Clang-Tidy."") analyzer_opts.add_argument('--tidy-config', dest='tidy_config', required=False, default=argparse.SUPPRESS, help=""A file in YAML format containing the "" ""configuration of clang-tidy checkers. "" ""The file can be dumped by "" ""'CodeChecker analyzers --dump-config "" ""clang-tidy' command."") analyzer_opts.add_argument('--analyzer-config', dest='analyzer_config', nargs='*', default=[""clang-tidy:HeaderFilterRegex=.*""], help=""Analyzer configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker analyzers "" ""--analyzer-config'. 
To disable the "" ""default behaviour of this option you can "" ""use the "" ""'clang-tidy:take-config-from-directory="" ""true' option."") analyzer_opts.add_argument('--checker-config', dest='checker_config', nargs='*', default=argparse.SUPPRESS, help=""Checker configuration options in the "" ""following format: analyzer:key=value. "" ""The collection of the options can be "" ""printed with "" ""'CodeChecker checkers --checker-config'."") analyzer_opts.add_argument('--timeout', type=int, dest='timeout', required=False, default=argparse.SUPPRESS, help=""The amount of time (in seconds) that "" ""each analyzer can spend, individually, "" ""to analyze the project. If the analysis "" ""of a particular file takes longer than "" ""this time, the analyzer is killed and "" ""the analysis is considered as a failed "" ""one."") context = analyzer_context.get_context() clang_has_z3 = analyzer_types.is_z3_capable(context) if clang_has_z3: analyzer_opts.add_argument('--z3', dest='enable_z3', choices=['on', 'off'], default='off', help=""Enable the z3 solver backend. This "" ""allows reasoning over more complex "" ""queries, but performance is worse "" ""than the default range-based "" ""constraint solver."") clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context) if clang_has_z3_refutation: analyzer_opts.add_argument('--z3-refutation', dest='enable_z3_refutation', choices=['on', 'off'], default='on' if clang_has_z3_refutation else 'off', help=""Switch on/off the Z3 SMT Solver "" ""backend to "" ""reduce false positives. The results "" ""of the ranged based constraint "" ""solver in the Clang Static Analyzer "" ""will be cross checked with the Z3 "" ""SMT solver. This should not cause "" ""that much of a slowdown compared to "" ""using the Z3 solver only."") if analyzer_types.is_ctu_capable(context): ctu_opts = parser.add_argument_group( ""cross translation unit analysis arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Cross-TU analysis. By default, no CTU analysis is run when 'CodeChecker analyze' is called."""""") ctu_modes = ctu_opts.add_mutually_exclusive_group() ctu_modes.add_argument('--ctu', '--ctu-all', action='store_const', const=[True, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform Cross Translation Unit (CTU) "" ""analysis, both 'collect' and 'analyze' "" ""phases. In this mode, the extra files "" ""created by 'collect' are cleaned up "" ""after the analysis."") ctu_modes.add_argument('--ctu-collect', action='store_const', const=[True, False], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the first, 'collect' phase of "" ""Cross-TU analysis. This phase generates "" ""extra files needed by CTU analysis, and "" ""puts them into '/ctu-dir'. "" ""NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") ctu_modes.add_argument('--ctu-analyze', action='store_const', const=[False, True], dest='ctu_phases', default=argparse.SUPPRESS, help=""Perform the second, 'analyze' phase of "" ""Cross-TU analysis, using already "" ""available extra files in "" ""'/ctu-dir'. 
(These files "" ""will not be cleaned up in this mode.)"") ctu_opts.add_argument('--ctu-reanalyze-on-failure', action='store_true', dest='ctu_reanalyze_on_failure', default=argparse.SUPPRESS, help=""If Cross-TU analysis is enabled and fails "" ""for some reason, try to re analyze the "" ""same translation unit without "" ""Cross-TU enabled."") if analyzer_types.is_statistics_capable(context): stat_opts = parser.add_argument_group( ""Statistics analysis feature arguments"", """""" These arguments are only available if the Clang Static Analyzer supports Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck, statisticsCollector.SpecialReturnValue checkers are available)."""""") stat_opts.add_argument('--stats-collect', '--stats-collect', action='store', default=argparse.SUPPRESS, dest='stats_output', help=""Perform the first, 'collect' phase of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis, and "" ""puts them into "" ""''."" "" NOTE: If this argument is present, "" ""CodeChecker will NOT execute the "" ""analyzers!"") stat_opts.add_argument('--stats-use', '--stats-use', action='store', default=argparse.SUPPRESS, dest='stats_dir', help=""Use the previously generated statistics "" ""results for the analysis from the given "" ""''."") stat_opts.add_argument('--stats', action='store_true', default=argparse.SUPPRESS, dest='stats_enabled', help=""Perform both phases of "" ""Statistical analysis. This phase "" ""generates extra files needed by "" ""statistics analysis and enables "" ""the statistical checkers. "" ""No need to enable them explicitly."") stat_opts.add_argument('--stats-min-sample-count', action='store', default=""10"", type=int, dest='stats_min_sample_count', help=""Minimum number of samples (function call"" "" occurrences) to be collected"" "" for a statistics to be relevant "" ""''."") stat_opts.add_argument('--stats-relevance-threshold', action='store', default=""0.85"", type=float, dest='stats_relevance_threshold', help=""The minimum ratio of calls of function "" ""f that must have a certain property "" ""property to consider it true for that "" ""function (calculated as calls "" ""with a property/all calls)."" "" CodeChecker will warn for"" "" calls of f do not have that property."" ""''."") checkers_opts = parser.add_argument_group( ""checker configuration"", """""" Checkers ------------------------------------------------ The analyzer performs checks that are categorized into families or ""checkers"". See 'CodeChecker checkers' for the list of available checkers. You can fine-tune which checkers to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every 'core' checker, but only 'core.uninitialized.Assign' from the 'core.uninitialized' group. Please consult the manual for details. Disabling certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang community, and thus discouraged. Compiler warnings and errors ------------------------------------------------ Compiler warnings are diagnostic messages that report constructions that are not inherently erroneous but that are risky or suggest there may have been an error. Compiler warnings are named 'clang-diagnostic-', e.g. Clang warning controlled by '-Wliteral-conversion' will be reported with check name 'clang-diagnostic-literal-conversion'. 
You can fine-tune which warnings to use in the analysis by setting the enabled and disabled flags starting from the bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will enable every 'unused' warnings except 'unused-parameter'. These flags should start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.: '-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and '-Wextra' warnings are enabled. For more information see: https://clang.llvm.org/docs/DiagnosticsReference.html. Sometimes GCC is more permissive than Clang, so it is possible that a specific construction doesn't compile with Clang but compiles with GCC. These compiler errors are also collected as CodeChecker reports as 'clang-diagnostic-error'. Note that compiler errors and warnings are captured by CodeChecker only if it was emitted by clang-tidy."""""") checkers_opts.add_argument('-e', '--enable', dest=""enable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group) "" ""to BE USED in the analysis."") checkers_opts.add_argument('-d', '--disable', dest=""disable"", metavar='checker/group/profile', default=argparse.SUPPRESS, action=OrderedCheckersAction, help=""Set a checker (or checker group) "" ""to BE PROHIBITED from use in the "" ""analysis."") checkers_opts.add_argument('--enable-all', dest=""enable_all"", action='store_true', required=False, default=argparse.SUPPRESS, help=""Force the running analyzers to use "" ""almost every checker available. The "" ""checker groups 'alpha.', 'debug.' and "" ""'osx.' (on Linux) are NOT enabled "" ""automatically and must be EXPLICITLY "" ""specified. WARNING! Enabling all "" ""checkers might result in the analysis "" ""losing precision and stability, and "" ""could even result in a total failure of "" ""the analysis. USE WISELY AND AT YOUR "" ""OWN RISK!"") logger.add_verbose_arguments(parser) parser.set_defaults(func=main, func_process_config_file=process_config_file) " 58121,"def main() -> None: api_key: str = demisto.params().get('api_key') base_url: str = demisto.params()['server'] verify_certificate: bool = not demisto.params().get('insecure', False) proxy: bool = demisto.params().get('proxy', False) command: str = demisto.command() args: Dict[str, Any] = demisto.args() headers: Dict[str, Any] = { 'Authorization': f'Bearer {api_key}', 'User-Agent': 'Binalyze AIR', 'Content-type': 'application/json', 'Accept-Charset': 'UTF-8' } try: demisto.debug(f'Command being called is {demisto.command()}') client: Client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy ) if command == 'test-module': return_results(test_connection(base_url)) elif command == 'binalyze-air-acquire': return_results(air_acquire_command(client, args)) elif command == 'binalyze-air-isolate': return_results(air_isolate_command(client, args)) except Exception as ex: message: str = str(ex) if '404' in message: return_results(f'Nothing found for {command}') else: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute BaseScript. 
Error: {str(ex)}') ","def main() -> None: api_key: str = demisto.params().get('api_key') base_url: str = demisto.params()['server'] verify_certificate: bool = not demisto.params().get('insecure', False) proxy: bool = demisto.params().get('proxy', False) command: str = demisto.command() args: Dict[str, Any] = demisto.args() headers: Dict[str, Any] = { 'Authorization': f'Bearer {api_key}', 'User-Agent': 'Binalyze AIR', 'Content-type': 'application/json', 'Accept-Charset': 'UTF-8' } try: demisto.debug(f'Command being called is {demisto.command()}') client: Client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy ) if command == 'test-module': return_results(test_connection(base_url)) elif command == 'binalyze-air-acquire': return_results(air_acquire_command(client, args)) elif command == 'binalyze-air-isolate': return_results(air_isolate_command(client, args)) except Exception as ex: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute ""{command}"". Error: {str(ex)}') " 54313,"def test_ignore_cached_files(): sf = SpectrumFactory(wavenum_min=2000, wavenum_max=3000, pressure=1,) loaded_files = True file_dir = getTestFile(""cdsd_hitemp_09_fragment.txt"") test_file = file_dir[:-8] + ""*"" sf.load_databank(path=test_file, format=""cdsd-hitemp"", parfuncfmt=""hapi"") try: sf.load_databank(path=test_file, format=""cdsd-hitemp"", parfuncfmt=""hapi"") except UnicodeDecodeError: loaded_files = False assert loaded_files ","def test_ignore_cached_files(): sf = SpectrumFactory(wavenum_min=2000, wavenum_max=3000, pressure=1,) loaded_files = True file_dir = getTestFile(""cdsd_hitemp_09_fragment.txt"") test_file = file_dir[:-8] + ""*"" sf.load_databank(path=test_file, format=""cdsd-hitemp"", parfuncfmt=""hapi"") try: sf.load_databank(path=test_file, format=""cdsd-hitemp"", parfuncfmt=""hapi"") except UnicodeDecodeError as err: raise UnicodeDecodeError(""Couldnt load database the 2nd time. 
This may be due to cache files trying to be read as normal files"") from err " 55225,"def _encode_overrides(kvs, overrides, encode_json=False): override_kvs = {} for k, v in kvs.items(): if k in overrides: exclude = overrides[k].exclude # If the exclude predicate returns true, the key should be # excluded from encoding, so skip the rest of the loop if exclude and exclude(v): continue letter_case = overrides[k].letter_case original_key = k k = letter_case(k) if letter_case is not None else k encoder = overrides[original_key].encoder # v = encoder(v) if encoder is not None else v if encoder is not None: try: v = encoder(v) except: # noqa: E722 raise ValueError( f""Encoder encountered an error with field '{k}'"" ) if encode_json: v = _encode_json_type(v) override_kvs[k] = v return override_kvs ","def _encode_overrides(kvs, overrides, encode_json=False): override_kvs = {} for k, v in kvs.items(): if k in overrides: exclude = overrides[k].exclude # If the exclude predicate returns true, the key should be # excluded from encoding, so skip the rest of the loop if exclude and exclude(v): continue letter_case = overrides[k].letter_case original_key = k k = letter_case(k) if letter_case is not None else k encoder = overrides[original_key].encoder # v = encoder(v) if encoder is not None else v if encoder is not None: try: v = encoder(v) except Exception as e: # noqa: E722 raise ValueError(f""Encoder encountered an error with field '{k}'"") from e raise ValueError( f""Encoder encountered an error with field '{k}'"" ) if encode_json: v = _encode_json_type(v) override_kvs[k] = v return override_kvs " 8744,"def test_save_unmodified_config(multi_fakeconfig): """"""Assert type attributes are kept as they should be"""""" multi_fakeconfig.save() saved_config = config.Config(multi_fakeconfig.filename) saved_config.define_section('fake', FakeConfigSection) saved_config.define_section('spam', SpamSection) # core assert saved_config.core.owner == 'dgw' # fake assert saved_config.fake.valattr is None assert saved_config.fake.listattr == [] assert saved_config.fake.choiceattr is None assert saved_config.fake.af_fileattr is None assert saved_config.fake.ad_fileattr is None assert saved_config.fake.rf_fileattr is None assert saved_config.fake.rd_fileattr is None # spam assert saved_config.spam.eggs == [ 'one', 'two', 'three', 'four', 'and a half', # no-breakline + comma ], 'Comma separated line: ""four"" and ""and a half"" must be separated' assert saved_config.spam.bacons == [ 'grilled', 'burn out', 'greasy, fat, and tasty', ] assert saved_config.spam.cheese == [ 'cheddar', 'reblochon', 'camembert', ] ","def test_save_unmodified_config(multi_fakeconfig): """"""Assert type attributes are kept as they should be"""""" multi_fakeconfig.save() saved_config = config.Config(multi_fakeconfig.filename) saved_config.define_section('fake', FakeConfigSection) saved_config.define_section('spam', SpamSection) # core assert saved_config.core.owner == 'dgw' # fake assert saved_config.fake.valattr is None assert saved_config.fake.listattr == [] assert saved_config.fake.choiceattr is None assert saved_config.fake.af_fileattr is None assert saved_config.fake.ad_fileattr is None assert saved_config.fake.rf_fileattr is None assert saved_config.fake.rd_fileattr is None # spam assert saved_config.spam.eggs == [ 'one', 'two', 'three', 'four', 'and a half', # no-breakline + comma ], 'Comma separated line: ""four"" and ""and a half"" must be separated' assert saved_config.spam.bacons == [ 'grilled', 'burn out', 'greasy, fat, and tasty', ] 
assert saved_config.spam.cheeses == [ 'cheddar', 'reblochon', 'camembert', ] " 24943,"def check_messages(*messages: str) -> Callable: """"""Kept for backwards compatibility, deprecated. Use only_required_for instead, which conveys the intent of the decorator much clearer. """""" # Uncomment the following warning once all 'check_messages' calls have been replaced # in pylints codebase. # warnings.warn( # ""utils.check_messages will be removed in favour of calling "" # ""utils.only_required_for in pylint 3.0"", # DeprecationWarning, # ) return only_required_for(*messages) ","def check_messages(*messages: str) -> Callable[[nodes.NodeNG], None]: """"""Kept for backwards compatibility, deprecated. Use only_required_for instead, which conveys the intent of the decorator much clearer. """""" # Uncomment the following warning once all 'check_messages' calls have been replaced # in pylints codebase. # warnings.warn( # ""utils.check_messages will be removed in favour of calling "" # ""utils.only_required_for in pylint 3.0"", # DeprecationWarning, # ) return only_required_for(*messages) " 13337,"def volume_lock(request, object_id): volume = models.Volume.objects.get(id=object_id) assert(volume.vol_encrypt > 0) if request.method == ""POST"": _n = notifier() if '__confirm' not in request.POST and not _n.is_freenas() and _n.failover_licensed(): remaining_volumes = [v for v in models.Volume.objects.exclude(pk=object_id) if v.is_decrypted()] if not remaining_volumes: message = render_to_string('freeadmin/generic_model_confirm.html', { 'message': 'Warning: Locking this volume will prevent failover from functioning correctly.
Do you want to continue?', }) return JsonResp(request, confirm=message) notifier().volume_detach(volume) if hasattr(notifier, 'failover_status') and notifier().failover_status() == 'MASTER': from freenasUI.failover.enc_helper import LocalEscrowCtl escrowctl = LocalEscrowCtl() escrowctl.clear() try: os.unlink('/tmp/.failover_master') except Exception: pass try: with client as c: c.call('failover.call_remote', 'failover.encryption_clearkey') except Exception: log.warn('Failed to clear key on standby node, is it down?', exc_info=True) notifier().restart(""system_datasets"") return JsonResp(request, message=_(""Volume locked"")) with client as c: sys_dataset = c.call('systemdataset.config') if volume.vol_name == sys_dataset['pool']: return render( request, 'freeadmin/generic_model_dialog.html', { 'msg': 'Pool contains the system dataset and cannot be locked. Please select a different pool ' 'or configure the system dataset to be on a different pool.' } ) return render(request, ""storage/lock.html"") ","def volume_lock(request, object_id): volume = models.Volume.objects.get(id=object_id) assert(volume.vol_encrypt > 0) if request.method == ""POST"": _n = notifier() if '__confirm' not in request.POST and not _n.is_freenas() and _n.failover_licensed(): remaining_volumes = [v for v in models.Volume.objects.exclude(pk=object_id) if v.is_decrypted()] if not remaining_volumes: message = render_to_string('freeadmin/generic_model_confirm.html', { 'message': 'Warning: Locking this volume will prevent failover from functioning correctly.
Do you want to continue?', }) return JsonResp(request, confirm=message) notifier().volume_detach(volume) if hasattr(notifier, 'failover_status') and notifier().failover_status() == 'MASTER': from freenasUI.failover.enc_helper import LocalEscrowCtl escrowctl = LocalEscrowCtl() escrowctl.clear() try: os.unlink('/tmp/.failover_master') except Exception: pass try: with client as c: c.call('failover.call_remote', 'failover.encryption_clearkey') except Exception: log.warn('Failed to clear key on standby node, is it down?', exc_info=True) notifier().restart(""system_datasets"") return JsonResp(request, message=_(""Volume locked"")) with client as c: sys_dataset = c.call('systemdataset.config') if volume.vol_name == sys_dataset['pool']: return render( request, 'freeadmin/generic_model_dialog.html', { 'msg': 'This pool contains the system dataset and cannot be locked. To lock this pool, migrate the ' 'or configure the system dataset to be on a different pool.' } ) return render(request, ""storage/lock.html"") " 12683,"def _log_or_raise_unmatched_owners( file_paths: Sequence[PurePath], owners_not_found_behavior: OwnersNotFoundBehavior, ignore_option: Optional[str] = None, ) -> None: option_msg = ( f""\n\nIf you would like to ignore un-owned files, please pass `{ignore_option}`."" if ignore_option else """" ) if len(file_paths) == 1: prefix = ( f""No owning targets could be found for the file `{file_paths[0]}`.\n\n"" f""Please check that there is a BUILD file in the parent directory "" f""{file_paths[0].parent} with a target whose `sources` field includes the file."" ) else: prefix = ( f""No owning targets could be found for the files {sorted(map(str, file_paths))}`.\n\n"" f""Please check that there are BUILD files in each file's parent directory with a "" f""target whose `sources` field includes the file."" ) msg = ( f""{prefix} See {bracketed_docs_url('targets')} for more information on target definitions."" f""\n\nMaybe run `./pants tailor` to autogenerate your BUILD files? See "" f""{bracketed_docs_url('create-initial-build-files')}.{option_msg}"" ) if owners_not_found_behavior == OwnersNotFoundBehavior.warn: logger.warning(msg) else: raise ResolveError(msg) ","def _log_or_raise_unmatched_owners( file_paths: Sequence[PurePath], owners_not_found_behavior: OwnersNotFoundBehavior, ignore_option: Optional[str] = None, ) -> None: option_msg = ( f""\n\nIf you would like to ignore un-owned files, please pass `{ignore_option}`."" if ignore_option else """" ) if len(file_paths) == 1: prefix = ( f""No owning targets could be found for the file `{file_paths[0]}`.\n\n"" f""Please check that there is a BUILD file in the parent directory "" f""{file_paths[0].parent} with a target whose `sources` field includes the file."" ) else: prefix = ( f""No owning targets could be found for the files {sorted(map(str, file_paths))}`.\n\n"" f""Please check that there are BUILD files in each file's parent directory with a "" f""target whose `sources` field includes the file."" ) msg = ( f""{prefix} See {bracketed_docs_url('targets')} for more information on target definitions."" f""\n\nYou may want to run `./pants tailor` to autogenerate your BUILD files? 
See "" f""{bracketed_docs_url('create-initial-build-files')}.{option_msg}"" ) if owners_not_found_behavior == OwnersNotFoundBehavior.warn: logger.warning(msg) else: raise ResolveError(msg) " 35214,"def _nvcc_gencode_options(cuda_version): """"""Returns NVCC GPU code generation options."""""" if sys.argv == ['setup.py', 'develop']: return [] envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None) if envcfg is not None and envcfg != 'current': return ['--generate-code={}'.format(arch) for arch in envcfg.split(';') if len(arch) > 0] if envcfg == 'current' and build.get_compute_capabilities() is not None: ccs = build.get_compute_capabilities() arch_list = [ f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}') for cc in ccs] else: # The arch_list specifies virtual architectures, such as 'compute_61', # and real architectures, such as 'sm_61', for which the CUDA # input files are to be compiled. # # The syntax of an entry of the list is # # entry ::= virtual_arch | (virtual_arch, real_arch) # # where virtual_arch is a string which means a virtual architecture and # real_arch is a string which means a real architecture. # # If a virtual architecture is supplied, NVCC generates a PTX code # the virtual architecture. If a pair of a virtual architecture and a # real architecture is supplied, NVCC generates a PTX code for the # virtual architecture as well as a cubin code for the real one. # # For example, making NVCC generate a PTX code for 'compute_60' virtual # architecture, the arch_list has an entry of 'compute_60'. # # arch_list = ['compute_60'] # # For another, making NVCC generate a PTX code for 'compute_61' virtual # architecture and a cubin code for 'sm_61' real architecture, the # arch_list has an entry of ('compute_61', 'sm_61'). # # arch_list = [('compute_61', 'sm_61')] # # See the documentation of each CUDA version for the list of supported # architectures: # # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation if cuda_version >= 11040: arch_list = ['compute_35', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), ('compute_75', 'sm_75'), ('compute_80', 'sm_80'), ('compute_86', 'sm_86'), 'compute_86'] elif cuda_version >= 11000: arch_list = ['compute_35', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), ('compute_75', 'sm_75'), ('compute_80', 'sm_80'), 'compute_80'] elif cuda_version >= 10000: arch_list = ['compute_30', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), ('compute_75', 'sm_75'), 'compute_70'] elif cuda_version >= 9020: arch_list = ['compute_30', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), 'compute_70'] else: # This should not happen. 
assert False options = [] for arch in arch_list: if type(arch) is tuple: virtual_arch, real_arch = arch options.append('--generate-code=arch={},code={}'.format( virtual_arch, real_arch)) else: options.append('--generate-code=arch={},code={}'.format( arch, arch)) return options ","def _nvcc_gencode_options(cuda_version): """"""Returns NVCC GPU code generation options."""""" if sys.argv == ['setup.py', 'develop']: return [] envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None) if envcfg is not None and envcfg != 'current': return ['--generate-code={}'.format(arch) for arch in envcfg.split(';') if len(arch) > 0] if envcfg == 'current' and build.get_compute_capabilities() is not None: ccs = build.get_compute_capabilities() arch_list = [ f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}') for cc in ccs] else: # The arch_list specifies virtual architectures, such as 'compute_61', # and real architectures, such as 'sm_61', for which the CUDA # input files are to be compiled. # # The syntax of an entry of the list is # # entry ::= virtual_arch | (virtual_arch, real_arch) # # where virtual_arch is a string which means a virtual architecture and # real_arch is a string which means a real architecture. # # If a virtual architecture is supplied, NVCC generates a PTX code # the virtual architecture. If a pair of a virtual architecture and a # real architecture is supplied, NVCC generates a PTX code for the # virtual architecture as well as a cubin code for the real one. # # For example, making NVCC generate a PTX code for 'compute_60' virtual # architecture, the arch_list has an entry of 'compute_60'. # # arch_list = ['compute_60'] # # For another, making NVCC generate a PTX code for 'compute_61' virtual # architecture and a cubin code for 'sm_61' real architecture, the # arch_list has an entry of ('compute_61', 'sm_61'). # # arch_list = [('compute_61', 'sm_61')] # # See the documentation of each CUDA version for the list of supported # architectures: # # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation if cuda_version >= 11010: arch_list = ['compute_35', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), ('compute_75', 'sm_75'), ('compute_80', 'sm_80'), ('compute_86', 'sm_86'), 'compute_86'] elif cuda_version >= 11000: arch_list = ['compute_35', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), ('compute_75', 'sm_75'), ('compute_80', 'sm_80'), 'compute_80'] elif cuda_version >= 10000: arch_list = ['compute_30', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), ('compute_75', 'sm_75'), 'compute_70'] elif cuda_version >= 9020: arch_list = ['compute_30', 'compute_50', ('compute_60', 'sm_60'), ('compute_61', 'sm_61'), ('compute_70', 'sm_70'), 'compute_70'] else: # This should not happen. assert False options = [] for arch in arch_list: if type(arch) is tuple: virtual_arch, real_arch = arch options.append('--generate-code=arch={},code={}'.format( virtual_arch, real_arch)) else: options.append('--generate-code=arch={},code={}'.format( arch, arch)) return options " 10730,"def alloca_once(builder, ty, size=None, name='', zfill=False): """"""Allocate stack memory at the entry block of the current function pointed by ``builder`` withe llvm type ``ty``. The optional ``size`` arg set the number of element to allocate. The default is 1. The optional ``name`` arg set the symbol name inside the llvm IR for debugging. 
If ``zfill`` is set, also filling zeros to the memory at the current use-site location. Note that the memory is always zero-filled after the ``alloca`` at init-site (the entry block). """""" if isinstance(size, utils.INT_TYPES): size = ir.Constant(intp_t, size) with builder.goto_entry_block(): ptr = builder.alloca(ty, size=size, name=name) # Always zero-fill at init-site. This is safe. builder.store(ty(None), ptr) # Also zero-fill at the use-site if zfill: builder.store(ty(None), ptr) return ptr ","def alloca_once(builder, ty, size=None, name='', zfill=False): """"""Allocate stack memory at the entry block of the current function pointed by ``builder`` withe llvm type ``ty``. The optional ``size`` arg set the number of element to allocate. The default is 1. The optional ``name`` arg set the symbol name inside the llvm IR for debugging. If ``zfill`` is set, fill the memory with zeros at the current use-site location. Note that the memory is always zero-filled after the ``alloca`` at init-site (the entry block). """""" if isinstance(size, utils.INT_TYPES): size = ir.Constant(intp_t, size) with builder.goto_entry_block(): ptr = builder.alloca(ty, size=size, name=name) # Always zero-fill at init-site. This is safe. builder.store(ty(None), ptr) # Also zero-fill at the use-site if zfill: builder.store(ty(None), ptr) return ptr " 5959,"def get_env_var(key, as_type): """"""Get the environment variable option. :param key: the config key requested :param as_type: the type we would like to convert it to :return: """""" environ_key = ""VIRTUALENV_{}"".format(key.upper()) if environ_key in os.environ: value = os.environ[environ_key] # noinspection PyBroadException try: source = ""env var {}"".format(environ_key) as_type = convert(value, as_type, source) return as_type, source except Exception: pass ","def get_env_var(key, as_type): """"""Get the environment variable option. :param key: the config key requested :param as_type: the type we would like to convert it to :return: """""" environ_key = ""VIRTUALENV_{}"".format(key.upper()) if os.environ.get(environ_key): value = os.environ[environ_key] # noinspection PyBroadException try: source = ""env var {}"".format(environ_key) as_type = convert(value, as_type, source) return as_type, source except Exception: pass " 57669,"def util_load_json(path): with io.open(path, mode='r', encoding='utf-8') as f: return json.loads(f.read()) ","def util_load_json(path): with open(path, 'r') as f: return json.load(f) " 9102,"def test_no_ressources_on_text_extract(): url = ""https://raw.githubusercontent.com/eagletrt/wiki/0f3f16309604f665a47595c890d15af1b3aec6d6/fenice-telemetry-tx/PCB%20Outputs/Pdf/Edge%20Mount%20SMA/TelemetryTX_EM.pdf"" reader = PdfReader(BytesIO(get_pdf_from_url(url, name=""tika-964029.pdf""))) for page in reader.pages: page.extract_text() ","def test_no_ressources_on_text_extract(): url = ""https://github.com/py-pdf/PyPDF2/files/9428434/TelemetryTX_EM.pdf"" reader = PdfReader(BytesIO(get_pdf_from_url(url, name=""tika-964029.pdf""))) for page in reader.pages: page.extract_text() " 220,"def main(argv=sys.argv): """""" Comment on updates that are eligible to be pushed to stable. Queries for updates in the testing state that have a NULL request, looping over them looking for updates that are eligible to be pushed to stable but haven't had comments from Bodhi to this effect. For each such update it finds it will add a comment stating that the update may now be pushed to stable. This function is the entry point for the bodhi-approve-testing console script. 
Args: argv (list): A list of command line arguments. Defaults to sys.argv. """""" logging.basicConfig(level=logging.ERROR) if len(argv) != 2: usage(argv) settings = get_appsettings(argv[1]) initialize_db(settings) db = Session() buildsys.setup_buildsystem(config) try: testing = db.query(Update).filter_by(status=UpdateStatus.testing, request=None) for update in testing: if not update.release.mandatory_days_in_testing and not update.autotime: # If this release does not have any testing requirements and is not autotime, # skip it print(f""{update.release.name} doesn't have mandatory days in testing"") continue # If this update was already commented, skip it if update.has_stable_comment: continue # If updates have reached the testing threshold, say something! Keep in mind # that we don't care about karma here, because autokarma updates get their request set # to stable by the Update.comment() workflow when they hit the required threshold. Thus, # this function only needs to consider the time requirements because these updates have # not reached the karma threshold. if update.meets_testing_requirements: print(f'{update.alias} now meets testing requirements') update.comment(db, str(config.get('testing_approval_msg')), author='bodhi') notifications.publish(update_schemas.UpdateRequirementsMetStableV1.from_dict( dict(update=update))) if update.autotime and update.days_in_testing >= update.stable_days: print(f""Automatically marking {update.alias} as stable"") update.set_request(db=db, action=UpdateRequest.stable, username=""bodhi"") # For updates that do not stay in testing at all, mark the update as stable # Do note that if we ever configure a release which is composed by bodhi with # mandatory_days_in_testing = 0, its updates will end up being marked as stable # before the compose. This may lead to some unexpected behavior. if update.release.mandatory_days_in_testing == 0: update.status = UpdateStatus.stable db.commit() except Exception as e: print(str(e)) db.rollback() Session.remove() sys.exit(1) ","def main(argv=sys.argv): """""" Comment on updates that are eligible to be pushed to stable. Queries for updates in the testing state that have a NULL request, looping over them looking for updates that are eligible to be pushed to stable but haven't had comments from Bodhi to this effect. For each such update it finds it will add a comment stating that the update may now be pushed to stable. This function is the entry point for the bodhi-approve-testing console script. Args: argv (list): A list of command line arguments. Defaults to sys.argv. """""" logging.basicConfig(level=logging.ERROR) if len(argv) != 2: usage(argv) settings = get_appsettings(argv[1]) initialize_db(settings) db = Session() buildsys.setup_buildsystem(config) try: testing = db.query(Update).filter_by(status=UpdateStatus.testing, request=None) for update in testing: if not update.release.mandatory_days_in_testing and not update.autotime: # If this release does not have any testing requirements and is not autotime, # skip it print(f""{update.release.name} doesn't have mandatory days in testing"") continue # If this update was already commented, skip it if update.has_stable_comment: continue # If updates have reached the testing threshold, say something! Keep in mind # that we don't care about karma here, because autokarma updates get their request set # to stable by the Update.comment() workflow when they hit the required threshold. 
Thus, # this function only needs to consider the time requirements because these updates have # not reached the karma threshold. if update.meets_testing_requirements: print(f'{update.alias} now meets testing requirements') update.comment(db, str(config.get('testing_approval_msg')), author='bodhi') notifications.publish(update_schemas.UpdateRequirementsMetStableV1.from_dict( dict(update=update))) if update.autotime and update.days_in_testing >= update.stable_days: print(f""Automatically marking {update.alias} as stable"") update.set_request(db=db, action=UpdateRequest.stable, username=""bodhi"") # For updates that do not stay in testing at all, mark the update as stable # Do note that if we ever configure a release which is composed by bodhi with # mandatory_days_in_testing = 0, its updates will end up being marked as stable # before the compose. This may lead to some unexpected behavior. if not update.release.composed_by_bodhi: update.status = UpdateStatus.stable db.commit() except Exception as e: print(str(e)) db.rollback() Session.remove() sys.exit(1) " 31342,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'Picus-GetAccessToken': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 
'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" ''' EXECUTION ''' #LOG('command is %s' % (demisto.command(), )) demisto.debug(f'Command being called is {demisto.command()}') try: LOG('Command being called is {command}'.format(command=demisto.command())) if demisto.command() == 'picus-get-access-token': getAccessToken() elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results token = getAccessToken() demisto.results(vectorCompare(token)) elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(attackResultList(token)) elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional token = getAccessToken() demisto.results(specificThreatsResults(token)) elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses token = getAccessToken() demisto.results(peerList(token)) elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses token = getAccessToken() demisto.results(eMailPeerList(token)) elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors token = getAccessToken() demisto.results(attackAllVectors(token)) elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector token = getAccessToken() demisto.results(attackSingle(token)) elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully token = getAccessToken() demisto.results(triggerUpdate(token)) elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config token = getAccessToken() demisto.results(version(token)) elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration token = getAccessToken() demisto.results(threatList(token)) elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(mitigationList(token)) elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters token = getAccessToken() demisto.results(mitreMatrix(token)) elif demisto.command() == 
'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license token = getAccessToken() demisto.results(sigmaRulesList(token)) elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination token = getAccessToken() demisto.results(vectorList(token)) elif demisto.command() == 'test-module': demisto.results(test_module()) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 24622,"def test_trilinear_approx(): vspace1_args = { ""x_range"": [0, 10], ""y_range"": [0, 10], ""z_range"": [0, 10], ""precision"": [10 / 46, 10 / 46, 10 / 46], ""func"": vspace_func_1, } vspace1 = vector_space(**vspace1_args) vspace2_args = { ""x_range"": [0, 10], ""y_range"": [0, 10], ""z_range"": [0, 10], ""precision"": [10 / 46, 10 / 46, 10 / 46], ""func"": vspace_func_2, } vspace2 = vector_space(**vspace2_args) dx, dy, dz = vspace2[2] dx = dx[0] dy = dy[0] dz = dz[0] f000 = [0, 0, 0] f001 = [0, 0, dz] f010 = [0, dy, 0] f011 = [0, dy, dz] f100 = [dx, 0, 0] f101 = [dx, 0, dz] f110 = [dx, dy, 0] f111 = [dx, dy, dz] mid = [dx / 2, dy / 2, dz / 2] corners = [f000, f001, f010, f011, f100, f101, f110, f111] tlApprox = trilinear_approx(vspace2, [0, 0, 0]) # Testing Trilinear Approx function on the corners for p in corners: approx = tlApprox(p[0], p[1], p[2]) exact = vspace_func_2(p[0], p[1], p[2]) approx = approx.reshape(1, 3) arr = np.isclose(approx, exact, atol=ATOL) assert arr.all() # Testing Trilinear Approx function on a midpoint approx = tlApprox(mid[0], mid[1], mid[2]) approx = approx.reshape(1, 3) arr = np.isclose(approx, [-5.39130435, -21.5652174, 23.68667299], atol=ATOL) assert arr.all() ","def test_trilinear_approx(): vspace1_args = { ""x_range"": [0, 10], ""y_range"": [0, 10], ""z_range"": [0, 10], ""precision"": [10 / 46, 10 / 46, 10 / 46], ""func"": vspace_func_1, } vspace1 = vector_space(**vspace1_args) vspace2_args = { ""x_range"": [0, 10], ""y_range"": [0, 10], ""z_range"": [0, 10], ""precision"": [10 / 46, 10 / 46, 10 / 46], ""func"": vspace_func_2, } vspace2 = vector_space(**vspace2_args) dx, dy, dz = vspace2[2] dx = dx[0] dy = dy[0] dz = dz[0] f000 = [0, 0, 0] f001 = [0, 0, dz] f010 = [0, dy, 0] f011 = [0, dy, dz] f100 = [dx, 0, 0] f101 = [dx, 0, dz] f110 = [dx, dy, 0] f111 = [dx, dy, dz] mid = [dx / 2, dy / 2, dz / 2] corners = [f000, f001, f010, f011, f100, f101, f110, f111] tlApprox = trilinear_approx(vspace2, [0, 0, 0]) # Testing Trilinear Approx function on the corners for p in corners: approx = tlApprox(p[0], p[1], p[2]) exact = vspace_func_2(p[0], p[1], p[2]) approx = approx.reshape(1, 3) arr = np.isclose(approx, exact, atol=ATOL) assert arr.all() # Testing Trilinear Approx function on a midpoint approx = tlApprox(mid[0], mid[1], mid[2]) approx = approx.reshape(1, 3) assert np.allclose(approx, [-5.39130435, -21.5652174, 23.68667299], atol=ATOL) " 33035,"def install_build_src(args: CommandLineArguments, root: str, do_run_build_script: bool, for_cache: bool) -> None: if for_cache: return if args.build_script is None: return if do_run_build_script: with complete_step(""Copying in build script""): copy_file(args.build_script, os.path.join(root_home(args, root), os.path.basename(args.build_script))) sft: Optional[SourceFileTransfer] = 
None resolve_symlinks: bool = None if do_run_build_script: sft = args.source_file_transfer resolve_symlinks = args.source_file_resolve_symlinks else: sft = args.source_file_transfer_final resolve_symlinks = args.source_file_resolve_symlinks_final if args.build_sources is None or sft is None: return with complete_step(""Copying in sources""): target = os.path.join(root_home(args, root), ""src"") if sft in ( SourceFileTransfer.copy_git_others, SourceFileTransfer.copy_git_cached, SourceFileTransfer.copy_git_more, ): copy_git_files(args.build_sources, target, source_file_transfer=sft) elif sft == SourceFileTransfer.copy_all: ignore = shutil.ignore_patterns( "".git"", "".mkosi-*"", ""*.cache-pre-dev"", ""*.cache-pre-inst"", os.path.basename(args.output_dir) + ""/"" if args.output_dir else ""mkosi.output/"", os.path.basename(args.workspace_dir) + ""/"" if args.workspace_dir else ""mkosi.workspace/"", os.path.basename(args.cache_path) + ""/"" if args.cache_path else ""mkosi.cache/"", os.path.basename(args.build_dir) + ""/"" if args.build_dir else ""mkosi.builddir/"", os.path.basename(args.include_dir) + ""/"" if args.include_dir else ""mkosi.includedir/"", os.path.basename(args.install_dir) + ""/"" if args.install_dir else ""mkosi.installdir/"", ) shutil.copytree(args.build_sources, target, symlinks=not resolve_symlinks, ignore=ignore) ","def install_build_src(args: CommandLineArguments, root: str, do_run_build_script: bool, for_cache: bool) -> None: if for_cache: return if args.build_script is None: return if do_run_build_script: with complete_step(""Copying in build script""): copy_file(args.build_script, os.path.join(root_home(args, root), os.path.basename(args.build_script))) sft: Optional[SourceFileTransfer] = None resolve_symlinks: bool = False if do_run_build_script: sft = args.source_file_transfer resolve_symlinks = args.source_file_resolve_symlinks else: sft = args.source_file_transfer_final resolve_symlinks = args.source_file_resolve_symlinks_final if args.build_sources is None or sft is None: return with complete_step(""Copying in sources""): target = os.path.join(root_home(args, root), ""src"") if sft in ( SourceFileTransfer.copy_git_others, SourceFileTransfer.copy_git_cached, SourceFileTransfer.copy_git_more, ): copy_git_files(args.build_sources, target, source_file_transfer=sft) elif sft == SourceFileTransfer.copy_all: ignore = shutil.ignore_patterns( "".git"", "".mkosi-*"", ""*.cache-pre-dev"", ""*.cache-pre-inst"", os.path.basename(args.output_dir) + ""/"" if args.output_dir else ""mkosi.output/"", os.path.basename(args.workspace_dir) + ""/"" if args.workspace_dir else ""mkosi.workspace/"", os.path.basename(args.cache_path) + ""/"" if args.cache_path else ""mkosi.cache/"", os.path.basename(args.build_dir) + ""/"" if args.build_dir else ""mkosi.builddir/"", os.path.basename(args.include_dir) + ""/"" if args.include_dir else ""mkosi.includedir/"", os.path.basename(args.install_dir) + ""/"" if args.install_dir else ""mkosi.installdir/"", ) shutil.copytree(args.build_sources, target, symlinks=not resolve_symlinks, ignore=ignore) " 43429,"def var(op): r""""""Variance of the supplied observable. 
Args: op (Observable): a quantum observable object """""" if not isinstance(op, Observable): raise QuantumFunctionError( ""{} is not an observable: cannot be used with var"".format(op.name) ) if QNode._current_context is not None: # delete operations from QNode queue QNode._current_context.queue.remove(op) # set return type to be an expectation value op.return_type = ""variance"" if QNode._current_context is not None: # add observable to QNode observable queue QNode._current_context._append_op(op) return op ","def var(op): r""""""Variance of the supplied observable. Args: op (Observable): a quantum observable object """""" if not isinstance(op, Observable): raise QuantumFunctionError( ""{} is not an observable: cannot be used with var"".format(op.name) ) if QNode._current_context is not None: # delete operations from QNode queue QNode._current_context.queue.remove(op) # set return type to be a variance op.return_type = ""variance"" if QNode._current_context is not None: # add observable to QNode observable queue QNode._current_context._append_op(op) return op " 39468,"def main(devices_path, yes, families): devices = {} for path in glob.glob(os.path.join(devices_path, ""*.yaml"")): yamlfile = os.path.basename(path) family = re.match(r'stm32[a-z]+[0-9]', yamlfile)[0] device = os.path.splitext(yamlfile)[0].lower() if family == 'stm32wle5': family = 'stm32wl' if len(families) == 0 or family in families: if family not in devices: devices[family] = [] devices[family].append(device) table = read_device_table() dirs = "", "".join(x.lower()+""/"" for x in devices) print(""Going to create/update the following directories:"") print(dirs) if not yes: input(""Enter to continue, ctrl-C to cancel"") for family in devices: devices[family] = sorted(devices[family]) crate = family.lower() features = make_features(devices[family]) clauses = make_device_clauses(devices[family]) mods = make_mods(devices[family]) ufamily = family.upper() cargo_toml = CARGO_TOML_TPL.format( family=ufamily, crate=crate, version=VERSION, features=features, docs_features=str(CRATE_DOC_FEATURES[crate]), doc_target=CRATE_DOC_TARGETS[crate]) readme = README_TPL.format( family=ufamily, crate=crate, device=devices[family][0], version=VERSION, svd2rust_version=SVD2RUST_VERSION, devices=make_device_rows(table, family)) lib_rs = SRC_LIB_RS_TPL.format(family=ufamily, mods=mods, crate=crate, svd2rust_version=SVD2RUST_VERSION) build_rs = BUILD_TPL.format(device_clauses=clauses) os.makedirs(os.path.join(crate, ""src""), exist_ok=True) with open(os.path.join(crate, ""Cargo.toml""), ""w"") as f: f.write(cargo_toml) with open(os.path.join(crate, ""README.md""), ""w"") as f: f.write(readme) with open(os.path.join(crate, ""src"", ""lib.rs""), ""w"") as f: f.write(lib_rs) with open(os.path.join(crate, ""build.rs""), ""w"") as f: f.write(build_rs) ","def main(devices_path, yes, families): devices = {} for path in glob.glob(os.path.join(devices_path, ""*.yaml"")): yamlfile = os.path.basename(path) family = re.match(r'stm32[a-z]+[0-9]', yamlfile)[0] device = os.path.splitext(yamlfile)[0].lower() if family.startswith('stm32wl'): family = 'stm32wl' if len(families) == 0 or family in families: if family not in devices: devices[family] = [] devices[family].append(device) table = read_device_table() dirs = "", "".join(x.lower()+""/"" for x in devices) print(""Going to create/update the following directories:"") print(dirs) if not yes: input(""Enter to continue, ctrl-C to cancel"") for family in devices: devices[family] = sorted(devices[family]) crate = 
family.lower() features = make_features(devices[family]) clauses = make_device_clauses(devices[family]) mods = make_mods(devices[family]) ufamily = family.upper() cargo_toml = CARGO_TOML_TPL.format( family=ufamily, crate=crate, version=VERSION, features=features, docs_features=str(CRATE_DOC_FEATURES[crate]), doc_target=CRATE_DOC_TARGETS[crate]) readme = README_TPL.format( family=ufamily, crate=crate, device=devices[family][0], version=VERSION, svd2rust_version=SVD2RUST_VERSION, devices=make_device_rows(table, family)) lib_rs = SRC_LIB_RS_TPL.format(family=ufamily, mods=mods, crate=crate, svd2rust_version=SVD2RUST_VERSION) build_rs = BUILD_TPL.format(device_clauses=clauses) os.makedirs(os.path.join(crate, ""src""), exist_ok=True) with open(os.path.join(crate, ""Cargo.toml""), ""w"") as f: f.write(cargo_toml) with open(os.path.join(crate, ""README.md""), ""w"") as f: f.write(readme) with open(os.path.join(crate, ""src"", ""lib.rs""), ""w"") as f: f.write(lib_rs) with open(os.path.join(crate, ""build.rs""), ""w"") as f: f.write(build_rs) " 58952,"def upload_file(path, name): from azure.storage.blob import BlockBlobService # pylint: disable=import-error sas_token = None if os.path.isfile(TOKEN_PATH): sas_token = open(TOKEN_PATH).read().strip() elif ""AZURE_TOKEN"" in os.environ: sas_token = os.environ[""AZURE_TOKEN""] if sas_token is None: sas_token = subprocess.check_output(""az storage container generate-sas --account-name commadataci --name openpilotci --https-only --permissions lrw \ --expiry $(date -u '+%Y-%m-%dT%H:%M:%SZ' -d '+1 hour') --auth-mode login --as-user --output tsv"", shell=True).decode().strip(""\n"") service = BlockBlobService(account_name=""commadataci"", sas_token=sas_token) service.create_blob_from_path(""openpilotci"", name, path) return ""https://commadataci.blob.core.windows.net/openpilotci/"" + name ","def upload_file(path, name): from azure.storage.blob import BlockBlobService # pylint: disable=import-error sas_token = os.environ.get(""AZURE_TOKEN"", None) if os.path.isfile(TOKEN_PATH): sas_token = open(TOKEN_PATH).read().strip() if sas_token is None: sas_token = subprocess.check_output(""az storage container generate-sas --account-name commadataci --name openpilotci --https-only --permissions lrw \ --expiry $(date -u '+%Y-%m-%dT%H:%M:%SZ' -d '+1 hour') --auth-mode login --as-user --output tsv"", shell=True).decode().strip(""\n"") service = BlockBlobService(account_name=""commadataci"", sas_token=sas_token) service.create_blob_from_path(""openpilotci"", name, path) return ""https://commadataci.blob.core.windows.net/openpilotci/"" + name " 1625,"def make_pipeline(*steps, **kwargs): """"""Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of estimators. memory : None, str or object with the joblib.Memory interface Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. 
verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. See Also -------- sklearn.pipeline.Pipeline : Class for creating a pipeline of transforms with a final estimator. Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) Pipeline(steps=[('standardscaler', StandardScaler()), ('gaussiannb', GaussianNB())]) Returns ------- p : Pipeline """""" memory = kwargs.pop('memory', None) verbose = kwargs.pop('verbose', False) if kwargs: raise TypeError('Unknown keyword arguments: ""{}""' .format(list(kwargs.keys())[0])) return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose) ","def make_pipeline(*steps, **kwargs): """"""Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of estimators. memory : None, str or object with the joblib.Memory interface, default=None Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. See Also -------- sklearn.pipeline.Pipeline : Class for creating a pipeline of transforms with a final estimator. 
Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) Pipeline(steps=[('standardscaler', StandardScaler()), ('gaussiannb', GaussianNB())]) Returns ------- p : Pipeline """""" memory = kwargs.pop('memory', None) verbose = kwargs.pop('verbose', False) if kwargs: raise TypeError('Unknown keyword arguments: ""{}""' .format(list(kwargs.keys())[0])) return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose) " 33793,"def _test_ray_ml_libraries(image_tag: str) -> None: if ""gpu"" not in image_tag: return tf_container = DOCKER_CLIENT.containers.run( f""rayproject/ray-ml:{image_tag}"", ""pip freeze | grep ^tensorflow"", detach=True) tf_logs = tf_container.logs().decode() print(str(tf_logs)) assert ""tensorflow-gpu"" in tf_logs tf_container.stop() torch_container = DOCKER_CLIENT.containers.run( f""rayproject/ray-ml:{image_tag}"", ""pip freeze | grep ^torch"", detach=True) torch_logs = torch_container.logs().decode() print(str(torch_logs)) assert ""cu"" in torch_logs and ""cpu"" not in torch_logs, str(torch_logs) torch_container.stop() ","def _test_ray_ml_libraries(image_tag: str) -> None: if ""gpu"" not in image_tag: return tf_container = DOCKER_CLIENT.containers.run( f""rayproject/ray-ml:{image_tag}"", ""pip freeze | grep ^tensorflow"", detach=True) tf_logs = tf_container.logs().decode() print(str(tf_logs)) assert ""tensorflow-gpu"" in tf_logs, str(tf_logs) tf_container.stop() torch_container = DOCKER_CLIENT.containers.run( f""rayproject/ray-ml:{image_tag}"", ""pip freeze | grep ^torch"", detach=True) torch_logs = torch_container.logs().decode() print(str(torch_logs)) assert ""cu"" in torch_logs and ""cpu"" not in torch_logs, str(torch_logs) torch_container.stop() " 55503,"def create_df_from_partitions(partitions, axis): """""" Create DataFrame from remote partitions. Parameters ---------- partitions : list List of Ray.ObjectRef/Dask.Future referencing to partitions in depend of the engine used. Or list containing tuples of Ray.ObjectRef/Dask.Future referencing to ip addresses of partitions and partitions itself in depend of the engine used. axis : None, 0 or 1 The `axis` parameter is used to identify what are the partitions passed. You have to set: - `axis` to 0 if you want to create DataFrame from row partitions. - `axis` to 1 if you want to create DataFrame from column partitions. - `axis` to None if you want to create DataFrame from 2D list of partitions. Returns ------- DataFrame DataFrame instance created from remote partitions. 
"""""" from modin.data_management.factories.dispatcher import EngineDispatcher factory = EngineDispatcher.get_engine() partition_class = factory.io_cls.frame_cls._frame_mgr_cls._partition_class partition_frame_class = factory.io_cls.frame_cls partition_mgr_class = factory.io_cls.frame_cls._frame_mgr_cls # When collecting partitions to NumPy array they will be kept row-wise if axis is None: if isinstance(partitions[0][0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [ [partition_class(partition, ip=ip) for ip, partition in row] for row in partitions ] ) else: parts = np.array( [ [partition_class(partition) for partition in row] for row in partitions ] ) # When collecting partitions to NumPy array they will be kept row-wise elif axis == 0: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip)] for ip, partition in partitions] ) else: parts = np.array([[partition_class(partition)] for partition in partitions]) # When collecting partitions to NumPy array they will be kept column-wise elif axis == 1: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip) for ip, partition in partitions]] ) else: parts = np.array([[partition_class(partition) for partition in partitions]]) else: raise ValueError( f""Got unacceptable value of axis {axis}. Possible values are {0}, {1} or {None}."" ) index = partition_mgr_class.get_indices(0, parts, lambda df: df.axes[0]) columns = partition_mgr_class.get_indices(1, parts, lambda df: df.axes[1]) return DataFrame( query_compiler=PandasQueryCompiler(partition_frame_class(parts, index, columns)) ) ","def create_df_from_partitions(partitions, axis): """""" Create DataFrame from remote partitions. Parameters ---------- partitions : list List of Ray.ObjectRef/Dask.Future referencing partitions depending on the engine used. Or list of tuples of Ray.ObjectRef/Dask.Future referencing ip addresses of partitions and partitions themselves depending on the engine used. axis : None, 0 or 1 The `axis` parameter is used to identify what are the partitions passed. You have to set: - `axis` to 0 if you want to create DataFrame from row partitions. - `axis` to 1 if you want to create DataFrame from column partitions. - `axis` to None if you want to create DataFrame from 2D list of partitions. Returns ------- DataFrame DataFrame instance created from remote partitions. 
"""""" from modin.data_management.factories.dispatcher import EngineDispatcher factory = EngineDispatcher.get_engine() partition_class = factory.io_cls.frame_cls._frame_mgr_cls._partition_class partition_frame_class = factory.io_cls.frame_cls partition_mgr_class = factory.io_cls.frame_cls._frame_mgr_cls # When collecting partitions to NumPy array they will be kept row-wise if axis is None: if isinstance(partitions[0][0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [ [partition_class(partition, ip=ip) for ip, partition in row] for row in partitions ] ) else: parts = np.array( [ [partition_class(partition) for partition in row] for row in partitions ] ) # When collecting partitions to NumPy array they will be kept row-wise elif axis == 0: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip)] for ip, partition in partitions] ) else: parts = np.array([[partition_class(partition)] for partition in partitions]) # When collecting partitions to NumPy array they will be kept column-wise elif axis == 1: if isinstance(partitions[0], tuple): if EnablePartitionIPs.get() is False: raise ValueError( ""Passed `partitions` with IPs but `MODIN_ENABLE_PARTITIONS_API` env var was not exported."" ) parts = np.array( [[partition_class(partition, ip=ip) for ip, partition in partitions]] ) else: parts = np.array([[partition_class(partition) for partition in partitions]]) else: raise ValueError( f""Got unacceptable value of axis {axis}. Possible values are {0}, {1} or {None}."" ) index = partition_mgr_class.get_indices(0, parts, lambda df: df.axes[0]) columns = partition_mgr_class.get_indices(1, parts, lambda df: df.axes[1]) return DataFrame( query_compiler=PandasQueryCompiler(partition_frame_class(parts, index, columns)) ) " 42787,"def test_str_wrong(): """"""Test that string_function is not a Pandas string method"""""" df = pd.DataFrame( {""text"": [""ragnar"", ""sammywemmy"", ""ginger""], ""code"": [1, 2, 3]} ) with pytest.raises(KeyError): process_text(df, column=""text"", string_function=""ragnar"") ","def test_str_wrong(): """"""Test that string_function is not a Pandas string method"""""" df = pd.DataFrame( {""text"": [""ragnar"", ""sammywemmy"", ""ginger""], ""code"": [1, 2, 3]} ) with pytest.raises(KeyError): df.process_text(column=""text"", string_function=""ragnar"") " 13886,"def pytest_generate_tests(metafunc): """"""generate a list of all available integration tests."""""" is_windows = platform.system() == ""Windows"" global skip_clean skip_clean = metafunc.config.getoption(""skip_clean"") generate_reference = metafunc.config.getoption(""generate_reference"") update_reference = metafunc.config.getoption(""update_reference"") archive_differences = metafunc.config.getoption(""archive_differences"") collected_params = [] if archive_differences: # pragma: no cover diffs_zip = os.path.join(basedir, ""diff.zip"") # Create an empty ZIP zipfile.ZipFile(diffs_zip, mode=""w"").close() for name in findtests(basedir): targets = parse_makefile_for_available_targets( os.path.join(basedir, name, ""Makefile"") ) # check that the ""run"" target lists no unknown formats target_run = targets.get(""run"", set()) unknown_formats = target_run.difference(KNOWN_FORMATS) if unknown_formats: raise ValueError( 
""{}/Makefile target 'run' references unknown format {}"".format( name, unknown_formats ) ) # check that all ""run"" targets are actually available unresolved_prereqs = target_run.difference(targets) if unresolved_prereqs: raise ValueError( ""{}/Makefile target 'run' has unresolved prerequisite {}"".format( name, unresolved_prereqs ) ) # check that all available known formats are also listed in the ""run"" target unreferenced_formats = ( set(KNOWN_FORMATS).intersection(targets).difference(target_run) ) if unreferenced_formats: raise ValueError( ""{}/Makefile target 'run' doesn't reference available target {}"".format( name, unreferenced_formats ) ) for format in KNOWN_FORMATS: # only test formats where the Makefile provides a target if format not in targets: continue needs_symlinks = any( [ name == ""linked"" and format == ""html"", name == ""filter-relative-lib"", name == ""filter-relative-lib-from-unfiltered-tracefile"", ] ) marks = [ pytest.mark.xfail( needs_symlinks and is_windows, reason=""have yet to figure out symlinks on Windows"", ), pytest.mark.xfail( name == ""exclude-throw-branches"" and format == ""html"" and is_windows, reason=""branch coverage details seem to be platform-dependent"", ), pytest.mark.xfail( name == ""rounding"" and is_windows, reason=""branch coverage seem to be platform-dependent"", ), ] collected_params.append( pytest.param( name, format, targets, generate_reference, update_reference, archive_differences, marks=marks, id=""-"".join([name, format]), ) ) metafunc.parametrize( ""name, format, available_targets, generate_reference, update_reference, archive_differences"", collected_params, indirect=False, scope=""module"", ) ","def pytest_generate_tests(metafunc): """"""generate a list of all available integration tests."""""" is_windows = platform.system() == ""Windows"" global skip_clean skip_clean = metafunc.config.getoption(""skip_clean"") generate_reference = metafunc.config.getoption(""generate_reference"") update_reference = metafunc.config.getoption(""update_reference"") archive_differences = metafunc.config.getoption(""archive_differences"") collected_params = [] if archive_differences: # pragma: no cover diffs_zip = os.path.join(basedir, ""diff.zip"") # Create an empty ZIP zipfile.ZipFile(diffs_zip, mode=""w"").close() for name in findtests(basedir): targets = parse_makefile_for_available_targets( os.path.join(basedir, name, ""Makefile"") ) # check that the ""run"" target lists no unknown formats target_run = targets.get(""run"", set()) unknown_formats = target_run.difference(KNOWN_FORMATS) if unknown_formats: # pragma: no cover raise ValueError( ""{}/Makefile target 'run' references unknown format {}"".format( name, unknown_formats ) ) # check that all ""run"" targets are actually available unresolved_prereqs = target_run.difference(targets) if unresolved_prereqs: raise ValueError( ""{}/Makefile target 'run' has unresolved prerequisite {}"".format( name, unresolved_prereqs ) ) # check that all available known formats are also listed in the ""run"" target unreferenced_formats = ( set(KNOWN_FORMATS).intersection(targets).difference(target_run) ) if unreferenced_formats: raise ValueError( ""{}/Makefile target 'run' doesn't reference available target {}"".format( name, unreferenced_formats ) ) for format in KNOWN_FORMATS: # only test formats where the Makefile provides a target if format not in targets: continue needs_symlinks = any( [ name == ""linked"" and format == ""html"", name == ""filter-relative-lib"", name == 
""filter-relative-lib-from-unfiltered-tracefile"", ] ) marks = [ pytest.mark.xfail( needs_symlinks and is_windows, reason=""have yet to figure out symlinks on Windows"", ), pytest.mark.xfail( name == ""exclude-throw-branches"" and format == ""html"" and is_windows, reason=""branch coverage details seem to be platform-dependent"", ), pytest.mark.xfail( name == ""rounding"" and is_windows, reason=""branch coverage seem to be platform-dependent"", ), ] collected_params.append( pytest.param( name, format, targets, generate_reference, update_reference, archive_differences, marks=marks, id=""-"".join([name, format]), ) ) metafunc.parametrize( ""name, format, available_targets, generate_reference, update_reference, archive_differences"", collected_params, indirect=False, scope=""module"", ) " 47418,"def remove_ignore_keys_(state_dict): ignore_keys = [ ""pretrained.model.head.weight"", ""pretrained.model.head.bias"", ] for k in ignore_keys: state_dict.pop(k, None) ","def remove_ignore_keys_(state_dict): ignore_keys = [""pretrained.model.head.weight"", ""pretrained.model.head.bias""] for k in ignore_keys: state_dict.pop(k, None) " 4604,"def create_tmp_filepath( base_path, image_type=""regular"", suffix=""test"", copy_confounds=False, copy_json=False, old_deriveative_suffix=False ): """"""Create test files in temporary directory."""""" deriveative = ""regressors"" if old_deriveative_suffix else ""timeseries"" # confound files confounds_root = f""_desc-confounds_{deriveative}.tsv"" tmp_conf = base_path / (suffix + confounds_root) if copy_confounds: conf, meta = get_leagal_confound() conf.to_csv(tmp_conf, sep=""\t"", index=False) else: tmp_conf.touch() if copy_json: meta_root = f""_desc-confounds_{deriveative}.json"" tmp_meta = base_path / (suffix + meta_root) conf, meta = get_leagal_confound() with open(tmp_meta, ""w"") as file: json.dump(meta, file, indent=2) # image data # convert path object to string as nibabel do strings img_root = img_file_patterns[image_type] if type(img_root) is str: tmp_img = suffix + img_root tmp_img = base_path / tmp_img tmp_img.touch() tmp_img = str(tmp_img) else: tmp_img = [] for root in img_root: tmp_gii = suffix + root tmp_gii = base_path / tmp_gii tmp_gii.touch() tmp_img.append(str(tmp_gii)) return tmp_img, tmp_conf ","def create_tmp_filepath( base_path, image_type=""regular"", suffix=""test"", copy_confounds=False, copy_json=False, old_derivative_suffix=False ): """"""Create test files in temporary directory."""""" deriveative = ""regressors"" if old_deriveative_suffix else ""timeseries"" # confound files confounds_root = f""_desc-confounds_{deriveative}.tsv"" tmp_conf = base_path / (suffix + confounds_root) if copy_confounds: conf, meta = get_leagal_confound() conf.to_csv(tmp_conf, sep=""\t"", index=False) else: tmp_conf.touch() if copy_json: meta_root = f""_desc-confounds_{deriveative}.json"" tmp_meta = base_path / (suffix + meta_root) conf, meta = get_leagal_confound() with open(tmp_meta, ""w"") as file: json.dump(meta, file, indent=2) # image data # convert path object to string as nibabel do strings img_root = img_file_patterns[image_type] if type(img_root) is str: tmp_img = suffix + img_root tmp_img = base_path / tmp_img tmp_img.touch() tmp_img = str(tmp_img) else: tmp_img = [] for root in img_root: tmp_gii = suffix + root tmp_gii = base_path / tmp_gii tmp_gii.touch() tmp_img.append(str(tmp_gii)) return tmp_img, tmp_conf " 43367,"def poly_quad_expectations(mu, cov, wires, params, hbar=2.): r""""""Calculates the expectation and variance for an arbitrary 
polynomial of quadrature operators. Args: mu (array): length-2 vector of means. cov (array): :math:`2\times 2` covariance matrix. wires (Sequence[int]): wires to calculate the expectation for. params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear and quadratic coefficients of the quadrature operators :math:`(I, \x_0, \p_0, \x_1, \p_1,\dots)`. hbar (float): (default 2) the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`. Returns: tuple: contains the quadrature expectation and variance. """""" Q = params[0] N = len(mu)//2 # HACK, we need access to the Poly instance in order to expand the matrix! op = qm.expval.PolyXP(Q, wires=wires, do_queue=False) Q = op.heisenberg_obs(N) if Q.ndim == 1: d = np.r_[Q[1::2], Q[2::2]] return d.T @ mu + Q[0], d.T @ cov @ d # convert to the (I, x1,x2,..., p1,p2...) ordering M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :])) M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2])) d1 = M[1:, 0] d2 = M[0, 1:] A = M[1:, 1:] d = d1 + d2 k = M[0, 0] d2 = 2*A @ mu + d k2 = mu.T @ A @ mu + mu.T @ d + k ex = np.trace(A @ cov) + k2 var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2 modes = np.arange(2*N).reshape(2, -1).T groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes]) var -= groenewald_correction return ex, var ","def poly_quad_expectations(mu, cov, wires, params, hbar=2.): r""""""Calculates the expectation and variance for an arbitrary polynomial of quadrature operators. Args: mu (array): length-2 vector of means. cov (array): :math:`2\times 2` covariance matrix. wires (Sequence[int]): wires to calculate the expectation for. params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear and quadratic coefficients of the quadrature operators :math:`(\mathbf{1}, \x_0, \p_0, \x_1, \p_1,\dots)`. hbar (float): (default 2) the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`. Returns: tuple: contains the quadrature expectation and variance. """""" Q = params[0] N = len(mu)//2 # HACK, we need access to the Poly instance in order to expand the matrix! op = qm.expval.PolyXP(Q, wires=wires, do_queue=False) Q = op.heisenberg_obs(N) if Q.ndim == 1: d = np.r_[Q[1::2], Q[2::2]] return d.T @ mu + Q[0], d.T @ cov @ d # convert to the (I, x1,x2,..., p1,p2...) 
ordering M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :])) M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2])) d1 = M[1:, 0] d2 = M[0, 1:] A = M[1:, 1:] d = d1 + d2 k = M[0, 0] d2 = 2*A @ mu + d k2 = mu.T @ A @ mu + mu.T @ d + k ex = np.trace(A @ cov) + k2 var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2 modes = np.arange(2*N).reshape(2, -1).T groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes]) var -= groenewald_correction return ex, var " 50027,"def test_watched_directory(): for expected in EXPECTED_DIRS: assert amtypes.retrieve_watched_directory(expected[0]) == expected[1] amtypes.retrieve_watched_directory( ""unknown transfer type"" ) == amtypes.WATCHED_STANDARD with pytest.raises(KeyError): amtypes.retrieve_watched_directory(""unknown transfer type with exception"", True) ","def test_watched_directory(): for transfer_type, watched_directory_name in EXPECTED_DIRS: assert amtypes.retrieve_watched_directory(transfer_type) == watched_directory_name amtypes.retrieve_watched_directory( ""unknown transfer type"" ) == amtypes.WATCHED_STANDARD with pytest.raises(KeyError): amtypes.retrieve_watched_directory(""unknown transfer type with exception"", True) " 24732,"def _gen_file_datas(count=1): file_infos = [_gen_file_data(idx) for idx in range(count)] return file_infos ","def _gen_file_datas(count=1): return [_gen_file_data(idx) for idx in range(count)] return file_infos " 43569,"def _unflatten(flat, model): """"""Restores an arbitrary nested structure to a flattened iterable. See also :func:`_flatten`. Args: flat (array): 1D array of items model (array, Iterable, Number): model nested structure Raises: TypeError: if `model` contains an object of unsupported type Returns: (other, array): first elements of flat arranged into the nested structure of model, unused elements of flat """""" if isinstance(model, (numbers.Number, Variable, str)): return flat[0], flat[1:] elif isinstance(model, np.ndarray): idx = model.size res = np.array(flat)[:idx].reshape(model.shape) return res, flat[idx:] elif isinstance(model, Iterable): res = [] for x in model: val, flat = _unflatten(flat, x) res.append(val) return res, flat else: raise TypeError(""Unsupported type in the model: {}"".format(type(model))) ","def _unflatten(flat, model): """"""Restores an arbitrary nested structure to a flattened iterable. See also :func:`_flatten`. Args: flat (array): 1D array of items model (array, Iterable, Number): model nested structure Raises: TypeError: if ``model`` contains an object of unsupported type Returns: (other, array): first elements of flat arranged into the nested structure of model, unused elements of flat """""" if isinstance(model, (numbers.Number, Variable, str)): return flat[0], flat[1:] elif isinstance(model, np.ndarray): idx = model.size res = np.array(flat)[:idx].reshape(model.shape) return res, flat[idx:] elif isinstance(model, Iterable): res = [] for x in model: val, flat = _unflatten(flat, x) res.append(val) return res, flat else: raise TypeError(""Unsupported type in the model: {}"".format(type(model))) " 34526,"def bool_from_any(x: Any) -> bool: """""" Converts bool/float/int/str to bool or raises TypeError """""" if isinstance(x, bool): return x elif isinstance(x, (float, int)): return x == 1. elif isinstance(x, str) and x.isnumeric(): return float(x) == 1. 
elif isinstance(x, str): return x.strip().lower() == ""true"" else: raise TypeError(""Cannot convert to bool"") ","def bool_from_any(x: Any) -> bool: """""" Converts bool/float/int/str to bool or raises TypeError """""" if isinstance(x, bool): return x elif isinstance(x, (float, int)): return x == 1. elif isinstance(x, str) and x.isnumeric(): return float(x) == 1.0 elif isinstance(x, str): return x.strip().lower() == ""true"" else: raise TypeError(""Cannot convert to bool"") " 20233,"def _get_deploy_environment(): return getattr(settings, 'DEPLOY_ENVIRONMENT', 'local') ","def _get_deploy_environment(): return settings.DEPLOY_ENVIRONMENT or 'local' " 47014,"def speed_metrics(mode, t0, n_objs): """""" Measure and return speed performance metrics. This function requires a time snapshot `t0` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - mode: one of train, eval, test - t0: operation start time - n_objs: number of samples processed """""" runtime = time.time() - t0 result = {} samples_per_second = 1 / (runtime / n_objs) result[f""{mode}_samples_per_second""] = round(samples_per_second, 3) result[f""{mode}_runtime""] = round(runtime, 4) result[f""{mode}_n_ojbs""] = n_objs return result ","def speed_metrics(mode, start_time, num_samples): """""" Measure and return speed performance metrics. This function requires a time snapshot `t0` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - mode: one of train, eval, test - t0: operation start time - n_objs: number of samples processed """""" runtime = time.time() - t0 result = {} samples_per_second = 1 / (runtime / n_objs) result[f""{mode}_samples_per_second""] = round(samples_per_second, 3) result[f""{mode}_runtime""] = round(runtime, 4) result[f""{mode}_n_ojbs""] = n_objs return result " 44135,"def apply_to_measurement(func: Callable): """""" Apply an arbitrary function to a `MeasurementValue` or set of `MeasurementValue`s. (func should be a ""pure"" function) Ex: .. code-block:: python m0 = qml.measure(0) m0_sin = qml.apply_to_measurement(np.sin)(m0) """""" @functools.wraps(func) def wrapper(*args, **kwargs): partial = MeasurementLeaf() for arg in args: if not isinstance(arg, MeasurementValue): arg = MeasurementLeaf(arg) partial = partial.merge(arg) partial.transform_leaves_inplace( lambda *unwrapped: func(*unwrapped, **kwargs) # pylint: disable=unnecessary-lambda ) return partial return wrapper ","def apply_to_measurement(func: Callable): """""" Apply an arbitrary function to one or more ``MeasurementValue`` results. (func should be a ""pure"" function) Ex: .. 
code-block:: python m0 = qml.measure(0) m0_sin = qml.apply_to_measurement(np.sin)(m0) """""" @functools.wraps(func) def wrapper(*args, **kwargs): partial = MeasurementLeaf() for arg in args: if not isinstance(arg, MeasurementValue): arg = MeasurementLeaf(arg) partial = partial.merge(arg) partial.transform_leaves_inplace( lambda *unwrapped: func(*unwrapped, **kwargs) # pylint: disable=unnecessary-lambda ) return partial return wrapper " 28541,"def abstract_main(cls, validator=None): import argparse from .config import Config, ConfigParseError from .version import __version__ parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) if validator: parser.add_argument('--validate-config', action='store_true', help='Run config validator and exit') parser.add_argument('configfile', nargs='?', default='', help='Patroni may also read the configuration from the {0} environment variable' .format(Config.PATRONI_CONFIG_VARIABLE)) args = parser.parse_args() validate_config = validator and args.validate_config try: if validate_config: Config(args.configfile, validator=validator) sys.exit() config = Config(args.configfile) except ConfigParseError as e: if e.value: print(e.value, file=sys.stderr) if not validate_config: parser.print_help() sys.exit(1) controller = cls(config) try: controller.run() except KeyboardInterrupt: pass finally: controller.shutdown() ","def abstract_main(cls, validator=None): import argparse from .config import Config, ConfigParseError from .version import __version__ parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) if validator: parser.add_argument('--validate-config', action='store_true', help='Run config validator and exit') parser.add_argument('configfile', nargs='?', default='', help='Patroni may also read the configuration from the {0} environment variable' .format(Config.PATRONI_CONFIG_VARIABLE)) args = parser.parse_args() validate_config = validator and args.validate_config try: if validate_config: Config(args.configfile, validator=validator) sys.exit() config = Config(args.configfile) except ConfigParseError as e: if e.value: print(e.value, file=sys.stderr) if not validate_config: sys.exit(e.value) sys.exit(1) controller = cls(config) try: controller.run() except KeyboardInterrupt: pass finally: controller.shutdown() " 32522,"def resolve_type(file_path: str) -> Optional[FileType]: """"""Will classify file_path that tools.find_type could not find types for. Args: file_path: file path to classify Returns: FileType. Conf.json and Metadata files. """""" # if conf.json file # TODO: remove if when demisto-sdk 1.7.3 is released if checked_type(file_path, [content_constant_paths.CONF_PATH.relative_to(get_content_path()).as_posix() if IS_UP_TO_DATE else constants.CONF_PATH]): return FileType.CONF_JSON # landingPage_sections.json file if checked_type(file_path, [LANDING_PAGE_SECTIONS_JSON_PATH]): return FileType.LANDING_PAGE_SECTIONS_JSON # MetaData files elif any(file in file_path for file in (constants.PACKS_PACK_META_FILE_NAME, constants.PACKS_WHITELIST_FILE_NAME,)): return FileType.METADATA # Whitelist file type elif checked_type(file_path, [SECRETS_WHITE_LIST]): return FileType.WHITE_LIST return None ","def resolve_type(file_path: str) -> Optional[FileType]: """"""Will classify file_path that tools.find_type could not find types for. Args: file_path: file path to classify Returns: FileType. Conf.json and Metadata files. 
"""""" # if conf.json file # TODO: remove when demisto-sdk 1.7.3 is released if checked_type(file_path, [content_constant_paths.CONF_PATH.relative_to(get_content_path()).as_posix() if IS_UP_TO_DATE else constants.CONF_PATH]): return FileType.CONF_JSON # landingPage_sections.json file if checked_type(file_path, [LANDING_PAGE_SECTIONS_JSON_PATH]): return FileType.LANDING_PAGE_SECTIONS_JSON # MetaData files elif any(file in file_path for file in (constants.PACKS_PACK_META_FILE_NAME, constants.PACKS_WHITELIST_FILE_NAME,)): return FileType.METADATA # Whitelist file type elif checked_type(file_path, [SECRETS_WHITE_LIST]): return FileType.WHITE_LIST return None " 32363,"def set_password_not_expire(default_base_dn): args = demisto.args() sam_account_name = args.get('username') pwd_n_exp = args.get('value') # query by sAMAccountName if sam_account_name or args.get('sAMAccountName'): if sam_account_name: username = escape_filter_chars(sam_account_name) else: username = escape_filter_chars(args['sAMAccountName']) query = ""(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))"".format(username) entries = search_with_paging(query, default_base_dn, attributes='userAccountControl', size_limit=0, time_limit=0) user = entries.get('flat')[0] user_account_control = user.get('userAccountControl')[0] # Check if UAC flag for ""Password Never Expire"" (0x10000) is set to True or False: if pwd_n_exp == 'true': # Sets the bit 16 to 1 user_account_control |= 1 << 16 content_output = f""AD account {username} has set \""password never expire\"" attribute. Value is set to True"" else: # Clears the bit 16 to 0 user_account_control &= ~(1 << 16) content_output = f""AD account {username} has cleared \""password never expire\"" attribute. Value is set to False"" attribute_name = 'userAccountControl' attribute_value = user_account_control dn = user_dn(sam_account_name, default_base_dn) modification = {} modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)] # modify user modify_object(dn, modification) demisto_entry = { 'ContentsFormat': formats['text'], 'Type': entryTypes['note'], 'Contents': content_output } demisto.results(demisto_entry) ","def set_password_not_expire(default_base_dn): args = demisto.args() sam_account_name = args.get('username') pwd_n_exp = args.get('value') # query by sAMAccountName if sam_account_name or args.get('sAMAccountName'): if sam_account_name: username = escape_filter_chars(sam_account_name) else: username = escape_filter_chars(args['sAMAccountName']) query = ""(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))"".format(username) entries = search_with_paging(query, default_base_dn, attributes='userAccountControl', size_limit=0, time_limit=0) user = entries.get('flat')[0] user_account_control = user.get('userAccountControl')[0] # Check if UAC flag for ""Password Never Expire"" (0x10000) is set to True or False: if pwd_n_exp: # Sets the bit 16 to 1 user_account_control |= 1 << 16 content_output = f""AD account {username} has set \""password never expire\"" attribute. Value is set to True"" else: # Clears the bit 16 to 0 user_account_control &= ~(1 << 16) content_output = f""AD account {username} has cleared \""password never expire\"" attribute. 
Value is set to False"" attribute_name = 'userAccountControl' attribute_value = user_account_control dn = user_dn(sam_account_name, default_base_dn) modification = {} modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)] # modify user modify_object(dn, modification) demisto_entry = { 'ContentsFormat': formats['text'], 'Type': entryTypes['note'], 'Contents': content_output } demisto.results(demisto_entry) " 54491,"def get_verbosity() -> int: """"""Return the current level for the Optuna's root logger. Example: Get the default verbosity level: .. testsetup:: def objective(trial): x = trial.suggest_float(""x"", -100, 100) y = trial.suggest_categorical(""y"", [-1, 0, 1]) return x ** 2 + y .. testcode:: import optuna # The default verbosity level of Optuna is `optuna.logging.INFO`. print(optuna.logging.get_verbosity()) # 20 print(optuna.logging.INFO) # 20 # There are logs of the INFO level. study = optuna.create_study() study.optimize(objective, n_trials=5) # [I 2021-10-31 05:35:17,232] A new study created ... # [I 2021-10-31 05:35:17,238] Trial 0 finished with value: ... # [I 2021-10-31 05:35:17,245] Trial 1 finished with value: ... # ... Returns: Logging level, e.g., ``optuna.logging.DEBUG`` and ``optuna.logging.INFO``. .. note:: Optuna has following logging levels: - ``optuna.logging.CRITICAL``, ``optuna.logging.FATAL`` - ``optuna.logging.ERROR`` - ``optuna.logging.WARNING``, ``optuna.logging.WARN`` - ``optuna.logging.INFO`` - ``optuna.logging.DEBUG`` """""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() ","def get_verbosity() -> int: """"""Return the current level for the Optuna's root logger. Example: Get the default verbosity level: .. testsetup:: def objective(trial): x = trial.suggest_float(""x"", -100, 100) y = trial.suggest_categorical(""y"", [-1, 0, 1]) return x ** 2 + y .. testcode:: import optuna # The default verbosity level of Optuna is `optuna.logging.INFO`. print(optuna.logging.get_verbosity()) # 20 print(optuna.logging.INFO) # 20 # There are logs of the INFO level. study = optuna.create_study() study.optimize(objective, n_trials=5) # [I 2021-10-31 05:35:17,232] A new study created ... # [I 2021-10-31 05:35:17,238] Trial 0 finished with value: ... # [I 2021-10-31 05:35:17,245] Trial 1 finished with value: ... # ... .. testoutput:: 20 20 Returns: Logging level, e.g., ``optuna.logging.DEBUG`` and ``optuna.logging.INFO``. .. note:: Optuna has following logging levels: - ``optuna.logging.CRITICAL``, ``optuna.logging.FATAL`` - ``optuna.logging.ERROR`` - ``optuna.logging.WARNING``, ``optuna.logging.WARN`` - ``optuna.logging.INFO`` - ``optuna.logging.DEBUG`` """""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() " 58066,"def override_make_request(self, method: str, uri: str, body: dict, headers: dict): # pragma: no cover """""" This function is an override function to the original duo_client.client.Client._make_request function in API version 4.1.0 The reason for it is that the API creates a bad uri address for the GET requests. 
"""""" conn = self._connect() conn.request(method, uri, body, headers) response = conn.getresponse() data = response.read() self._disconnect(conn) return response, data ","def override_make_request(self, method: str, uri: str, body: dict, headers: dict): # pragma: no cover """""" This function is an override function to the original duo_client.client.Client._make_request function in API version 4.1.0 The reason for it is that the API creates a bad uri address for the GET requests. """""" conn = self._connect() try: conn.request(method, uri, body, headers) response = conn.getresponse() data = response.read() return response, data finally: self._disconnect(conn) " 58079,"def main(): args = demisto.args() filename = args.get('filename') or '' data = args.get('data') or '' data_encoding = args.get('data_encoding') or 'raw' entdy_id = args.get('entryId') try: if entdy_id: res = demisto.executeCommand('getEntry', {'id': entdy_id}) if is_error(res): demisto.results(res) # noqa sys.exit(0) data = demisto.get(res[0], 'Contents') if data_encoding == 'raw': pass elif data_encoding == 'base64': data = base64.b64decode(data) else: raise ValueError(f'Invalid data encoding name: {data_encoding}') demisto.results(fileResult(filename, data)) # noqa except Exception as e: return_error(str(e) + ""\n\nTrace:\n"" + traceback.format_exc()) ","def main(): args = demisto.args() filename = args.get('filename') or '' data = args.get('data') or '' data_encoding = args.get('data_encoding') or 'raw' entdy_id = args.get('entryId') try: if entdy_id: res = demisto.executeCommand('getEntry', {'id': entdy_id}) if is_error(res): demisto.results(res) # noqa sys.exit(0) data = demisto.get(res[0], 'Contents') if data_encoding == 'base64': data = base64.b64decode(data) elif data_encoding != 'raw': raise ValueError(f'Invalid data encoding name: {data_encoding}') demisto.results(fileResult(filename, data)) # noqa except Exception as e: return_error(str(e) + ""\n\nTrace:\n"" + traceback.format_exc()) " 43233,"def test_psrl(args=get_args()): env = gym.make(args.task) args.state_shape = env.observation_space.shape or env.observation_space.n args.action_shape = env.env.action_space.shape or env.env.action_space.n # train_envs = gym.make(args.task) # train_envs = gym.make(args.task) train_envs = VectorEnv( [lambda: gym.make(args.task) for _ in range(args.training_num)]) # test_envs = gym.make(args.task) test_envs = SubprocVectorEnv( [lambda: gym.make(args.task) for _ in range(args.test_num)]) # seed np.random.seed(args.seed) torch.manual_seed(args.seed) train_envs.seed(args.seed) test_envs.seed(args.seed) # model n_action = args.action_shape n_state = args.state_shape p_pri = 1e-3 * np.ones((n_action, n_state, n_state)) rew_mean = np.zeros((n_state, n_action)) rew_std = np.ones((n_state, n_action)) model = PSRLModel(p_pri, rew_mean, rew_std) policy = PSRLPolicy(model) # collector train_collector = Collector( policy, train_envs, ReplayBuffer(args.buffer_size)) test_collector = Collector(policy, test_envs) # log writer = SummaryWriter(args.logdir + '/' + 'FrozenLake') def train_fn(x): policy.set_eps(args.eps_train) def test_fn(x): policy.set_eps(args.eps_test) def stop_fn(x): if env.env.spec.reward_threshold: return x >= env.spec.reward_threshold else: return False # trainer result = offpolicy_trainer( policy, train_collector, test_collector, args.epoch, args.step_per_epoch, args.collect_per_step, args.test_num, args.batch_size, train_fn=train_fn, test_fn=test_fn, stop_fn=stop_fn, writer=writer) train_collector.close() 
test_collector.close() if __name__ == '__main__': pprint.pprint(result) # Let's watch its performance! env = gym.make(args.task) collector = Collector(policy, env) result = collector.collect(n_episode=1, render=args.render) print(f'Final reward: {result[""rew""]}, length: {result[""len""]}') collector.close() ","def test_psrl(args=get_args()): env = gym.make(args.task) args.state_shape = env.observation_space.shape or env.observation_space.n args.action_shape = env.env.action_space.shape or env.env.action_space.n # train_envs = gym.make(args.task) # train_envs = gym.make(args.task) train_envs = VectorEnv( [lambda: gym.make(args.task) for _ in range(args.training_num)]) # test_envs = gym.make(args.task) test_envs = SubprocVectorEnv( [lambda: gym.make(args.task) for _ in range(args.test_num)]) # seed np.random.seed(args.seed) torch.manual_seed(args.seed) train_envs.seed(args.seed) test_envs.seed(args.seed) # model n_action = args.action_shape n_state = args.state_shape p_pri = 1e-3 * np.ones((n_action, n_state, n_state)) rew_mean = np.zeros((n_state, n_action)) rew_std = np.ones((n_state, n_action)) model = PSRLModel(p_pri, rew_mean, rew_std) policy = PSRLPolicy(model) # collector train_collector = Collector( policy, train_envs, ReplayBuffer(args.buffer_size)) test_collector = Collector(policy, test_envs) # log writer = SummaryWriter(args.logdir + '/' + args.task) def train_fn(x): policy.set_eps(args.eps_train) def test_fn(x): policy.set_eps(args.eps_test) def stop_fn(x): if env.env.spec.reward_threshold: return x >= env.spec.reward_threshold else: return False # trainer result = offpolicy_trainer( policy, train_collector, test_collector, args.epoch, args.step_per_epoch, args.collect_per_step, args.test_num, args.batch_size, train_fn=train_fn, test_fn=test_fn, stop_fn=stop_fn, writer=writer) train_collector.close() test_collector.close() if __name__ == '__main__': pprint.pprint(result) # Let's watch its performance! env = gym.make(args.task) collector = Collector(policy, env) result = collector.collect(n_episode=1, render=args.render) print(f'Final reward: {result[""rew""]}, length: {result[""len""]}') collector.close() " 7402,"def check_numpy_arr(arr, name, bool_expected=False): if type(arr) != np.ndarray: raise ValueError( f""{name} is of type {type(arr)} not nd.array of dtype boolean as"", ""expected"") if bool_expected: if np.sum(np.where(arr > 1, 1, 0)) > 0: raise ValueError( f""{name} is ndarray of dtype {arr.dtype} with non-binary"", ""values.Check if image mask was passed in as expected."") ","def check_numpy_arr(arr, name, bool_expected=False): if type(arr) != np.ndarray: raise ValueError( f""{name} is of type {type(arr)} not nd.array of dtype boolean as"", ""expected"") if bool_expected: if np.sum(np.where(arr > 1, 1, 0)) > 0: raise ValueError( f""{name} is ndarray of dtype {arr.dtype} with non-binary"", ""values. 
Check if image mask was passed in as expected."") " 53479,"def _is_part_of_namespace_package(filename: str) -> bool: """"""Check if a file is part of a namespace package."""""" filepath = Path(filename) try: modname = modutils.modpath_from_file(filename) except ImportError: modname = [filepath.stem] if filepath.is_dir(): filepath = filepath / ""__init__.py"" try: spec = modutils.file_info_from_modpath(modname) except ImportError: return False return modutils.is_namespace(spec) ","def _is_part_of_namespace_package(filename: str) -> bool: """"""Check if a file is part of a namespace package."""""" try: modname = modutils.modpath_from_file(filename) except ImportError: modname = [Path(filename).stem] try: spec = modutils.file_info_from_modpath(modname) except ImportError: return False return modutils.is_namespace(spec) " 32194,"def scan_result_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]: qids = argToList(args.get('qid')) if len(qids) == 0: raise ValueError('QID(s) not specified') command_results: List[CommandResults] = [] for qid in qids: try: qid_data = client.get_value_scan(qid, api_key) if 'data' in qid_data and qid_data['data']: qid_data.update({'qid': qid, 'indicator': qid_data['data']['indicator']}) if qid_data['data']['type'] == 'url' or qid_data['data']['type'] == 'domain': try: screenshot = requests.get( qid_data['data']['properties']['dom']['screenshot'] ) screenshot_file = fileResult( qid_data['data']['properties']['dom']['screenshot'], screenshot.content, file_type=EntryType.ENTRY_INFO_FILE ) screenshot_file['Type'] = entryTypes['image'] demisto.results(screenshot_file) except DemistoException: return_error(f'Failed to execute {demisto.command()} command. Error: Problem getting the screenshot') reputation = qid_data['data']['risk'] score = convert_to_xsoar_severity(reputation) if qid_data['data']['type'] == 'url': dbot_score = Common.DBotScore( indicator=qid_data['data']['indicator'], indicator_type=DBotScoreType.URL, integration_name='Pulsedive', score=score ) url_indicator = Common.URL( url=qid_data['data']['indicator'], dbot_score=dbot_score ) command_results.append(CommandResults( readable_output=tableToMarkdown( 'Value Details:', qid_data, headers=('indicator', 'qid', 'status', 'success') ), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data['data'], indicator=url_indicator )) if qid_data['data']['type'] == 'ip': dbot_score = Common.DBotScore( indicator=qid_data['data']['indicator'], indicator_type=DBotScoreType.IP, integration_name='Pulsedive', score=score ) ip_indicator = Common.IP( ip=qid_data['data']['indicator'], asn=qid_data['data']['properties']['geo']['asn'], geo_country=qid_data['data']['properties']['geo']['country'], port=qid_data['data']['attributes']['port'], dbot_score=dbot_score ) command_results.append(CommandResults( readable_output=tableToMarkdown( 'Value Details:', qid_data, headers=('indicator', 'qid', 'status', 'success') ), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data['data'], indicator=ip_indicator )) if qid_data['data']['type'] == 'domain': dbot_score = Common.DBotScore( indicator=qid_data['data']['indicator'], indicator_type=DBotScoreType.DOMAIN, integration_name='Pulsedive', score=score ) domain_indicator = Common.Domain( domain=qid_data['data']['indicator'], domain_status=qid_data['data']['properties']['whois']['status'], name_servers=qid_data['data']['properties']['whois']['nserver'], dbot_score=dbot_score ) command_results.append(CommandResults( 
readable_output=tableToMarkdown( 'Value Details:', qid_data, headers=('indicator', 'qid', 'status', 'success') ), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data['data'], indicator=domain_indicator )) else: command_results.append(CommandResults( readable_output=tableToMarkdown('Value Details:', qid_data), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data )) except DemistoException: return_error(f'Failed to execute {demisto.command()} command. Error: Problem with processing the scan results') return command_results ","def scan_result_command(client: Client, args: Dict[str, Any], api_key) -> List[CommandResults]: qids = argToList(args.get('qid')) if len(qids) == 0: raise ValueError('QID(s) not specified') command_results: List[CommandResults] = [] for qid in qids: try: qid_data = client.get_value_scan(qid, api_key) if 'data' in qid_data and qid_data['data']: qid_data.update({'qid': qid, 'indicator': qid_data['data']['indicator']}) if qid_data['data']['type'] == 'url' or qid_data['data']['type'] == 'domain': try: screenshot = requests.get( qid_data['data']['properties']['dom']['screenshot'] ) screenshot_file = fileResult( qid_data['data']['properties']['dom']['screenshot'], screenshot.content, file_type=EntryType.ENTRY_INFO_FILE ) screenshot_file['Type'] = entryTypes['image'] demisto.results(screenshot_file) except DemistoException: return_error(f'Failed to execute {demisto.command()} command. Error: Problem getting the screenshot') reputation = qid_data['data']['risk'] score = convert_to_xsoar_severity(reputation) if qid_data['data']['type'] == 'url': dbot_score = Common.DBotScore( indicator=qid_data['data']['indicator'], indicator_type=DBotScoreType.URL, integration_name='Pulsedive', score=score ) url_indicator = Common.URL( url=qid_data['data']['indicator'], dbot_score=dbot_score ) command_results.append(CommandResults( readable_output=tableToMarkdown( 'Value Details:', qid_data, headers=('indicator', 'qid', 'status', 'success') ), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data['data'], indicator=url_indicator )) if qid_data['data']['type'] == 'ip': dbot_score = Common.DBotScore( indicator=qid_data['data']['indicator'], indicator_type=DBotScoreType.IP, integration_name='Pulsedive', score=score ) ip_indicator = Common.IP( ip=qid_data['data']['indicator'], asn=qid_data['data']['properties']['geo']['asn'], geo_country=qid_data['data']['properties']['geo']['country'], port=qid_data['data']['attributes']['port'], dbot_score=dbot_score ) command_results.append(CommandResults( readable_output=tableToMarkdown( 'Value Details:', qid_data, headers=('indicator', 'qid', 'status', 'success') ), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data['data'], indicator=ip_indicator )) if qid_data['data']['type'] == 'domain': dbot_score = Common.DBotScore( indicator=qid_data['data']['indicator'], indicator_type=DBotScoreType.DOMAIN, integration_name='Pulsedive', score=score ) domain_indicator = Common.Domain( domain=qid_data['data']['indicator'], domain_status=qid_data['data']['properties']['whois']['status'], name_servers=qid_data['data']['properties']['whois']['nserver'], dbot_score=dbot_score ) command_results.append(CommandResults( readable_output=tableToMarkdown( 'Value Details:', qid_data, headers=('indicator', 'qid', 'status', 'success') ), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data['data'], indicator=domain_indicator )) else: 
command_results.append(CommandResults( readable_output=tableToMarkdown('Value Details:', qid_data), outputs_prefix='Pulsedive.ScanResult', outputs_key_field='qid', outputs=qid_data )) except DemistoException: raise DemistoException(f'Failed to execute {demisto.command()} command. Error: Problem with processing the scan results') return command_results " 2749,"def log_loss( y_true, y_pred, *, eps=1e-15, normalize=True, sample_weight=None, labels=None ): r""""""Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of a logistic model that returns ``y_pred`` probabilities for its training data ``y_true``. The log loss is only defined for two or more labels. For a single sample with true label :math:`y \in \{0,1\}` and a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log loss is: .. math:: L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p)) Read more in the :ref:`User Guide `. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,) Predicted probabilities, as returned by a classifier's predict_proba method. If ``y_pred.shape = (n_samples,)`` the probabilities provided are assumed to be that of the positive class. The labels in ``y_pred`` are assumed to be ordered alphabetically, as done by :class:`preprocessing.LabelBinarizer`. eps : float, default=1e-15 Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, default=True If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape (n_samples,), default=None Sample weights. labels : array-like, default=None If not provided, labels will be inferred from y_true. If ``labels`` is ``None`` and ``y_pred`` has shape (n_samples,) the labels are assumed to be binary and are inferred from ``y_true``. .. versionadded:: 0.18 Returns ------- loss : float Raw Log Loss value. This is the value calculated using the ``y_pred`` and ``y_true`` based on the abover formula. Notes ----- The logarithm used is the natural logarithm (base-e). References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Examples -------- >>> from sklearn.metrics import log_loss >>> log_loss([""spam"", ""ham"", ""ham"", ""spam""], ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... """""" y_pred = check_array(y_pred, ensure_2d=False) check_consistent_length(y_pred, y_true, sample_weight) lb = LabelBinarizer() if labels is not None: lb.fit(labels) else: lb.fit(y_true) if len(lb.classes_) == 1: if labels is None: raise ValueError( ""y_true contains only one label ({0}). Please "" ""provide the true labels explicitly through the "" ""labels argument."".format(lb.classes_[0]) ) else: raise ValueError( ""The labels array needs to contain at least two "" ""labels for log_loss, "" ""got {0}."".format(lb.classes_) ) transformed_labels = lb.transform(y_true) if transformed_labels.shape[1] == 1: transformed_labels = np.append( 1 - transformed_labels, transformed_labels, axis=1 ) # Clipping y_pred = np.clip(y_pred, eps, 1 - eps) # If y_pred is of single dimension, assume y_true to be binary # and then check. 
if y_pred.ndim == 1: y_pred = y_pred[:, np.newaxis] if y_pred.shape[1] == 1: y_pred = np.append(1 - y_pred, y_pred, axis=1) # Check if dimensions are consistent. transformed_labels = check_array(transformed_labels) if len(lb.classes_) != y_pred.shape[1]: if labels is None: raise ValueError( ""y_true and y_pred contain different number of "" ""classes {0}, {1}. Please provide the true "" ""labels explicitly through the labels argument. "" ""Classes found in "" ""y_true: {2}"".format( transformed_labels.shape[1], y_pred.shape[1], lb.classes_ ) ) else: raise ValueError( ""The number of classes in labels is different "" ""from that in y_pred. Classes found in "" ""labels: {0}"".format(lb.classes_) ) # Renormalize y_pred /= y_pred.sum(axis=1)[:, np.newaxis] loss = -(transformed_labels * np.log(y_pred)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize) ","def log_loss( y_true, y_pred, *, eps=1e-15, normalize=True, sample_weight=None, labels=None ): r""""""Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of a logistic model that returns ``y_pred`` probabilities for its training data ``y_true``. The log loss is only defined for two or more labels. For a single sample with true label :math:`y \in \{0,1\}` and a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log loss is: .. math:: L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p)) Read more in the :ref:`User Guide `. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,) Predicted probabilities, as returned by a classifier's predict_proba method. If ``y_pred.shape = (n_samples,)`` the probabilities provided are assumed to be that of the positive class. The labels in ``y_pred`` are assumed to be ordered alphabetically, as done by :class:`preprocessing.LabelBinarizer`. eps : float, default=1e-15 Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, default=True If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape (n_samples,), default=None Sample weights. labels : array-like, default=None If not provided, labels will be inferred from y_true. If ``labels`` is ``None`` and ``y_pred`` has shape (n_samples,) the labels are assumed to be binary and are inferred from ``y_true``. .. versionadded:: 0.18 Returns ------- loss : float Log loss, aka logistic loss or cross-entropy loss. Notes ----- The logarithm used is the natural logarithm (base-e). References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Examples -------- >>> from sklearn.metrics import log_loss >>> log_loss([""spam"", ""ham"", ""ham"", ""spam""], ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... """""" y_pred = check_array(y_pred, ensure_2d=False) check_consistent_length(y_pred, y_true, sample_weight) lb = LabelBinarizer() if labels is not None: lb.fit(labels) else: lb.fit(y_true) if len(lb.classes_) == 1: if labels is None: raise ValueError( ""y_true contains only one label ({0}). 
Please "" ""provide the true labels explicitly through the "" ""labels argument."".format(lb.classes_[0]) ) else: raise ValueError( ""The labels array needs to contain at least two "" ""labels for log_loss, "" ""got {0}."".format(lb.classes_) ) transformed_labels = lb.transform(y_true) if transformed_labels.shape[1] == 1: transformed_labels = np.append( 1 - transformed_labels, transformed_labels, axis=1 ) # Clipping y_pred = np.clip(y_pred, eps, 1 - eps) # If y_pred is of single dimension, assume y_true to be binary # and then check. if y_pred.ndim == 1: y_pred = y_pred[:, np.newaxis] if y_pred.shape[1] == 1: y_pred = np.append(1 - y_pred, y_pred, axis=1) # Check if dimensions are consistent. transformed_labels = check_array(transformed_labels) if len(lb.classes_) != y_pred.shape[1]: if labels is None: raise ValueError( ""y_true and y_pred contain different number of "" ""classes {0}, {1}. Please provide the true "" ""labels explicitly through the labels argument. "" ""Classes found in "" ""y_true: {2}"".format( transformed_labels.shape[1], y_pred.shape[1], lb.classes_ ) ) else: raise ValueError( ""The number of classes in labels is different "" ""from that in y_pred. Classes found in "" ""labels: {0}"".format(lb.classes_) ) # Renormalize y_pred /= y_pred.sum(axis=1)[:, np.newaxis] loss = -(transformed_labels * np.log(y_pred)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize) " 5161,"def test_countour_legend_elements(): from matplotlib.collections import LineCollection x = np.arange(1, 10) y = x.reshape(-1, 1) h = x * y cs = plt.contour(h, levels=[10, 30, 50], colors=['blue', '#00FF00', 'red'], extend='both') cs.changed() artists, labels = cs.legend_elements() assert labels == ['$x = 10.0$', '$x = 30.0$', '$x = 50.0$'] assert all(isinstance(a, LineCollection) for a in artists) expected_colors = (np.array([[0., 0., 1., 1.]]), np.array([[0., 1., 0., 1.]]), np.array([[1., 0., 0., 1.]])) assert all(np.all(a.get_color() == c) for a, c in zip(artists, expected_colors)) ","def test_contour_legend_elements(): from matplotlib.collections import LineCollection x = np.arange(1, 10) y = x.reshape(-1, 1) h = x * y cs = plt.contour(h, levels=[10, 30, 50], colors=['blue', '#00FF00', 'red'], extend='both') cs.changed() artists, labels = cs.legend_elements() assert labels == ['$x = 10.0$', '$x = 30.0$', '$x = 50.0$'] assert all(isinstance(a, LineCollection) for a in artists) expected_colors = (np.array([[0., 0., 1., 1.]]), np.array([[0., 1., 0., 1.]]), np.array([[1., 0., 0., 1.]])) assert all(np.all(a.get_color() == c) for a, c in zip(artists, expected_colors)) " 6809,"def send_via_gateway(arg): ss = frappe.get_doc('SMS Settings', 'SMS Settings') headers = get_headers(ss) use_json = ""Content-Type"" in headers and headers[""Content-Type""] == ""application/json"" message = arg.get('message').decode('utf-8') if use_json else arg.get('message') args = {ss.message_parameter: message} for d in ss.get(""parameters""): if not d.header: args[d.parameter] = d.value success_list = [] for d in arg.get('receiver_list'): args[ss.receiver_parameter] = d status = send_request(ss.sms_gateway_url, args, headers, ss.use_post, use_json) if 200 <= status < 300: success_list.append(d) if len(success_list) > 0: args.update(arg) create_sms_log(args, success_list) if arg.get('success_msg'): frappe.msgprint(_(""SMS sent to following numbers: {0}"").format(""\n"" + ""\n"".join(success_list))) ","def send_via_gateway(arg): ss = frappe.get_doc('SMS Settings', 'SMS Settings') headers = get_headers(ss) use_json = 
""Content-Type"" in headers and headers[""Content-Type""] == ""application/json"" message = frappe.safe_decode(arg.get('message')) args = {ss.message_parameter: message} for d in ss.get(""parameters""): if not d.header: args[d.parameter] = d.value success_list = [] for d in arg.get('receiver_list'): args[ss.receiver_parameter] = d status = send_request(ss.sms_gateway_url, args, headers, ss.use_post, use_json) if 200 <= status < 300: success_list.append(d) if len(success_list) > 0: args.update(arg) create_sms_log(args, success_list) if arg.get('success_msg'): frappe.msgprint(_(""SMS sent to following numbers: {0}"").format(""\n"" + ""\n"".join(success_list))) " 30956,"def get_user_iam(default_base_dn, page_size, args): user_profile = args.get(""user-profile"") user_profile_delta = args.get('user-profile-delta') default_attribute = args.get('defult_attribute', ""samaccountname"") iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta) ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER) value = ad_user.get(default_attribute) # removing keys with no values user = {k: v for k, v in ad_user.items() if v} attributes = list(user.keys()) query = f'(&(objectClass=User)(objectCategory=person)({default_attribute}={value}))' try: entries = search_with_paging( query, default_base_dn, attributes=attributes, size_limit=DEFAULT_LIMIT, page_size=page_size ) if not entries.get('flat'): iam_user_profile.set_result(success=False, error_message=""No user was found"") else: ad_user = entries.get('flat')[0] user_account_control = ad_user.get('userAccountControl') not in INACTIVE_LIST_OPTIONS ad_user[""userAccountControl""] = user_account_control iam_user_profile.set_result(success=True, email=ad_user.get('email'), username=ad_user.get('name'), details=ad_user, active=user_account_control) user_profile.update_with_app_data(ad_user, INCOMING_MAPPER) return_results(iam_user_profile) except Exception as e: iam_user_profile.set_result(success=False, error_message=str(e)) return_results(iam_user_profile) ","def get_user_iam(default_base_dn, page_size, args): user_profile = args.get(""user-profile"") user_profile_delta = args.get('user-profile-delta') default_attribute = args.get('defult_attribute', ""samaccountname"") iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta) ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER) value = ad_user.get(default_attribute) # removing keys with no values user = {k: v for k, v in ad_user.items() if v} attributes = list(user.keys()) query = f'(&(objectClass=User)(objectCategory=person)({default_attribute}={value}))' try: entries = search_with_paging( query, default_base_dn, attributes=attributes, size_limit=DEFAULT_LIMIT, page_size=1 ) if not entries.get('flat'): iam_user_profile.set_result(success=False, error_message=""No user was found"") else: ad_user = entries.get('flat')[0] user_account_control = ad_user.get('userAccountControl') not in INACTIVE_LIST_OPTIONS ad_user[""userAccountControl""] = user_account_control iam_user_profile.set_result(success=True, email=ad_user.get('email'), username=ad_user.get('name'), details=ad_user, active=user_account_control) user_profile.update_with_app_data(ad_user, INCOMING_MAPPER) return_results(iam_user_profile) except Exception as e: iam_user_profile.set_result(success=False, error_message=str(e)) return_results(iam_user_profile) " 1829,"def _unique_python(values, *, return_inverse): # Only used in `_uniques`, see docstring 
there for details try: uniques_set = set(values) none_in_set = None in uniques_set nan_in_set = np.nan in uniques_set if none_in_set and nan_in_set: raise ValueError(""Input wiith both types of missing, None and "" ""np.nan, is not supported"") if none_in_set: uniques_set.remove(None) uniques = sorted(uniques_set) uniques.append(None) elif nan_in_set: uniques_set.remove(np.nan) uniques = sorted(uniques_set) uniques.append(np.nan) else: uniques = sorted(uniques_set) uniques = np.array(uniques, dtype=values.dtype) except TypeError: types = sorted(t.__qualname__ for t in set(type(v) for v in values)) raise TypeError(""Encoders require their input to be uniformly "" f""strings or numbers. Got {types}"") if return_inverse: table = {val: i for i, val in enumerate(uniques)} inverse = np.array([table[v] for v in values]) return uniques, inverse return uniques ","def _unique_python(values, *, return_inverse): # Only used in `_uniques`, see docstring there for details try: uniques_set = set(values) none_in_set = None in uniques_set nan_in_set = np.nan in uniques_set if none_in_set and nan_in_set: raise ValueError(""Input with both types of missing, None and "" ""np.nan, is not supported"") if none_in_set: uniques_set.remove(None) uniques = sorted(uniques_set) uniques.append(None) elif nan_in_set: uniques_set.remove(np.nan) uniques = sorted(uniques_set) uniques.append(np.nan) else: uniques = sorted(uniques_set) uniques = np.array(uniques, dtype=values.dtype) except TypeError: types = sorted(t.__qualname__ for t in set(type(v) for v in values)) raise TypeError(""Encoders require their input to be uniformly "" f""strings or numbers. Got {types}"") if return_inverse: table = {val: i for i, val in enumerate(uniques)} inverse = np.array([table[v] for v in values]) return uniques, inverse return uniques " 29811,"def info_message(msg): """"""Format a paasta info message. :param msg: a string :return: a beautiful string """""" return ""{} {}"".format(info_mark(), msg) ","def info_message(msg: str) -> str: """"""Format a paasta info message. :param msg: a string :return: a beautiful string """""" return ""{} {}"".format(info_mark(), msg) " 42988,"def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate=""BSgate""): """"""Applies a two-mode gate to a state Applies the two-mode gate to the state using custom tensor contractions and the numba compiler for faster application. 
Args: mat (ndarray): The BS operator to be applied to the state state (ndarray): The state that the BS is applied to pure (bool): If the state is pure or mixed modes (list[int]): A list of modes to which the BS is applied n (int): The total number of modes trunc (int): The Hilbert space truncation/cutoff gate (str): the gate which should be called (BSgate, S2gate) Returns: ndarray: State where the two-mode operation has been applied """""" if pure: t1 = modes[0] t2 = modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(n) switch_list_2 = np.arange(n) switch_list_1[[0, t1]] = switch_list_1[[t1, 0]] switch_list_2[[1, t2]] = switch_list_2[[t2, 1]] state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(switch_list_1) else: t1 = 2 * modes[0] t2 = 2 * modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(2 * n) switch_list_2 = np.arange(2 * n) switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]] switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]] # put bra-values to the left, and ket-values to the right (ignoring values not operated on) transpose_list = np.arange(2 * n) transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] state = state.transpose(transpose_list) state = state.transpose(switch_list_1) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_BS(mat.conj(), state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_S2(mat.conj(), state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(transpose_list) return ret ","def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate=""BSgate""): """"""Applies a two-mode gate to a state Applies the two-mode gate to the state using custom tensor contractions and the numba compiler for faster application. 
Args: mat (ndarray): The BS operator to be applied to the state state (array[complex]): The state that the operator is applied to pure (bool): If the state is pure or mixed modes (list[int]): A list of modes to which the BS is applied n (int): The total number of modes trunc (int): The Hilbert space truncation/cutoff gate (str): the gate which should be called (BSgate, S2gate) Returns: ndarray: State where the two-mode operation has been applied """""" if pure: t1 = modes[0] t2 = modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(n) switch_list_2 = np.arange(n) switch_list_1[[0, t1]] = switch_list_1[[t1, 0]] switch_list_2[[1, t2]] = switch_list_2[[t2, 1]] state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(switch_list_1) else: t1 = 2 * modes[0] t2 = 2 * modes[1] # put the ket-values in front to be operated on in the apply function switch_list_1 = np.arange(2 * n) switch_list_2 = np.arange(2 * n) switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]] switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]] # put bra-values to the left, and ket-values to the right (ignoring values not operated on) transpose_list = np.arange(2 * n) transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] state = state.transpose(transpose_list) state = state.transpose(switch_list_1) if gate == ""BSgate"": state = _apply_BS(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_BS(mat.conj(), state, trunc) elif gate == ""S2gate"": state = _apply_S2(mat, state, trunc) state = state.transpose(switch_list_1) state = state.transpose(switch_list_2) state = _apply_S2(mat.conj(), state, trunc) else: raise NotImplementedError state = state.transpose(switch_list_2) ret = state.transpose(transpose_list) return ret " 34434,"def add_errors_success_params(parser: argparse.ArgumentParser): parser.add_argument( ""--successes"", action=""store_true"", default=False, help=""If set successful predictions will be written to a file."", ) parser.add_argument( ""--no-errors"", action=""store_true"", default=False, help=""If set incorrect predictions will NOT be written to a file."", ) ","def add_errors_success_params(parser: argparse.ArgumentParser) -> None: parser.add_argument( ""--successes"", action=""store_true"", default=False, help=""If set successful predictions will be written to a file."", ) parser.add_argument( ""--no-errors"", action=""store_true"", default=False, help=""If set incorrect predictions will NOT be written to a file."", ) " 32632,"def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" secret_key = demisto.params().get('credentials').get('password') client_key = demisto.params().get('credentials').get('identifier') organisation_id = demisto.params().get('organization_id') # get the service API url base_url = demisto.params().get(""api_url"") # How much time before the first fetch to retrieve incidents proxy = demisto.params().get('proxy', False) LOG(f'Command being called is {demisto.command()}') try: client = Client( base_url=base_url, organisation_id=organisation_id, secret_key=secret_key, client_key=client_key, proxy=proxy) commands = { 'test-module': test_module, ""umbrella-reporting-destination-list"": get_destinations_list, 
""umbrella-reporting-category-list"": get_categories_list, ""umbrella-reporting-identity-list"": get_identities_list, ""umbrella-reporting-event-type-list"": get_event_types_list, ""umbrella-reporting-file-list"": get_file_list, ""umbrella-reporting-threat-list"": get_threat_list, ""umbrella-reporting-activity-list"": get_activity_list, ""umbrella-reporting-activity-get"": get_activity_by_traffic_type, ""umbrella-reporting-summary-list"": get_summary_list } args = demisto.args() command = demisto.command() if command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError # Log exceptions except Exception as e: return_error( f'Failed to execute {demisto.command()} command. Error: {str(e)}') ","def main(): """""" PARSE AND VALIDATE INTEGRATION PARAMS """""" secret_key = demisto.params().get('credentials').get('password') client_key = demisto.params().get('credentials').get('identifier') organisation_id = demisto.params().get('organization_id') # get the service API url base_url =params.get(""api_url"") # How much time before the first fetch to retrieve incidents proxy = demisto.params().get('proxy', False) LOG(f'Command being called is {demisto.command()}') try: client = Client( base_url=base_url, organisation_id=organisation_id, secret_key=secret_key, client_key=client_key, proxy=proxy) commands = { 'test-module': test_module, ""umbrella-reporting-destination-list"": get_destinations_list, ""umbrella-reporting-category-list"": get_categories_list, ""umbrella-reporting-identity-list"": get_identities_list, ""umbrella-reporting-event-type-list"": get_event_types_list, ""umbrella-reporting-file-list"": get_file_list, ""umbrella-reporting-threat-list"": get_threat_list, ""umbrella-reporting-activity-list"": get_activity_list, ""umbrella-reporting-activity-get"": get_activity_by_traffic_type, ""umbrella-reporting-summary-list"": get_summary_list } args = demisto.args() command = demisto.command() if command in commands: return_results(commands[command](client, args)) else: raise NotImplementedError # Log exceptions except Exception as e: return_error( f'Failed to execute {demisto.command()} command. Error: {str(e)}') " 38451,"def dump_mortar_projections_to_file(g, mg, proj, fn, mode=""w""): """""" Dump a PorePy grid to a file that can be read by as an unstructured opm grid. Parameters: g (Grid): The grid will be written to file fn (String): The file name. This will be passed to open() using 'w' Returns: None """""" if not np.allclose(proj.data, 1): raise NotImplementedError(""Can not store non-matching grids, yet."") if not (proj.getformat() == ""csc""): proj = proj.tocsc() # Test if directory in file name exists and create if not dirpath = os.path.dirname(fn) if len(dirpath) > 0: os.makedirs(dirpath, exist_ok=True) # Open file and start writing with open(fn, mode) as outfile: # Write grid info outfile.write("" "".join(map(str, proj.indptr)) + ""\n"") outfile.write("" "".join(map(str, proj.indices)) + ""\n"") ","def dump_mortar_projections_to_file(g: pp.Grid, mg: pp.MortarGrid, proj: sps.spmatrix, fn: str, mode: str=""w"") -> None: """""" Dump a PorePy grid to a file that can be read by as an unstructured opm grid. Parameters: g (Grid): The grid will be written to file fn (String): The file name. 
This will be passed to open() using 'w' Returns: None """""" if not np.allclose(proj.data, 1): raise NotImplementedError(""Can not store non-matching grids, yet."") if not (proj.getformat() == ""csc""): proj = proj.tocsc() # Test if directory in file name exists and create if not dirpath = os.path.dirname(fn) if len(dirpath) > 0: os.makedirs(dirpath, exist_ok=True) # Open file and start writing with open(fn, mode) as outfile: # Write grid info outfile.write("" "".join(map(str, proj.indptr)) + ""\n"") outfile.write("" "".join(map(str, proj.indices)) + ""\n"") " 6152,"def urlFinder(module): """""" Try to guess the url with module name :param module: path writed like import (e.g. ""DIRAC.something.something"") """""" sections = module.split('.') for section in sections: # This condition is a bit long # We search something which look like <...>.System.<...>.Handler # If find we return // if(section.find(""System"") > 0) and (sections[-1].find('Handler') > 0): return ""/%s/%s"" % (section.replace(""System"", """"), sections[-1].replace(""Handler"", """")) return None ","def urlFinder(module): """""" Try to guess the url with module name :param module: path written like import (e.g. ""DIRAC.something.something"") """""" sections = module.split('.') for section in sections: # This condition is a bit long # We search something which look like <...>.System.<...>.Handler # If find we return // if(section.find(""System"") > 0) and (sections[-1].find('Handler') > 0): return ""/%s/%s"" % (section.replace(""System"", """"), sections[-1].replace(""Handler"", """")) return None " 47755,"def test__api__parse_string(): """"""Basic checking of parse functionality."""""" parsed = sqlfluff.parse(my_bad_query) # Check we a JSON object is returned. assert isinstance(parsed, dict) # Load in expected result. with open(""test/fixtures/api/parse_test/parse_test.json"", ""r"") as f: expected_parsed = json.load(f) # Compare JSON from parse to expected result. assert parsed == expected_parsed ","def test__api__parse_string(): """"""Basic checking of parse functionality."""""" parsed = sqlfluff.parse(my_bad_query) # Check a JSON object is returned. assert isinstance(parsed, dict) # Load in expected result. with open(""test/fixtures/api/parse_test/parse_test.json"", ""r"") as f: expected_parsed = json.load(f) # Compare JSON from parse to expected result. 
assert parsed == expected_parsed " 17694,"def _get_fs_type(path): try: from psutil import disk_partitions match = """" fs = """" for part in disk_partitions(): if path.startswith(part.mountpoint) and \ len(match) < len(part.mountpoint): fs = part.fstype match = part.mountpoint except Exception as exc: ce = CapturedException(exc) lgr.warning(""Failed to get filesystem information: %s"", ce) fs = tuple() return f'{path}: {fs}' ","def _get_fs_type(path): try: from psutil import disk_partitions match = """" fs = """" for part in disk_partitions(): if path.startswith(part.mountpoint) and \ len(match) < len(part.mountpoint): fs = part.fstype match = part.mountpoint except Exception as exc: ce = CapturedException(exc) lgr.warning(""Failed to get filesystem information: %s"", ce) fs = ""Install 'psutils' to get filesystem information"" return f'{path}: {fs}' " 30783,"def map_changes_to_existing_user(existing_user, new_json): # if existing_user is not None: for k, v in new_json.items(): if type(v) == list: # handle in specific way # as of now only emails needs to be handled if k == 'emails': existing_email_list = existing_user.get(k) # update for i in v: for j in existing_email_list: if j.get('type') == i.get('type'): if j.get('value') != i.get('value'): j['value'] = i.get('value') if i.get('primary', None) is not None: j['primary'] = i.get('primary') else: if j.get('primary', None) is not None: j['primary'] = j.get('primary') break # add new_email_list = [] for i in v: exist = False for j in existing_email_list: if i.get('type') == j.get('type', ''): exist = True break if not exist: new_email = {'type': i.get('type'), 'value': i.get('value')} if i.get('primary', None) is not None: new_email.update({'primary': i.get('primary')}) new_email_list.append(new_email) existing_email_list.extend(new_email_list) elif type(v) == dict: if k != SCIM_EXTENSION_SCHEMA: map_changes_to_existing_user(existing_user.get(k), v) else: existing_user[k] = v ","def map_changes_to_existing_user(existing_user, new_json): # if existing_user is not None: for k, v in new_json.items(): if type(v) == list: # handle in specific way # as of now only emails needs to be handled if k == 'emails': existing_email_list = existing_user.get(k) # update for i in v: for existing_email in existing_email_list: if j.get('type') == i.get('type'): if j.get('value') != i.get('value'): j['value'] = i.get('value') if i.get('primary', None) is not None: j['primary'] = i.get('primary') else: if j.get('primary', None) is not None: j['primary'] = j.get('primary') break # add new_email_list = [] for i in v: exist = False for j in existing_email_list: if i.get('type') == j.get('type', ''): exist = True break if not exist: new_email = {'type': i.get('type'), 'value': i.get('value')} if i.get('primary', None) is not None: new_email.update({'primary': i.get('primary')}) new_email_list.append(new_email) existing_email_list.extend(new_email_list) elif type(v) == dict: if k != SCIM_EXTENSION_SCHEMA: map_changes_to_existing_user(existing_user.get(k), v) else: existing_user[k] = v " 23645,"def get_total_irradiance(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, albedo=.25, surface_type=None, model='isotropic', model_perez='allsitescomposite1990', **kwargs): r"""""" Determine total in-plane irradiance and its beam, sky diffuse and ground reflected components, using the specified sky diffuse irradiance model. .. 
math:: I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground} Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] albedo : numeric, default 0.25 Surface albedo. [unitless] surface_type : None or String, default None Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for the list of accepted values. model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- total_irrad : OrderedDict or DataFrame Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse', 'poa_ground_diffuse'``. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """""" poa_sky_diffuse = get_sky_diffuse( surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model, model_perez=model_perez) poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo, surface_type) aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth) irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse) return irrads ","def get_total_irradiance(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, albedo=.25, surface_type=None, model='isotropic', model_perez='allsitescomposite1990', **kwargs): r"""""" Determine total in-plane irradiance and its beam, sky diffuse and ground reflected components, using the specified sky diffuse irradiance model. .. math:: I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground} Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] albedo : numeric, default 0.25 Surface albedo. [unitless] surface_type : None or String, default None Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for the list of accepted values. model : String, default 'isotropic' Irradiance model. 
Can be one of 'isotropic', 'klucher', 'haydavies', ``'reindl'``, ``'king'``, ``'perez'``. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- total_irrad : OrderedDict or DataFrame Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse', 'poa_ground_diffuse'``. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """""" poa_sky_diffuse = get_sky_diffuse( surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model, model_perez=model_perez) poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo, surface_type) aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth) irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse) return irrads " 54718,"def test_str_cat(): ""Test Pandas string cat method"" df = pd.DataFrame({""text"": [""a"", ""b"", ""c"", ""d""], ""numbers"": range(1, 5)}) result = process_text( df=df, column=""text"", string_function=""cat"", others=[""A"", ""B"", ""C"", ""D""], ) expected = pd.DataFrame( {""text"": [""aA"", ""bB"", ""cC"", ""dD""], ""numbers"": [1, 2, 3, 4]} ) assert_frame_equal(result, expected) ","def test_str_cat(): ""Test Pandas string cat method"" df = pd.DataFrame({""text"": [""a"", ""b"", ""c"", ""d""], ""numbers"": range(1, 5)}) result = df.process_text( column=""text"", string_function=""cat"", others=[""A"", ""B"", ""C"", ""D""], ) expected = pd.DataFrame( {""text"": [""aA"", ""bB"", ""cC"", ""dD""], ""numbers"": [1, 2, 3, 4]} ) assert_frame_equal(result, expected) " 23634,"def test_noct_options(): poa_global, temp_air, wind_speed, noct, eta_m_ref = (1000., 25., 1., 45., 0.2) effective_irradiance = 1100. transmittance_absorbtance = 0.8 array_height = 2 mount_standoff = 2.0 result = temperature.noct(poa_global, temp_air, wind_speed, noct, eta_m_ref, effective_irradiance, transmittance_absorbtance, array_height, mount_standoff) expected = 58.36654459 assert np.isclose(result, expected) ","def test_noct_options(): poa_global, temp_air, wind_speed, noct, eta_m_ref = (1000., 25., 1., 45., 0.2) effective_irradiance = 1100. 
transmittance_absorbtance = 0.8 array_height = 2 mount_standoff = 2.0 result = temperature.noct(poa_global, temp_air, wind_speed, noct, eta_m_ref, effective_irradiance, transmittance_absorbtance, array_height, mount_standoff) expected = 58.36654459 assert_allclose(result, expected) " 31557,"def run_long_running(params: Dict, is_test: bool = False): """""" Start the long running server :param params: Demisto params :param is_test: Indicates whether it's test-module run or regular run :return: None """""" nginx_process = None try: nginx_port = get_params_port(params) server_port = nginx_port + 1 # set our own log handlers APP.logger.removeHandler(default_handler) integration_logger = IntegrationLogger() integration_logger.buffering = False log_handler = DemistoHandler(integration_logger) log_handler.setFormatter( logging.Formatter(""flask log: [%(asctime)s] %(levelname)s in %(module)s: %(message)s"") ) APP.logger.addHandler(log_handler) demisto.debug('done setting demisto handler for logging') server = WSGIServer(('0.0.0.0', server_port), APP, log=DEMISTO_LOGGER, error_log=ERROR_LOGGER) if is_test: test_nginx_server(nginx_port, params) server_process = Process(target=server.serve_forever) server_process.start() time.sleep(5) server_process.terminate() else: nginx_process = start_nginx_server(nginx_port, params) server.serve_forever() except Exception as e: demisto.error(f'An error occurred: {str(e)}') raise ValueError(str(e)) finally: if nginx_process: try: nginx_process.terminate() except Exception as ex: demisto.error(f'Failed stopping nginx process when exiting: {ex}') ","def run_long_running(params: Dict, is_test: bool = False): """""" Start the long running server :param params: Demisto params :param is_test: Indicates whether it's test-module run or regular run :return: None """""" nginx_process = None try: nginx_port = get_params_port(params) server_port = nginx_port + 1 # set our own log handlers APP.logger.removeHandler(default_handler) integration_logger = IntegrationLogger() integration_logger.buffering = False log_handler = DemistoHandler(integration_logger) log_handler.setFormatter( logging.Formatter(""flask log: [%(asctime)s] %(levelname)s in %(module)s: %(message)s"") ) APP.logger.addHandler(log_handler) demisto.debug('done setting demisto handler for logging') server = WSGIServer(('0.0.0.0', server_port), APP, log=DEMISTO_LOGGER, error_log=ERROR_LOGGER) if is_test: test_nginx_server(nginx_port, params) server_process = Process(target=server.serve_forever) server_process.start() time.sleep(5) server_process.terminate() else: nginx_process = start_nginx_server(nginx_port, params) server.serve_forever() except Exception as e: demisto.error(f'An error occurred: {str(e)}') raise ValueError(str(e)) finally: if nginx_process: try: nginx_process.terminate() except Exception as ex: demisto.error(f'Failed stopping nginx process when exiting: {ex}') " 20748,"def createGroupOperationForArrange(nodes_to_arrange: List[""SceneNode""], build_volume: ""BuildVolume"", fixed_nodes: Optional[List[""SceneNode""]] = None, factor = 10000, add_new_nodes_in_scene: bool = False): scene_root = Application.getInstance().getController().getScene().getRoot() found_solution_for_all, node_items = findNodePlacement(nodes_to_arrange, build_volume, fixed_nodes, factor) not_fit_count = 0 grouped_operation = GroupedOperation() for node, node_item in zip(nodes_to_arrange, node_items): if add_new_nodes_in_scene: grouped_operation.addOperation(AddSceneNodeOperation(node, scene_root)) if node_item.binId() == 0: # We 
found a spot for it rotation_matrix = Matrix() rotation_matrix.setByRotationAxis(node_item.rotation(), Vector(0, -1, 0)) grouped_operation.addOperation(RotateOperation(node, Quaternion.fromMatrix(rotation_matrix))) grouped_operation.addOperation(TranslateOperation(node, Vector(node_item.translation().x() / factor, 0, node_item.translation().y() / factor))) else: # We didn't find a spot grouped_operation.addOperation( TranslateOperation(node, Vector(200, node.getWorldPosition().y, -not_fit_count * 20), set_position=True)) not_fit_count += 1 return grouped_operation, not_fit_count ","def createGroupOperationForArrange(nodes_to_arrange: List[""SceneNode""], build_volume: ""BuildVolume"", fixed_nodes: Optional[List[""SceneNode""]] = None, factor = 10000, add_new_nodes_in_scene: bool = False) -> Tuple[GroupedOperation, int]: scene_root = Application.getInstance().getController().getScene().getRoot() found_solution_for_all, node_items = findNodePlacement(nodes_to_arrange, build_volume, fixed_nodes, factor) not_fit_count = 0 grouped_operation = GroupedOperation() for node, node_item in zip(nodes_to_arrange, node_items): if add_new_nodes_in_scene: grouped_operation.addOperation(AddSceneNodeOperation(node, scene_root)) if node_item.binId() == 0: # We found a spot for it rotation_matrix = Matrix() rotation_matrix.setByRotationAxis(node_item.rotation(), Vector(0, -1, 0)) grouped_operation.addOperation(RotateOperation(node, Quaternion.fromMatrix(rotation_matrix))) grouped_operation.addOperation(TranslateOperation(node, Vector(node_item.translation().x() / factor, 0, node_item.translation().y() / factor))) else: # We didn't find a spot grouped_operation.addOperation( TranslateOperation(node, Vector(200, node.getWorldPosition().y, -not_fit_count * 20), set_position=True)) not_fit_count += 1 return grouped_operation, not_fit_count " 4413,"def _data_path(path=None, force_update=False, update_path=True, download=True, name=None, check_version=False, return_version=False, accept=False): """"""Aux function. This is a general function for fetching MNE datasets. In order to define an MNE dataset, one needs to define a URL, archive name, folder name, and environment configuration key. They also need to define a ``pooch`` registry txt file mapping the dataset archive name to a hash (i.e. md5, or sha). Parameters ---------- path : None | str Location of where to look for the {name} dataset. If None, the environment variable or config parameter ``{conf}`` is used. If it doesn't exist, the ""~/mne_data"" directory is used. If the {name} dataset is not found under the given path, the data will be automatically downloaded to the specified folder. force_update : bool Force update of the {name} dataset even if a local copy exists. Default is False. update_path : bool | None If True (default), set the ``{conf}`` in mne-python config to the given path. If None, the user is prompted. download : bool If False and the {name} dataset has not been downloaded yet, it will not be downloaded and the path will be returned as '' (empty string). This is mostly used for debugging purposes and can be safely ignored by most users. name : str | None The name of the dataset, which should correspond to the URL, archive name, folder names and configuration key mappings for pooch. check_version : bool Whether to check the version of the dataset or not. Each version of the dataset is stored in the root with a ``version.txt`` file. return_version : bool Whether or not to return the version of the dataset or not. Defaults to False. 
accept : bool Some datasets require an acceptance of an additional license. Default to False. If this is True, then license text should be passed into key word argument ``license_text``. license_text : str | None The text of a license agreement. Only used if ``accept`` is True. Returns ------- path : str Path to {name} dataset directory. """""" # import pooch library for handling the dataset downloading pooch = _soft_import('pooch', 'dataset downloading', strict=True) # extract configuration parameters this_dataset = MNE_DATASETS[name] config_key = this_dataset['config_key'] folder_name = this_dataset['folder_name'] # get download path for specific dataset path = _get_path(path=path, key=config_key, name=name) # get the actual path to each dataset folder name final_path = op.join(path, folder_name) # handle BrainStorm datasets with nested folders for datasets if name.startswith('bst_'): final_path = op.join(final_path, name) # additional condition: check for version.txt and parse it # check if testing or misc data is outdated; if so, redownload it want_version = RELEASES.get(name, None) want_version = _FAKE_VERSION if name == 'fake' else want_version # get the version of the dataset and then check if the version is outdated data_version = _dataset_version(final_path, name) outdated_dataset = want_version is not None and \ LooseVersion(want_version) > LooseVersion(data_version) if outdated_dataset: logger.info(f'Dataset {name} version {data_version} out of date, ' f'latest version is {want_version}') # return empty string if outdated dataset and we don't want # to download if (not force_update) and outdated_dataset and not download: return ('', data_version) if return_version else '' # reasons to bail early (hf_sef has separate code for this): if (not force_update) and (not outdated_dataset) and \ (not name.startswith('hf_sef_')): # if target folder exists (otherwise pooch downloads every time, # because we don't save the archive files after unpacking) if op.isdir(final_path): _do_path_update(path, update_path, config_key, name) return (final_path, data_version) if return_version else final_path # if download=False (useful for debugging) elif not download: return ('', data_version) if return_version else '' # if user didn't accept the license elif name.startswith('bst_'): if accept or '--accept-brainstorm-license' in sys.argv: answer = 'y' else: # If they don't have stdin, just accept the license # https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501 answer = _safe_input( '%sAgree (y/[n])? ' % _bst_license_text, use='y') if answer.lower() != 'y': raise RuntimeError('You must agree to the license to use this ' 'dataset') # downloader & processors progressbar = True if name == 'fake': progressbar = False downloader = pooch.HTTPDownloader(progressbar=progressbar) # use tqdm unzip = pooch.Unzip(extract_dir=path) # to unzip downloaded file untar = pooch.Untar(extract_dir=path) # to untar downloaded file # this processor handles nested tar files nested_untar = pooch.Untar(extract_dir=op.join(path, folder_name)) # create a map for each dataset name to its corresponding processor # Note: when adding a new dataset, a new line must be added here. 
processors = dict( bst_auditory=nested_untar, bst_phantom_ctf=nested_untar, bst_phantom_elekta=nested_untar, bst_raw=nested_untar, bst_resting=nested_untar, fake=untar, fieldtrip_cmc=nested_untar, kiloword=untar, misc=untar, mtrf=unzip, multimodal=untar, fnirs_motor=untar, opm=untar, sample=untar, somato=untar, spm=untar, testing=untar, visual_92_categories=untar, phantom_4dbti=unzip, refmeg_noise=unzip, hf_sef_raw=pooch.Untar( extract_dir=path, members=[f'hf_sef/{subdir}' for subdir in ('MEG', 'SSS', 'subjects')]), hf_sef_evoked=pooch.Untar( extract_dir=path, members=[f'hf_sef/{subdir}' for subdir in ('MEG', 'SSS', 'subjects')]), ssvep=unzip, erp_core=untar, epilepsy_ecog=untar, ) # construct the mapping needed by pooch from archive names # to urls pooch_urls = dict() # construct the mapping needed by pooch for the hash checking pooch_hash_mapping = dict() # handle case of multiple sub-datasets with different urls if name == 'visual_92_categories': names = [f'visual_92_categories_{n}' for n in (1, 2)] else: names = [name] # write all pooch urls for this_name in names: this_dataset = MNE_DATASETS[this_name] archive_name = this_dataset['archive_name'] dataset_url = this_dataset['url'] dataset_hash = this_dataset['hash'] # write to pooch url pooch_urls[archive_name] = dataset_url pooch_hash_mapping[archive_name] = dataset_hash # create the download manager fetcher = pooch.create( path=path, base_url='', # Full URLs are given in the `urls` dict. version=None, # Data versioning is decoupled from MNE-Python version. registry=None, # Registry is loaded from file, below. urls=pooch_urls, retry_if_failed=2 # 2 retries = 3 total attempts, ) # create temporary checksum registry with tempfile.TemporaryDirectory() as tmpdir: registry = op.join(tmpdir, 'dataset_checksums.txt') # write the md5 hashes with open(registry, 'w') as fout: for archive_name, dataset_hash in pooch_hash_mapping.items(): # write to the checksums file registry fout.write(f'{archive_name} {dataset_hash}\n') # load the checksum registry fetcher.load_registry(registry) # use our logger level for pooch's logger too pooch.get_logger().setLevel(logger.getEffectiveLevel()) for this_name in names: # fetch and unpack the data archive_name = MNE_DATASETS[this_name]['archive_name'] fetcher.fetch(fname=archive_name, downloader=downloader, processor=processors[name]) # after unpacking, remove the archive file os.remove(op.join(path, archive_name)) # remove version number from ""misc"" and ""testing"" datasets folder names if name == 'misc': rmtree(final_path, ignore_errors=True) os.replace(op.join(path, MISC_VERSIONED), final_path) elif name == 'testing': rmtree(final_path, ignore_errors=True) os.replace(op.join(path, TESTING_VERSIONED), final_path) # maybe update the config old_name = 'brainstorm' if name.startswith('bst_') else name _do_path_update(path, update_path, config_key, old_name) # compare the version of the dataset and mne data_version = _dataset_version(path, name) # 0.7 < 0.7.git should be False, therefore strip if check_version and (LooseVersion(data_version) < LooseVersion(mne_version.strip('.git'))): warn('The {name} dataset (version {current}) is older than ' 'mne-python (version {newest}). 
If the examples fail, ' 'you may need to update the {name} dataset by using ' 'mne.datasets.{name}.data_path(force_update=True)'.format( name=name, current=data_version, newest=mne_version)) return (final_path, data_version) if return_version else final_path ","def _data_path(path=None, force_update=False, update_path=True, download=True, name=None, check_version=False, return_version=False, accept=False): """"""Aux function. This is a general function for fetching MNE datasets. In order to define an MNE dataset, one needs to define a URL, archive name, folder name, and environment configuration key. They also need to define a ``pooch`` registry txt file mapping the dataset archive name to a hash (i.e. md5, or sha). Parameters ---------- path : None | str Location of where to look for the {name} dataset. If None, the environment variable or config parameter ``{conf}`` is used. If it doesn't exist, the ""~/mne_data"" directory is used. If the {name} dataset is not found under the given path, the data will be automatically downloaded to the specified folder. force_update : bool Force update of the {name} dataset even if a local copy exists. Default is False. update_path : bool | None If True (default), set the ``{conf}`` in mne-python config to the given path. If None, the user is prompted. download : bool If False and the {name} dataset has not been downloaded yet, it will not be downloaded and the path will be returned as '' (empty string). This is mostly used for debugging purposes and can be safely ignored by most users. name : str | None The name of the dataset, which should correspond to the URL, archive name, folder names and configuration key mappings for pooch. check_version : bool Whether to check the version of the dataset or not. Each version of the dataset is stored in the root with a ``version.txt`` file. return_version : bool Whether or not to return the version of the dataset or not. Defaults to False. accept : bool Some datasets require an acceptance of an additional license. Default to False. If this is True, then license text should be passed into key word argument ``license_text``. license_text : str | None The text of a license agreement. Only used if ``accept`` is True. Returns ------- path : str Path to {name} dataset directory. 
"""""" # import pooch library for handling the dataset downloading pooch = _soft_import('pooch', 'dataset downloading', strict=True) # extract configuration parameters this_dataset = MNE_DATASETS[name] config_key = this_dataset['config_key'] folder_name = this_dataset['folder_name'] # get download path for specific dataset path = _get_path(path=path, key=config_key, name=name) # get the actual path to each dataset folder name final_path = op.join(path, folder_name) # handle BrainStorm datasets with nested folders for datasets if name.startswith('bst_'): final_path = op.join(final_path, name) # additional condition: check for version.txt and parse it # check if testing or misc data is outdated; if so, redownload it want_version = RELEASES.get(name, None) want_version = _FAKE_VERSION if name == 'fake' else want_version # get the version of the dataset and then check if the version is outdated data_version = _dataset_version(final_path, name) outdated_dataset = want_version is not None and \ LooseVersion(want_version) > LooseVersion(data_version) if outdated_dataset: logger.info(f'Dataset {name} version {data_version} out of date, ' f'latest version is {want_version}') # return empty string if outdated dataset and we don't want # to download if (not force_update) and outdated_dataset and not download: return ('', data_version) if return_version else '' # reasons to bail early (hf_sef has separate code for this): if (not force_update) and (not outdated_dataset) and \ (not name.startswith('hf_sef_')): # if target folder exists (otherwise pooch downloads every time, # because we don't save the archive files after unpacking) if op.isdir(final_path): _do_path_update(path, update_path, config_key, name) return (final_path, data_version) if return_version else final_path # if download=False (useful for debugging) elif not download: return ('', data_version) if return_version else '' # if user didn't accept the license elif name.startswith('bst_'): if accept or '--accept-brainstorm-license' in sys.argv: answer = 'y' else: # If they don't have stdin, just accept the license # https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501 answer = _safe_input( '%sAgree (y/[n])? ' % _bst_license_text, use='y') if answer.lower() != 'y': raise RuntimeError('You must agree to the license to use this ' 'dataset') # downloader & processors progressbar = True if name == 'fake': progressbar = False downloader = pooch.HTTPDownloader(progressbar=progressbar) # use tqdm unzip = pooch.Unzip(extract_dir=path) # to unzip downloaded file untar = pooch.Untar(extract_dir=path) # to untar downloaded file # this processor handles nested tar files nested_untar = pooch.Untar(extract_dir=op.join(path, folder_name)) # create a map for each dataset name to its corresponding processor # Note: when adding a new dataset, a new line must be added here. 
processors = dict( bst_auditory=nested_untar, bst_phantom_ctf=nested_untar, bst_phantom_elekta=nested_untar, bst_raw=nested_untar, bst_resting=nested_untar, fake=untar, fieldtrip_cmc=nested_untar, kiloword=untar, misc=untar, mtrf=unzip, multimodal=untar, fnirs_motor=untar, opm=untar, sample=untar, somato=untar, spm=untar, testing=untar, visual_92_categories=untar, phantom_4dbti=unzip, refmeg_noise=unzip, hf_sef_raw=pooch.Untar( extract_dir=path, members=[f'hf_sef/{subdir}' for subdir in ('MEG', 'SSS', 'subjects')]), hf_sef_evoked=pooch.Untar( extract_dir=path, members=[f'hf_sef/{subdir}' for subdir in ('MEG', 'SSS', 'subjects')]), ssvep=unzip, erp_core=untar, epilepsy_ecog=untar, ) # construct the mapping needed by pooch from archive names # to urls pooch_urls = dict() # construct the mapping needed by pooch for the hash checking pooch_hash_mapping = dict() # handle case of multiple sub-datasets with different urls if name == 'visual_92_categories': names = [f'visual_92_categories_{n}' for n in (1, 2)] else: names = [name] # write all pooch urls for this_name in names: this_dataset = MNE_DATASETS[this_name] archive_name = this_dataset['archive_name'] dataset_url = this_dataset['url'] dataset_hash = this_dataset['hash'] # write to pooch url pooch_urls[archive_name] = dataset_url pooch_hash_mapping[archive_name] = dataset_hash # create the download manager fetcher = pooch.create( path=path, base_url='', # Full URLs are given in the `urls` dict. version=None, # Data versioning is decoupled from MNE-Python version. registry=None, # Registry is loaded from file, below. urls=pooch_urls, retry_if_failed=2 # 2 retries = 3 total attempts ) # create temporary checksum registry with tempfile.TemporaryDirectory() as tmpdir: registry = op.join(tmpdir, 'dataset_checksums.txt') # write the md5 hashes with open(registry, 'w') as fout: for archive_name, dataset_hash in pooch_hash_mapping.items(): # write to the checksums file registry fout.write(f'{archive_name} {dataset_hash}\n') # load the checksum registry fetcher.load_registry(registry) # use our logger level for pooch's logger too pooch.get_logger().setLevel(logger.getEffectiveLevel()) for this_name in names: # fetch and unpack the data archive_name = MNE_DATASETS[this_name]['archive_name'] fetcher.fetch(fname=archive_name, downloader=downloader, processor=processors[name]) # after unpacking, remove the archive file os.remove(op.join(path, archive_name)) # remove version number from ""misc"" and ""testing"" datasets folder names if name == 'misc': rmtree(final_path, ignore_errors=True) os.replace(op.join(path, MISC_VERSIONED), final_path) elif name == 'testing': rmtree(final_path, ignore_errors=True) os.replace(op.join(path, TESTING_VERSIONED), final_path) # maybe update the config old_name = 'brainstorm' if name.startswith('bst_') else name _do_path_update(path, update_path, config_key, old_name) # compare the version of the dataset and mne data_version = _dataset_version(path, name) # 0.7 < 0.7.git should be False, therefore strip if check_version and (LooseVersion(data_version) < LooseVersion(mne_version.strip('.git'))): warn('The {name} dataset (version {current}) is older than ' 'mne-python (version {newest}). 
If the examples fail, ' 'you may need to update the {name} dataset by using ' 'mne.datasets.{name}.data_path(force_update=True)'.format( name=name, current=data_version, newest=mne_version)) return (final_path, data_version) if return_version else final_path " 33017,"def get_route_edge_attributes(G, route, attribute=None, minimize_key='length', retrieve_default=None): """""" Get a list of attribute values for each edge in a path. Parameters ---------- G : networkx multidigraph route : list list of nodes in the path attribute : string the name of the attribute to get the value of for each edge. If not specified, the list of dictionaries containing all attributes is returned. minimize_key : string if there are parallel edges between two nodes, select the one with the lowest value of minimize_key retrieve_default : Callable[Tuple[Any, Any], Any] Function called with the edge nodes as parameters to retrieve a default value, if the edge does not contain the given attribute. Per default, a `KeyError` is raised Returns ------- attribute_values : list list of edge attribute values """""" attribute_values = [] for u, v in zip(route[:-1], route[1:]): # if there are parallel edges between two nodes, select the one with the # lowest value of minimize_key data = min(G.get_edge_data(u, v).values(), key=lambda x: x[minimize_key]) if attribute is None: attribute_value = data elif retrieve_default is not None: attribute_value = data.get(attribute, retrieve_default(u, v)) else: attribute_value = data[attribute] attribute_values.append(attribute_value) return attribute_values ","def get_route_edge_attributes(G, route, attribute=None, minimize_key='length', retrieve_default=None): """""" Get a list of attribute values for each edge in a path. Parameters ---------- G : networkx multidigraph route : list list of nodes in the path attribute : string the name of the attribute to get the value of for each edge. If not specified, the complete data dict is returned for each edge. minimize_key : string if there are parallel edges between two nodes, select the one with the lowest value of minimize_key retrieve_default : Callable[Tuple[Any, Any], Any] Function called with the edge nodes as parameters to retrieve a default value, if the edge does not contain the given attribute. Per default, a `KeyError` is raised Returns ------- attribute_values : list list of edge attribute values """""" attribute_values = [] for u, v in zip(route[:-1], route[1:]): # if there are parallel edges between two nodes, select the one with the # lowest value of minimize_key data = min(G.get_edge_data(u, v).values(), key=lambda x: x[minimize_key]) if attribute is None: attribute_value = data elif retrieve_default is not None: attribute_value = data.get(attribute, retrieve_default(u, v)) else: attribute_value = data[attribute] attribute_values.append(attribute_value) return attribute_values " 51799,"def _reset(args): if not args.yes_to_all: msg = [ ""Bootstrapping configuration is being reset to Spack's defaults. "" ""Current configuration will be lost.\n"", ""Do you want to continue?"" ] ok_to_continue = llnl.util.tty.get_yes_or_no( ''.join(msg), default=True ) if not ok_to_continue: raise RuntimeError('Aborting') for scope in spack.config.config.file_scopes: if 'defaults' in scope.path: continue bootstrap_yaml = os.path.join(scope.path, 'bootstrap.yaml') if os.path.exists(bootstrap_yaml): os.remove(bootstrap_yaml) ","def _reset(args): if not args.yes_to_all: msg = [ ""Bootstrapping configuration is being reset to Spack's defaults. 
"" ""Current configuration will be lost.\n"", ""Do you want to continue?"" ] ok_to_continue = llnl.util.tty.get_yes_or_no( ''.join(msg), default=True ) if not ok_to_continue: raise RuntimeError('Aborting') for scope in spack.config.config.file_scopes: if 'defaults' in scope.path: continue spack.config.set('bootstrap', {}, scope=scope) if args.path: spack.config.set('bootstrap:root', args.path, scope=args.scope) root = spack.config.get('bootstrap:root', default=None, scope=args.scope) if root: root = spack.util.path.canonicalize_path(root) print(root) " 12366,"def maybe_b64decode(data): """"""base64 decode data If data is base64 encoded bytes, return b64decode(data). If not, return data unmodified. @param data: data as bytes. TypeError is raised if not bytes. """""" if not isinstance(data, bytes): raise TypeError(""data is '%s', expected bytes"" % type(data)) try: return base64.b64decode(data) except binascii.Error: return data ","def maybe_b64decode(data: bytes) -> bytes: """"""base64 decode data If data is base64 encoded bytes, return b64decode(data). If not, return data unmodified. @param data: data as bytes. TypeError is raised if not bytes. """""" if not isinstance(data, bytes): raise TypeError(""data is '%s', expected bytes"" % type(data)) try: return base64.b64decode(data) except binascii.Error: return data " 38026,"def data_kind(data, x=None, y=None, z=None, required_z=False): """""" Check what kind of data is provided to a module. Possible types: * a file name provided as 'data' * an xarray.DataArray provided as 'data' * a matrix provided as 'data' * 1D arrays x and y (and z, optionally) Arguments should be ``None`` if not used. If doesn't fit any of these categories (or fits more than one), will raise an exception. Parameters ---------- data : str or xarray.DataArray or {table-like} or None Pass in either a file name to an ASCII data table, an :class:`xarray.DataArray`, a 1D/2D {table-classes}. x/y : 1d arrays or None x and y columns as numpy arrays. z : 1d array or None z column as numpy array. To be used optionally when x and y are given. Returns ------- kind : str One of: ``'file'``, ``'grid'``, ``'matrix'``, ``'vectors'``. Examples -------- >>> import numpy as np >>> import xarray as xr >>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6])) 'vectors' >>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None) 'matrix' >>> data_kind(data=""my-data-file.txt"", x=None, y=None) 'file' >>> data_kind(data=xr.DataArray(np.random.rand(4, 3))) 'grid' """""" if data is None and x is None and y is None: raise GMTInvalidInput(""No input data provided."") if data is not None and (x is not None or y is not None or z is not None): raise GMTInvalidInput(""Too much data. Use either data or x and y."") if data is None and (x is None or y is None): raise GMTInvalidInput(""Must provided both x and y."") if data is None and required_z and (x is None or y is None or z is None): raise GMTInvalidInput(""Must provided both x, y, and z."") if isinstance(data, str): kind = ""file"" elif isinstance(data, xr.DataArray): kind = ""grid"" elif hasattr(data, ""__geo_interface__""): kind = ""geojson"" elif data is not None: kind = ""matrix"" else: kind = ""vectors"" return kind ","def data_kind(data, x=None, y=None, z=None, required_z=False): """""" Check what kind of data is provided to a module. 
Possible types: * a file name provided as 'data' * an xarray.DataArray provided as 'data' * a matrix provided as 'data' * 1D arrays x and y (and z, optionally) Arguments should be ``None`` if not used. If doesn't fit any of these categories (or fits more than one), will raise an exception. Parameters ---------- data : str or xarray.DataArray or {table-like} or None Pass in either a file name to an ASCII data table, an :class:`xarray.DataArray`, a 1D/2D {table-classes}. x/y : 1d arrays or None x and y columns as numpy arrays. z : 1d array or None z column as numpy array. To be used optionally when x and y are given. Returns ------- kind : str One of: ``'file'``, ``'grid'``, ``'matrix'``, ``'vectors'``. Examples -------- >>> import numpy as np >>> import xarray as xr >>> data_kind(data=None, x=np.array([1, 2, 3]), y=np.array([4, 5, 6])) 'vectors' >>> data_kind(data=np.arange(10).reshape((5, 2)), x=None, y=None) 'matrix' >>> data_kind(data=""my-data-file.txt"", x=None, y=None) 'file' >>> data_kind(data=xr.DataArray(np.random.rand(4, 3))) 'grid' """""" if data is None and x is None and y is None: raise GMTInvalidInput(""No input data provided."") if data is not None and (x is not None or y is not None or z is not None): raise GMTInvalidInput(""Too much data. Use either data or x and y."") if data is None and (x is None or y is None): raise GMTInvalidInput(""Must provide both x and y."") if data is None and required_z and (x is None or y is None or z is None): raise GMTInvalidInput(""Must provided both x, y, and z."") if isinstance(data, str): kind = ""file"" elif isinstance(data, xr.DataArray): kind = ""grid"" elif hasattr(data, ""__geo_interface__""): kind = ""geojson"" elif data is not None: kind = ""matrix"" else: kind = ""vectors"" return kind " 31785,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" args = demisto.args() params = demisto.params() api_key = params.get('apikey') api_key_id = params.get('apikey_id') base_url = urljoin(params['url'], '/public_api/v1') verify_cert = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: nonce = """".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)]) timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000) auth_key = ""%s%s%s"" % (api_key, nonce, timestamp) api_key_hash = hashlib.sha256(auth_key.encode(""utf-8"")).hexdigest() headers = { ""x-xdr-timestamp"": str(timestamp), ""x-xdr-nonce"": nonce, ""x-xdr-auth-id"": str(api_key_id), ""Authorization"": api_key_hash } client = Client( base_url=base_url, verify=verify_cert, headers=headers, proxy=proxy) generic_commands = init_generic_commands() built_in_commands = init_built_in_commands() if command in generic_commands: return_results(generic_commands[command](client, args)) elif command in built_in_commands: return_results(get_built_in_query_results_polling_command(client, args)) else: raise NotImplementedError(f'Command {command} does not exist.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError: {str(e)}') finally: get_integration_context().clear() ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" args = demisto.args() params = demisto.params() api_key = params.get('apikey') api_key_id = params.get('apikey_id') 
base_url = urljoin(params['url'], '/public_api/v1') verify_cert = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: nonce = """".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)]) timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000) auth_key = ""%s%s%s"" % (api_key, nonce, timestamp) api_key_hash = hashlib.sha256(auth_key.encode(""utf-8"")).hexdigest() headers = { ""x-xdr-timestamp"": str(timestamp), ""x-xdr-nonce"": nonce, ""x-xdr-auth-id"": str(api_key_id), ""Authorization"": api_key_hash, } client = Client( base_url=base_url, verify=verify_cert, headers=headers, proxy=proxy) generic_commands = init_generic_commands() built_in_commands = init_built_in_commands() if command in generic_commands: return_results(generic_commands[command](client, args)) elif command in built_in_commands: return_results(get_built_in_query_results_polling_command(client, args)) else: raise NotImplementedError(f'Command {command} does not exist.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError: {str(e)}') finally: get_integration_context().clear() " 18257,"def _get_system_executables(): path = os.getenv('PATH') search_paths = path.split(os.pathsep) exe_to_path = {} # Reverse order of search directories so that an exe in the first PATH # entry overrides later entries for search_path in reversed(search_paths): for exe in os.listdir(search_path): exe_to_path[exe] = os.path.join(search_path, exe) return exe_to_path ","def _get_system_executables(): path = os.getenv('PATH') search_paths = path.split(os.pathsep) exe_to_path = {} # Reverse order of search directories so that an exe in the first PATH # entry overrides later entries for search_path in filter(os.path.exists, reversed(search_paths)): for exe in os.listdir(search_path): exe_to_path[exe] = os.path.join(search_path, exe) return exe_to_path " 31888,"def incremental_level_fetch(client: Client) -> list: """""" This method implements the incremental level of the feed. It checks if any updates have been made in the tags from the last time, and returns the updated tags. Args: client: Client object Returns: A list of tag details represents the tags that have been updated. 
"""""" results: list = [] integration_context = get_integration_context() # This field saves tags that have been updated since the last time of fetch and need to be updated in demisto list_of_all_updated_tags = argToList(integration_context.get('tags_need_to_be_fetched', '')) time_from_last_update = integration_context.get('time_of_first_fetch') # if there are such tags, we first get all of them and upload to demisto index_to_delete = 0 for tag in list_of_all_updated_tags: if len(results) < PAGE_SIZE: results.append(client.get_tag_details(tag.get('public_tag_name'))) index_to_delete += 1 else: context = get_integration_context() context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT) context['tags_need_to_be_fetched'] = list_of_all_updated_tags[index_to_delete:] set_integration_context(context) return results page_num = 0 has_updates = True while has_updates: response = client.get_tags({'pageNum': page_num, 'pageSize': 200, 'sortBy': 'updated_at', 'order': 'desc'}) tags = response.get('tags', []) for tag in tags: update_time = tag.get('updated_at') update_time = datetime.strptime(update_time, AF_TAGS_DATE_FORMAT).strftime( DATE_FORMAT) if update_time else None update_time = date_to_timestamp(update_time, DATE_FORMAT) if update_time >= time_from_last_update: list_of_all_updated_tags.append( {'public_tag_name': tag.get('public_tag_name')}) else: has_updates = False break page_num += 1 # add only PAGE_SIZE tag_details to results, so we wont make to many calls to the api list_index = 0 for tag in list_of_all_updated_tags: if len(results) < PAGE_SIZE: public_tag_name = tag.get('public_tag_name') response = client.get_tag_details(public_tag_name) results.append(response) list_index += 1 else: break # delete from the list all tags that will be returned this fetch list_of_all_updated_tags = list_of_all_updated_tags[list_index:] # update integration context context = get_integration_context() context['tags_need_to_be_fetched'] = list_of_all_updated_tags context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT) set_integration_context(context) return results ","def incremental_level_fetch(client: Client) -> list: """""" This method implements the incremental level of the feed. It checks if any updates have been made in the tags from the last fetch time, and returns the updated tags. Args: client: Client object Returns: A list of tag details represents the tags that have been updated. 
"""""" results: list = [] integration_context = get_integration_context() # This field saves tags that have been updated since the last time of fetch and need to be updated in demisto list_of_all_updated_tags = argToList(integration_context.get('tags_need_to_be_fetched', '')) time_from_last_update = integration_context.get('time_of_first_fetch') # if there are such tags, we first get all of them and upload to demisto index_to_delete = 0 for tag in list_of_all_updated_tags: if len(results) < PAGE_SIZE: results.append(client.get_tag_details(tag.get('public_tag_name'))) index_to_delete += 1 else: context = get_integration_context() context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT) context['tags_need_to_be_fetched'] = list_of_all_updated_tags[index_to_delete:] set_integration_context(context) return results page_num = 0 has_updates = True while has_updates: response = client.get_tags({'pageNum': page_num, 'pageSize': 200, 'sortBy': 'updated_at', 'order': 'desc'}) tags = response.get('tags', []) for tag in tags: update_time = tag.get('updated_at') update_time = datetime.strptime(update_time, AF_TAGS_DATE_FORMAT).strftime( DATE_FORMAT) if update_time else None update_time = date_to_timestamp(update_time, DATE_FORMAT) if update_time >= time_from_last_update: list_of_all_updated_tags.append( {'public_tag_name': tag.get('public_tag_name')}) else: has_updates = False break page_num += 1 # add only PAGE_SIZE tag_details to results, so we wont make to many calls to the api list_index = 0 for tag in list_of_all_updated_tags: if len(results) < PAGE_SIZE: public_tag_name = tag.get('public_tag_name') response = client.get_tag_details(public_tag_name) results.append(response) list_index += 1 else: break # delete from the list all tags that will be returned this fetch list_of_all_updated_tags = list_of_all_updated_tags[list_index:] # update integration context context = get_integration_context() context['tags_need_to_be_fetched'] = list_of_all_updated_tags context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT) set_integration_context(context) return results " 31238,"def main(): today_week_day = datetime.today().weekday() today_week_day = 0 if today_week_day == 6 else today_week_day + 1 total_seconds = 0 get_roles_response = demisto.executeCommand('getRoles', {}) if is_error(get_roles_response): demisto.error(f'Failed to get roles: {str(get_error(get_roles_response))}') else: roles = get_roles_response[0]['Contents'] for role in roles: shifts = role.get('shifts') or [] for shift in shifts: shift_from_day = shift.get('fromDay') shift_to_day = shift.get('toDay') if shift_from_day <= today_week_day and shift_to_day >= today_week_day: # get the time when the shift starts delta = shift_from_day - today_week_day shift_from = datetime.today() + timedelta(days=delta) shift_from = shift_from.replace(minute=shift.get('fromMinute'), hour=shift.get('fromHour'), second=0) # get the time when the shift ends delta = shift_to_day - today_week_day shift_to = datetime.today() + timedelta(days=delta) shift_to = shift_to.replace(minute=shift.get('toMinute'), hour=shift.get('toHour'), second=0) if shift_from < datetime.today() < shift_to: # found the current shift diff = shift_to - datetime.today() total_seconds = round(diff.total_seconds()) break widget = [{'name': '', 'data': [total_seconds]}] return_results(json.dumps(widget)) ","def main(): today_week_day = datetime.today().weekday() today_week_day = (today_week_day + 1) % 7 total_seconds = 0 get_roles_response = 
demisto.executeCommand('getRoles', {}) if is_error(get_roles_response): demisto.error(f'Failed to get roles: {str(get_error(get_roles_response))}') else: roles = get_roles_response[0]['Contents'] for role in roles: shifts = role.get('shifts') or [] for shift in shifts: shift_from_day = shift.get('fromDay') shift_to_day = shift.get('toDay') if shift_from_day <= today_week_day and shift_to_day >= today_week_day: # get the time when the shift starts delta = shift_from_day - today_week_day shift_from = datetime.today() + timedelta(days=delta) shift_from = shift_from.replace(minute=shift.get('fromMinute'), hour=shift.get('fromHour'), second=0) # get the time when the shift ends delta = shift_to_day - today_week_day shift_to = datetime.today() + timedelta(days=delta) shift_to = shift_to.replace(minute=shift.get('toMinute'), hour=shift.get('toHour'), second=0) if shift_from < datetime.today() < shift_to: # found the current shift diff = shift_to - datetime.today() total_seconds = round(diff.total_seconds()) break widget = [{'name': '', 'data': [total_seconds]}] return_results(json.dumps(widget)) " 37410,"def convert_su2_to_so3(matrix: np.ndarray) -> np.ndarray: """"""Computes SO(3)-matrix from input SU(2)-matrix. Args: matrix: The SU(2)-matrix for which a corresponding SO(3)-matrix needs to be computed. Returns: The SO(3)-matrix corresponding to ``matrix``. Raises: ValueError: if ``matrix`` is not an SU(2)-matrix. """""" if matrix.shape != (2, 2): raise ValueError( 'Conversion from SU2 called on matrix of shape', matrix.shape) if abs(np.linalg.det(matrix) - 1) > 1e-4: raise ValueError( 'Conversion from SU2 called on determinant of', np.linalg.det(matrix)) matrix = matrix.astype(np.complex) a = np.real(matrix[0][0]) b = np.imag(matrix[0][0]) c = -np.real(matrix[0][1]) d = -np.imag(matrix[0][1]) rotation = np.array([[a**2-b**2-c**2+d**2, 2*a*b+2*c*d, -2*a*c+2*b*d], [-2*a*b+2*c*d, a**2-b**2+c**2-d**2, 2*a*d+2*b*c], [2*a*c+2*b*d, 2*b*c-2*a*d, a**2+b**2-c**2-d**2]], dtype=float) return rotation ","def convert_su2_to_so3(matrix: np.ndarray) -> np.ndarray: """"""Computes SO(3)-matrix from input SU(2)-matrix. Args: matrix: The SU(2)-matrix for which a corresponding SO(3)-matrix needs to be computed. Returns: The SO(3)-matrix corresponding to ``matrix``. Raises: ValueError: if ``matrix`` is not an SU(2)-matrix. """""" if matrix.shape != (2, 2): raise ValueError( 'Conversion from SU2 called on matrix of shape', matrix.shape) if abs(np.linalg.det(matrix) - 1) > 1e-4: raise ValueError( 'Conversion from SU2 called on determinant of', np.linalg.det(matrix)) matrix = matrix.astype(complex) a = np.real(matrix[0][0]) b = np.imag(matrix[0][0]) c = -np.real(matrix[0][1]) d = -np.imag(matrix[0][1]) rotation = np.array([[a**2-b**2-c**2+d**2, 2*a*b+2*c*d, -2*a*c+2*b*d], [-2*a*b+2*c*d, a**2-b**2+c**2-d**2, 2*a*d+2*b*c], [2*a*c+2*b*d, 2*b*c-2*a*d, a**2+b**2-c**2-d**2]], dtype=float) return rotation " 55635,"def rgb_to_lab(image: torch.Tensor) -> torch.Tensor: r""""""Convert a RGB image to Lab. .. image:: _static/img/rgb_to_lab.png The image data is assumed to be in the range of :math:`[0, 1]`. Lab color is computed using the D65 illuminant and Observer 2. Args: image: RGB Image to be converted to Lab with shape :math:`(*, 3, H, W)`. Returns: Lab version of the image with shape :math:`(*, 3, H, W)`. The L channel values are in the range 0..100. a and b are in the range -127..127. 
Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_lab(input) # 2x3x4x5 """""" if not isinstance(image, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(image)}"") if len(image.shape) < 3 or image.shape[-3] != 3: raise ValueError(f""Input size must have a shape of (*, 3, H, W). Got {image.shape}"") # Convert from sRGB to Linear RGB lin_rgb = rgb_to_linear_rgb(image) xyz_im: torch.Tensor = rgb_to_xyz(lin_rgb) # normalize for D65 white point xyz_ref_white = torch.tensor([0.95047, 1.0, 1.08883], device=xyz_im.device, dtype=xyz_im.dtype)[..., :, None, None] xyz_normalized = torch.div(xyz_im, xyz_ref_white) threshold = 0.008856 power = torch.pow(xyz_normalized.clamp(min=threshold), 1 / 3.0) scale = 7.787 * xyz_normalized + 4.0 / 29.0 xyz_int = torch.where(xyz_normalized > threshold, power, scale) x: torch.Tensor = xyz_int[..., 0, :, :] y: torch.Tensor = xyz_int[..., 1, :, :] z: torch.Tensor = xyz_int[..., 2, :, :] L: torch.Tensor = (116.0 * y) - 16.0 a: torch.Tensor = 500.0 * (x - y) _b: torch.Tensor = 200.0 * (y - z) out: torch.Tensor = torch.stack([L, a, _b], dim=-3) return out ","def rgb_to_lab(image: torch.Tensor) -> torch.Tensor: r""""""Convert a RGB image to Lab. .. image:: _static/img/rgb_to_lab.png The image data is assumed to be in the range of :math:`[0, 1]`. Lab color is computed using the D65 illuminant and Observer 2. Args: image: RGB Image to be converted to Lab with shape :math:`(*, 3, H, W)`. Returns: Lab version of the image with shape :math:`(*, 3, H, W)`. The L channel values are in the range 0..100. a and b are in the range -127..127. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_lab(input) # 2x3x4x5 """""" if not isinstance(image, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(image)}"") if len(image.shape) < 3 or image.shape[-3] != 3: raise ValueError(f""Input size must have a shape of (*, 3, H, W). 
Got {image.shape}"") # Convert from sRGB to Linear RGB lin_rgb = rgb_to_linear_rgb(image) xyz_im: torch.Tensor = rgb_to_xyz(lin_rgb) # normalize for D65 white point xyz_ref_white = torch.tensor([0.95047, 1.0, 1.08883], device=xyz_im.device, dtype=xyz_im.dtype)[..., :, None, None] xyz_normalized = torch.div(xyz_im, xyz_ref_white) threshold = 0.008856 power = torch.pow(xyz_normalized.clamp(min=threshold), 1 / 3.0) scale = 7.787 * xyz_normalized + 4.0 / 29.0 xyz_int = torch.where(xyz_normalized > threshold, power, scale) x: torch.Tensor = xyz_int[..., 0, :, :] y: torch.Tensor = xyz_int[..., 1, :, :] z: torch.Tensor = xyz_int[..., 2, :, :] L: torch.Tensor = (116.0 * y) - 16.0 a: torch.Tensor = 500.0 * (x - y) _b: torch.Tensor = 200.0 * (y - z) out: torch.Tensor = torch.stack([L, a, _b], dim=-3) return out " 3224,"def get_filter(query=None, params=None): """""" Returns an eventstore filter given the search text provided by the user and URL params """""" # NOTE: this function assumes project permissions check already happened parsed_terms = [] if query is not None: try: parsed_terms = parse_search_query(query) except ParseError as e: raise InvalidSearchQuery(u""Parse error: %r (column %d)"" % (e.expr.name, e.column())) kwargs = { ""start"": None, ""end"": None, ""conditions"": [], ""having"": [], ""project_ids"": [], ""group_ids"": [], } def get_projects(params): return { p[""slug""]: p[""id""] for p in Project.objects.filter(id__in=params.get(""project_id"", [])).values( ""id"", ""slug"" ) } def to_list(value): if isinstance(value, list): return value return [value] projects = None for term in parsed_terms: if isinstance(term, SearchFilter): name = term.key.name if name in (PROJECT_NAME_ALIAS, PROJECT_ALIAS): if projects is None: projects = get_projects(params) project = projects.get(term.value.value) if not project: raise InvalidSearchQuery( ""Invalid query. Project %s must exist and be in global header"" % term.value.value ) condition = [""project_id"", ""="", projects.get(term.value.value)] kwargs[""conditions""].append(condition) elif name == ""issue.id"" and term.value.value != """": # A blank term value means that this is a has filter kwargs[""group_ids""].extend(to_list(term.value.value)) elif name == ""issue"" and term.value.value != """": if params and ""organization_id"" in params: try: group = Group.objects.by_qualified_short_id( params[""organization_id""], term.value.value ) kwargs[""group_ids""].extend(to_list(group.id)) except Exception: raise InvalidSearchQuery( u""Invalid value '{}' for 'issue:' filter"".format(term.value.value) ) elif name in FIELD_ALIASES: converted_filter = convert_aggregate_filter_to_snuba_query(term, True) if converted_filter: kwargs[""having""].append(converted_filter) else: converted_filter = convert_search_filter_to_snuba_query(term) if converted_filter: kwargs[""conditions""].append(converted_filter) elif isinstance(term, AggregateFilter): converted_filter = convert_aggregate_filter_to_snuba_query(term, False) if converted_filter: kwargs[""having""].append(converted_filter) # Keys included as url params take precedent if same key is included in search # They are also considered safe and to have had access rules applied unlike conditions # from the query string. 
if params: for key in (""start"", ""end""): kwargs[key] = params.get(key, None) # OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids if ""project_id"" in params: kwargs[""project_ids""] = params[""project_id""] if ""environment"" in params: term = SearchFilter(SearchKey(""environment""), ""="", SearchValue(params[""environment""])) kwargs[""conditions""].append(convert_search_filter_to_snuba_query(term)) if ""group_ids"" in params: kwargs[""group_ids""] = to_list(params[""group_ids""]) # Deprecated alias, use `group_ids` instead if ""issue.id"" in params: kwargs[""group_ids""] = to_list(params[""issue.id""]) return eventstore.Filter(**kwargs) ","def get_filter(query=None, params=None): """""" Returns an eventstore filter given the search text provided by the user and URL params """""" # NOTE: this function assumes project permissions check already happened parsed_terms = [] if query is not None: try: parsed_terms = parse_search_query(query) except ParseError as e: raise InvalidSearchQuery(u""Parse error: %r (column %d)"" % (e.expr.name, e.column())) kwargs = { ""start"": None, ""end"": None, ""conditions"": [], ""having"": [], ""project_ids"": [], ""group_ids"": [], } def get_projects(params): return { p[""slug""]: p[""id""] for p in Project.objects.filter(id__in=params.get(""project_id"", [])).values( ""id"", ""slug"" ) } def to_list(value): if isinstance(value, list): return value return [value] projects = None for term in parsed_terms: if isinstance(term, SearchFilter): name = term.key.name if name in (PROJECT_NAME_ALIAS, PROJECT_ALIAS): if projects is None: projects = get_projects(params) project = projects.get(term.value.value) if not project: raise InvalidSearchQuery( ""Invalid query. Project %s does not exist or is not an actively selected project."" % term.value.value ) condition = [""project_id"", ""="", projects.get(term.value.value)] kwargs[""conditions""].append(condition) elif name == ""issue.id"" and term.value.value != """": # A blank term value means that this is a has filter kwargs[""group_ids""].extend(to_list(term.value.value)) elif name == ""issue"" and term.value.value != """": if params and ""organization_id"" in params: try: group = Group.objects.by_qualified_short_id( params[""organization_id""], term.value.value ) kwargs[""group_ids""].extend(to_list(group.id)) except Exception: raise InvalidSearchQuery( u""Invalid value '{}' for 'issue:' filter"".format(term.value.value) ) elif name in FIELD_ALIASES: converted_filter = convert_aggregate_filter_to_snuba_query(term, True) if converted_filter: kwargs[""having""].append(converted_filter) else: converted_filter = convert_search_filter_to_snuba_query(term) if converted_filter: kwargs[""conditions""].append(converted_filter) elif isinstance(term, AggregateFilter): converted_filter = convert_aggregate_filter_to_snuba_query(term, False) if converted_filter: kwargs[""having""].append(converted_filter) # Keys included as url params take precedent if same key is included in search # They are also considered safe and to have had access rules applied unlike conditions # from the query string. 
if params: for key in (""start"", ""end""): kwargs[key] = params.get(key, None) # OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids if ""project_id"" in params: kwargs[""project_ids""] = params[""project_id""] if ""environment"" in params: term = SearchFilter(SearchKey(""environment""), ""="", SearchValue(params[""environment""])) kwargs[""conditions""].append(convert_search_filter_to_snuba_query(term)) if ""group_ids"" in params: kwargs[""group_ids""] = to_list(params[""group_ids""]) # Deprecated alias, use `group_ids` instead if ""issue.id"" in params: kwargs[""group_ids""] = to_list(params[""issue.id""]) return eventstore.Filter(**kwargs) " 9068,"def _receive_cap_ack(bot: SopelWrapper, trigger: Trigger) -> None: was_completed = bot.cap_requests.is_complete cap_ack: Tuple[str, ...] = bot.capabilities.handle_ack(bot, trigger) try: result: Optional[ List[Tuple[bool, Optional[plugin.CapabilityNegotiation]]] ] = bot.cap_requests.acknowledge(bot, cap_ack) except config.ConfigurationError as error: LOGGER.error( 'Configuration error on ACK capability ""%s"": %s', ', '.join(cap_ack), error, ) bot.write(('CAP', 'END')) # close negotiation now bot.quit('Wrong configuration.') return None except Exception as error: LOGGER.exception( 'Error on ACK capability ""%s"": %s', ', '.join(cap_ack), error, ) bot.write(('CAP', 'END')) # close negotiation now bot.quit('Error negotiating capabilities.') return None if result is None: # a plugin may have request the capability without using the proper # interface: ignore return None _handle_cap_acknowledgement(bot, cap_ack, result, was_completed) ","def _receive_cap_ack(bot: SopelWrapper, trigger: Trigger) -> None: was_completed = bot.cap_requests.is_complete cap_ack: Tuple[str, ...] = bot.capabilities.handle_ack(bot, trigger) try: result: Optional[ List[Tuple[bool, Optional[plugin.CapabilityNegotiation]]] ] = bot.cap_requests.acknowledge(bot, cap_ack) except config.ConfigurationError as error: LOGGER.error( 'Configuration error on ACK capability ""%s"": %s', ', '.join(cap_ack), error, ) bot.write(('CAP', 'END')) # close negotiation now bot.quit('Wrong configuration.') return None except Exception as error: LOGGER.exception( 'Error on ACK capability ""%s"": %s', ', '.join(cap_ack), error, ) bot.write(('CAP', 'END')) # close negotiation now bot.quit('Error negotiating capabilities.') return None if result is None: # a plugin may have requested the capability without using the proper # interface: ignore return None _handle_cap_acknowledgement(bot, cap_ack, result, was_completed) " 57800,"def validate_get_int(max_results: Optional[str], message: str, limit: Union[int, str] = 0) -> Optional[int]: """""" Validate and convert string max_results to integer. :param max_results: max results in string. :param message: Message to display when exception raised. :param limit: If max_results > limit raise the exception. :return: int max_results :raises ValueError: if max_results is not a integer and < 0. """""" if max_results: try: max_results_int = int(max_results) if max_results_int <= 0: raise ValueError if limit and max_results_int > int(limit): raise ValueError return max_results_int except ValueError: raise ValueError(message) return None ","def validate_get_int(max_results: Optional[str], message: str, limit: Union[int, str] = 0) -> Optional[int]: """""" Validate and convert string max_results to integer. :param max_results: max results in string. :param message: Message to display when exception raised. 
:param limit: If max_results > limit raise the exception. :return: int max_results :raises ValueError: if max_results is not a integer and < 0. """""" if max_results: try: max_results_int = int(max_results) if max_results_int <= 0: raise ValueError(message) if limit and max_results_int > int(limit): raise ValueError(message) return max_results_int except ValueError: raise ValueError(message) return None " 32358,"def main(): params = demisto.params() aws_default_region = params.get('defaultRegion') aws_role_arn = params.get('roleArn') aws_role_session_name = params.get('roleSessionName') aws_role_session_duration = params.get('sessionDuration') aws_role_policy = None aws_access_key_id = params.get('access_key') aws_secret_access_key = params.get('secret_key') verify_certificate = not params.get('insecure', True) timeout = params.get('timeout') retries = params.get('retries') or 5 aws_queue_url = params.get('queueUrl') max_fetch = min(int(params.get('max_fetch', 10)), 100) parse_body_as_json = params.get('parse_body_as_json', False) commands = { 'aws-sqs-get-queue-url': get_queue_url, 'aws-sqs-list-queues': list_queues, 'aws-sqs-send-message': send_message, 'aws-sqs-create-queue': create_queue, 'aws-sqs-delete-queue': delete_queue, 'aws-sqs-purge-queue': purge_queue } try: validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id, aws_secret_access_key) aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration, aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout, retries) command = demisto.command() args = demisto.args() demisto.debug('Command being called is {}'.format(command)) if command == 'test-module': return_results(test_function(aws_client)) elif demisto.command() == 'fetch-incidents': fetch_incidents(aws_client, aws_queue_url, max_fetch, parse_body_as_json) sys.exit(0) elif command in commands: client = aws_client.aws_session( service='sqs', region=args.get('region'), role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration')) return_results(commands[command](args, client)) else: raise NotImplementedError('{} is not an existing AWS-SQS command'.format(command)) except Exception as e: return_error(""Failed to execute {} command.\nError:\n{}"".format(demisto.command(), str(e))) ","def main(): params = demisto.params() aws_default_region = params.get('defaultRegion') aws_role_arn = params.get('roleArn') aws_role_session_name = params.get('roleSessionName') aws_role_session_duration = params.get('sessionDuration') aws_role_policy = None aws_access_key_id = params.get('access_key') aws_secret_access_key = params.get('secret_key') verify_certificate = not params.get('insecure', True) timeout = params.get('timeout') retries = params.get('retries') or 5 aws_queue_url = params.get('queueUrl') max_fetch = min(arg_to_number(params.get('max_fetch', 10)), 100) parse_body_as_json = params.get('parse_body_as_json', False) commands = { 'aws-sqs-get-queue-url': get_queue_url, 'aws-sqs-list-queues': list_queues, 'aws-sqs-send-message': send_message, 'aws-sqs-create-queue': create_queue, 'aws-sqs-delete-queue': delete_queue, 'aws-sqs-purge-queue': purge_queue } try: validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id, aws_secret_access_key) aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration, aws_role_policy, 
aws_access_key_id, aws_secret_access_key, verify_certificate, timeout, retries) command = demisto.command() args = demisto.args() demisto.debug('Command being called is {}'.format(command)) if command == 'test-module': return_results(test_function(aws_client)) elif demisto.command() == 'fetch-incidents': fetch_incidents(aws_client, aws_queue_url, max_fetch, parse_body_as_json) sys.exit(0) elif command in commands: client = aws_client.aws_session( service='sqs', region=args.get('region'), role_arn=args.get('roleArn'), role_session_name=args.get('roleSessionName'), role_session_duration=args.get('roleSessionDuration')) return_results(commands[command](args, client)) else: raise NotImplementedError('{} is not an existing AWS-SQS command'.format(command)) except Exception as e: return_error(""Failed to execute {} command.\nError:\n{}"".format(demisto.command(), str(e))) " 11843,"def get_display_profile(handle=None): """""" (experimental) Fetches the profile for the current display device. :returns: None if the profile is not known. """""" if sys.platform != ""win32"": return from PIL import ImageWin if isinstance(handle, ImageWin.HDC): profile = core.get_display_profile_win32(handle, 1) else: profile = core.get_display_profile_win32(handle or 0) return ImageCmsProfile(profile) ","def get_display_profile(handle=None): """""" (experimental) Fetches the profile for the current display device. :returns: None if the profile is not known. """""" if sys.platform != ""win32"": return None from PIL import ImageWin if isinstance(handle, ImageWin.HDC): profile = core.get_display_profile_win32(handle, 1) else: profile = core.get_display_profile_win32(handle or 0) return ImageCmsProfile(profile) " 11282,"def _get_refresh_token(service_name, account_name): if not _libsecret: return None err = ct.c_int() attribute1 = _SECRET_SCHEMA_ATTRIBUTE() setattr(attribute1, ""name"", _c_str(""service"")) setattr(attribute1, ""type"", 0) attribute2 = _SECRET_SCHEMA_ATTRIBUTE() setattr(attribute2, ""name"", _c_str(""account"")) setattr(attribute2, ""type"", 0) attributes = [attribute1, attribute2] pattributes = (_SECRET_SCHEMA_ATTRIBUTE * 2)(*attributes) schema = _SECRET_SCHEMA() pschema = _PSECRET_SCHEMA(schema) ct.memset(pschema, 0, ct.sizeof(schema)) setattr(schema, ""name"", _c_str(""org.freedesktop.Secret.Generic"")) setattr(schema, ""flags"", 2) setattr(schema, ""attributes"", pattributes) p_str = _libsecret.secret_password_lookup_sync( pschema, None, ct.byref(err), _c_str(""service""), _c_str(service_name), _c_str(""account""), _c_str(account_name), None, ) if err.value == 0: return p_str.decode(""utf-8"") return None ","def _get_refresh_token(service_name, account_name): if not _libsecret: return None err = ct.c_int() attributes = [_SECRET_SCHEMA_ATTRIBUTE(_c_str(""service""), 0), _SECRET_SCHEMA_ATTRIBUTE(_c_str(""account""), 0)] pattributes = (_SECRET_SCHEMA_ATTRIBUTE * 2)(*attributes) schema = _SECRET_SCHEMA() pschema = _PSECRET_SCHEMA(schema) ct.memset(pschema, 0, ct.sizeof(schema)) setattr(schema, ""name"", _c_str(""org.freedesktop.Secret.Generic"")) setattr(schema, ""flags"", 2) setattr(schema, ""attributes"", pattributes) p_str = _libsecret.secret_password_lookup_sync( pschema, None, ct.byref(err), _c_str(""service""), _c_str(service_name), _c_str(""account""), _c_str(account_name), None, ) if err.value == 0: return p_str.decode(""utf-8"") return None " 48789,"def test_cycle(dag): """""" A wrapper function of check_cycle for backward compatibility reason. 
New code should use check_cycle instead, since this function name test_cycle starts with test_ and pytest will consider it as a unit test causing failure. """""" from warnings import warn warn(""Deprecated, please use check_cycle at the same module instead."", DeprecationWarning) return check_cycle(dag) ","def test_cycle(dag): """""" A wrapper function of check_cycle for backward compatibility reason. New code should use check_cycle instead. Since the function test_cycle starts with test_, pytest will consider it as a unit test and cause failure. """""" from warnings import warn warn(""Deprecated, please use check_cycle at the same module instead."", DeprecationWarning) return check_cycle(dag) " 12703,"def test_metadata_round_trip() -> None: val = ""help_i_am_trapped_inside_a_unit_test_string"" constraints = InterpreterConstraints( [""this"", ""is"", ""just"", ""to"" ""test>=3.0"", ""parsing!=9.9999999,<10"", ""semantics==93""] ) metadata = lockfile_metadata_header(val, [str(i) for i in constraints]) print(metadata.decode(""ascii"")) output = read_lockfile_metadata(metadata) assert val == output.invalidation_digest assert constraints == output.valid_interpreter_constraints ","def test_metadata_round_trip() -> None: val = ""help_i_am_trapped_inside_a_unit_test_string"" constraints = InterpreterConstraints( [""CPython==2.7.*"", ""PyPy"", ""CPython>=3.6,<4,!=3.7.*""] ) metadata = lockfile_metadata_header(val, [str(i) for i in constraints]) print(metadata.decode(""ascii"")) output = read_lockfile_metadata(metadata) assert val == output.invalidation_digest assert constraints == output.valid_interpreter_constraints " 42086,"def _get_contour_plot( study: Study, params: Optional[List[str]] = None, target: Optional[Callable[[FrozenTrial], float]] = None, target_name: str = ""Objective Value"", ) -> ""Axes"": # Calculate basic numbers for plotting. trials = _filter_nonfinite( [trial for trial in study.trials if trial.state == TrialState.COMPLETE], target=target ) if len(trials) == 0: _logger.warning(""Your study does not have any completed trials."") _, ax = plt.subplots() return ax all_params = {p_name for t in trials for p_name in t.params.keys()} if params is None: sorted_params = sorted(all_params) elif len(params) <= 1: _logger.warning(""The length of params must be greater than 1."") _, ax = plt.subplots() return ax else: for input_p_name in params: if input_p_name not in all_params: raise ValueError(""Parameter {} does not exist in your study."".format(input_p_name)) sorted_params = sorted(set(params)) n_params = len(sorted_params) plt.style.use(""ggplot"") # Use ggplot style sheet for similar outputs to plotly. if n_params == 2: # Set up the graph style. fig, axs = plt.subplots() axs.set_title(""Contour Plot"") cmap = _set_cmap(study, target) contour_point_num = 100 # Prepare data and draw contour plots. if params: x_param = params[0] y_param = params[1] else: x_param = sorted_params[0] y_param = sorted_params[1] cs = _generate_contour_subplot( trials, x_param, y_param, axs, cmap, contour_point_num, target ) if isinstance(cs, ContourSet): axcb = fig.colorbar(cs) axcb.set_label(target_name) else: # Set up the graph style. fig, axs = plt.subplots(n_params, n_params) fig.suptitle(""Contour Plot"") cmap = _set_cmap(study, target) contour_point_num = 100 # Prepare data and draw contour plots. 
cs_list = [] for x_i, x_param in enumerate(sorted_params): for y_i, y_param in enumerate(sorted_params): ax = axs[y_i, x_i] cs = _generate_contour_subplot( trials, x_param, y_param, ax, cmap, contour_point_num, target ) if isinstance(cs, ContourSet): cs_list.append(cs) if cs_list: axcb = fig.colorbar(cs_list[0], ax=axs) axcb.set_label(target_name) return axs ","def _get_contour_plot( study: Study, params: Optional[List[str]] = None, target: Optional[Callable[[FrozenTrial], float]] = None, target_name: str = ""Objective Value"", ) -> ""Axes"": # Calculate basic numbers for plotting. trials = _filter_nonfinite( study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target ) if len(trials) == 0: _logger.warning(""Your study does not have any completed trials."") _, ax = plt.subplots() return ax all_params = {p_name for t in trials for p_name in t.params.keys()} if params is None: sorted_params = sorted(all_params) elif len(params) <= 1: _logger.warning(""The length of params must be greater than 1."") _, ax = plt.subplots() return ax else: for input_p_name in params: if input_p_name not in all_params: raise ValueError(""Parameter {} does not exist in your study."".format(input_p_name)) sorted_params = sorted(set(params)) n_params = len(sorted_params) plt.style.use(""ggplot"") # Use ggplot style sheet for similar outputs to plotly. if n_params == 2: # Set up the graph style. fig, axs = plt.subplots() axs.set_title(""Contour Plot"") cmap = _set_cmap(study, target) contour_point_num = 100 # Prepare data and draw contour plots. if params: x_param = params[0] y_param = params[1] else: x_param = sorted_params[0] y_param = sorted_params[1] cs = _generate_contour_subplot( trials, x_param, y_param, axs, cmap, contour_point_num, target ) if isinstance(cs, ContourSet): axcb = fig.colorbar(cs) axcb.set_label(target_name) else: # Set up the graph style. fig, axs = plt.subplots(n_params, n_params) fig.suptitle(""Contour Plot"") cmap = _set_cmap(study, target) contour_point_num = 100 # Prepare data and draw contour plots. cs_list = [] for x_i, x_param in enumerate(sorted_params): for y_i, y_param in enumerate(sorted_params): ax = axs[y_i, x_i] cs = _generate_contour_subplot( trials, x_param, y_param, ax, cmap, contour_point_num, target ) if isinstance(cs, ContourSet): cs_list.append(cs) if cs_list: axcb = fig.colorbar(cs_list[0], ax=axs) axcb.set_label(target_name) return axs " 3549,"def _get_doc_content(project, version, doc): storage_path = project.get_storage_path( 'json', version_slug=version.slug, include_file=False, version_type=version.type, ) file_path = build_media_storage.join(storage_path, f'{doc}.fjson') try: with build_media_storage.open(file_path) as file: file_contents = file.read() return json.loads(file_contents) except Exception: # noqa log.warning('Unable to read file: %s', file_path) return None ","def _get_doc_content(project, version, doc): storage_path = project.get_storage_path( 'json', version_slug=version.slug, include_file=False, version_type=version.type, ) file_path = build_media_storage.join(storage_path, f'{doc}.fjson') try: with build_media_storage.open(file_path) as file: return json.load(file) except Exception: # noqa log.warning('Unable to read file: %s', file_path) return None " 27383,"def produce_grid( tuple_of_limits: Tuple[float, ...], grid_spacing: float ) -> np.ndarray: """"""Produce a 3D grid for the simulation system. The partitioning is based on the tuple of Cartesian Coordinate limits calculated in an earlier step. 
Parameters ---------- tuple_of_limits : tuple ``x_min, x_max, y_min, y_max, z_min, z_max`` grid_spacing : float grid size in all directions in ångström Returns ------- grid : array ``numpy.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing, z_min:z_max:grid_spacing]`` """""" x_min, x_max, y_min, y_max, z_min, z_max = tuple_of_limits grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing, z_min:z_max:grid_spacing] return grid ","def produce_grid( tuple_of_limits: Tuple[int, int, int, int, int, int], grid_spacing: float ) -> np.ndarray: """"""Produce a 3D grid for the simulation system. The partitioning is based on the tuple of Cartesian Coordinate limits calculated in an earlier step. Parameters ---------- tuple_of_limits : tuple ``x_min, x_max, y_min, y_max, z_min, z_max`` grid_spacing : float grid size in all directions in ångström Returns ------- grid : array ``numpy.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing, z_min:z_max:grid_spacing]`` """""" x_min, x_max, y_min, y_max, z_min, z_max = tuple_of_limits grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing, z_min:z_max:grid_spacing] return grid " 40109,"def load_information(file_path): dir_checksec = Path(__file__).parent.parent print(str(dir_checksec)) shell_skript = dir_checksec/'shell_skript/checksec' print(str(shell_skript)) install_shell_skript = dir_checksec/'install.sh' print(str(install_shell_skript)) if not shell_skript.exists(): execute_shell_command([str(install_shell_skript)]) json_file_information = execute_shell_command(str(shell_skript) + ' --file=' + str(file_path) + ' --format=json --extended') dict_file_information = json.loads(json_file_information) return dict_file_information ","def load_information(file_path): dir_checksec = Path(__file__).parent.parent print(str(dir_checksec)) shell_skript = dir_checksec/'shell_skript/checksec' print(str(shell_skript)) install_shell_skript = dir_checksec/'install.sh' print(str(install_shell_skript)) if not shell_skript.exists(): execute_shell_command([str(install_shell_skript)]) json_file_information = execute_shell_command(f'{shell_skript} --file={file_path} --format=json --extended') dict_file_information = json.loads(json_file_information) return dict_file_information " 30664,"def get_threat_detail(threats): """""" Iterate over threat details from the response and retrieve details of threats. :param threats: list of threats from response :return: list of detailed elements of threats :rtype: list """""" return [{ 'Title': threat.get('title', ''), 'Category': threat.get('category', ''), 'Severity': threat.get('severity', ''), 'Description': threat.get('description', ''), 'Cve': threat.get('cves', ''), 'Source': threat.get('source', ''), 'Published': threat.get('published', ''), 'Updated': threat.get('updated', ''), 'ThreatLastTrendingOn': threat.get('threatLastTrendingOn', ''), 'Trending': threat.get('trending', '') } for threat in threats] ","def get_threat_detail(threats): """""" Iterate over threat details from the response and retrieve details of threats. 
:param threats: list of threats from response :return: list of detailed elements of threats :rtype: list """""" return [{ 'Title': threat.get('title', ''), 'Category': threat.get('category', ''), 'Severity': threat.get('severity', ''), 'Description': threat.get('description', ''), 'Cve': threat.get('cves', []), 'Source': threat.get('source', ''), 'Published': threat.get('published', ''), 'Updated': threat.get('updated', ''), 'ThreatLastTrendingOn': threat.get('threatLastTrendingOn', ''), 'Trending': threat.get('trending', '') } for threat in threats] " 7369,"def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """""" Masked normalized cross-correlation between arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. ""Masked FFT registration"". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """""" if mode not in {'full', 'same'}: raise ValueError(f""Correlation mode '{mode}' is not valid."") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError(""complex-valued arr1, arr2 are not supported"") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). 
Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. 
out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out ","def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), overlap_ratio=0.3): """"""Return the normalized cross-correlation between two masked arrays. Parameters ---------- arr1 : ndarray First array. arr2 : ndarray Seconds array. The dimensions of `arr2` along axes that are not transformed should be equal to that of `arr1`. m1 : ndarray Mask of `arr1`. The mask should evaluate to `True` (or 1) on valid pixels. `m1` should have the same shape as `arr1`. m2 : ndarray Mask of `arr2`. The mask should evaluate to `True` (or 1) on valid pixels. `m2` should have the same shape as `arr2`. mode : {'full', 'same'}, optional 'full': This returns the convolution at each point of overlap. At the end-points of the convolution, the signals do not overlap completely, and boundary effects may be seen. 'same': The output is the same size as `arr1`, centered with respect to the `‘full’` output. Boundary effects are less prominent. axes : tuple of ints, optional Axes along which to compute the cross-correlation. overlap_ratio : float, optional Minimum allowed overlap ratio between images. The correlation for translations corresponding with an overlap ratio lower than this threshold will be ignored. A lower `overlap_ratio` leads to smaller maximum translation, while a higher `overlap_ratio` leads to greater robustness against spurious matches due to small overlap between masked images. Returns ------- out : ndarray Masked normalized cross-correlation. Raises ------ ValueError : if correlation `mode` is not valid, or array dimensions along non-transformation axes are not equal. References ---------- .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain. IEEE Transactions on Image Processing, vol. 21(5), pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402` .. [2] D. Padfield. ""Masked FFT registration"". In Proc. Computer Vision and Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """""" if mode not in {'full', 'same'}: raise ValueError(f""Correlation mode '{mode}' is not valid."") fixed_image = np.asarray(arr1) moving_image = np.asarray(arr2) float_dtype = _supported_float_type( [fixed_image.dtype, moving_image.dtype] ) if float_dtype.kind == 'c': raise ValueError(""complex-valued arr1, arr2 are not supported"") fixed_image = fixed_image.astype(float_dtype) fixed_mask = np.array(m1, dtype=bool) moving_image = moving_image.astype(float_dtype) moving_mask = np.array(m2, dtype=bool) eps = np.finfo(float_dtype).eps # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) for axis in (all_axes - set(axes)): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( f'Array shapes along non-transformation axes should be ' f'equal, but dimensions along axis {axis} are not.') # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly # larger shape (`fast_shape`). Then, after all fourier transforms are done, # we slice back to`final_shape` using `final_slice`. 
final_shape = list(arr1.shape) for axis in axes: final_shape[axis] = fixed_image.shape[axis] + \ moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or # 7) fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes]) # We use the new scipy.fft because they allow leaving the transform axes # unchanged which was not possible with scipy.fftpack's # fftn/ifftn in older versions of SciPy. # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4) # results in arr_fft shape (4, 4, 7) fft = partial(fftmodule.fftn, s=fast_shape, axes=axes) _ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes) def ifft(x): return _ifft(x).real fixed_image[np.logical_not(fixed_mask)] = 0.0 moving_image[np.logical_not(moving_mask)] = 0.0 # N-dimensional analog to rotation by 180deg is flip over all # relevant axes. # See [1] for discussion. rotated_moving_image = _flip(moving_image, axes=axes) rotated_moving_mask = _flip(moving_mask, axes=axes) fixed_fft = fft(fixed_image) rotated_moving_fft = fft(rotated_moving_image) fixed_mask_fft = fft(fixed_mask.astype(float_dtype)) rotated_moving_mask_fft = fft(rotated_moving_mask.astype(float_dtype)) # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. number_overlap_masked_px = ifft(rotated_moving_mask_fft * fixed_mask_fft) number_overlap_masked_px[:] = np.round(number_overlap_masked_px) number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) masked_correlated_rotated_moving_fft = ifft( fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) numerator -= masked_correlated_fixed_fft * \ masked_correlated_rotated_moving_fft / number_overlap_masked_px fixed_squared_fft = fft(np.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) fixed_denom -= np.square(masked_correlated_fixed_fft) / \ number_overlap_masked_px fixed_denom[:] = np.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(np.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \ number_overlap_masked_px moving_denom[:] = np.fmax(moving_denom, 0.0) denom = np.sqrt(fixed_denom * moving_denom) # Slice back to expected convolution shape. numerator = numerator[final_slice] denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] if mode == 'same': _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = _centering(number_overlap_masked_px) # Pixels where `denom` is very small will introduce large # numbers after division. To get around this problem, # we zero-out problematic pixels. tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True) nonzero_indices = denom > tol # explicitly set out dtype for compatibility with SciPy < 1.4, where # fftmodule will be numpy.fft which always uses float64 dtype. 
out = np.zeros_like(denom, dtype=float_dtype) out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices] np.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, axis=axes, keepdims=True) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out " 30723,"def test_module(): """"""Test module to verify settings """""" errors = list() data = dict() if TENANT_ID == '0000000-0000-0000-000000000' or TENANT_ID == '': errors.append('Incorrect tenant id') if APIKEY == '' or APIKEY == '': errors.append('Incorrect API key') if SOARTOKEN == '' or SOARTOKEN == '': errors.append('Please set proper SOAR token') if str(DAYS_BACK).isdigit(): if int(DAYS_BACK) <= 0 or int(DAYS_BACK) > 100: errors.append('DAYS_BACK must be in range > 0 and <= 100') else: errors.append('DAYS_BACK has to be an integer') if str(ITEMS_TO_FETCH).isdigit(): if int(ITEMS_TO_FETCH) <= 0 or int(ITEMS_TO_FETCH) > 100: errors.append('ITEMS_TO_FETCH must be in range > 0 and <= 100') else: errors.append('ITEMS_TO_FETCH has to be an integer') if len(errors) > 0: return_results( {""Type"": entryTypes[""error""], ""ContentsFormat"": formats[""text""], ""Contents"": ""Errors:\n{}"".format(""\n"".join(errors))}) # So far so good, now test the API call data['test'] = True result = http_request('POST', '/artifacts/alerts', json_dict=data) if 'msg' in result and result['msg'] == ""Test OK"": return_results('ok') else: return_results( {""Type"": entryTypes[""error""], ""ContentsFormat"": formats[""text""], ""Contents"": ""Errors:\n%s"" % repr(result)}) ","def test_module(): """"""Test module to verify settings """""" errors = list() data = dict() if TENANT_ID == '0000000-0000-0000-000000000' or TENANT_ID == '': errors.append('Incorrect tenant id') if APIKEY == '' or APIKEY == '': errors.append('Incorrect API key') if SOARTOKEN == '' or SOARTOKEN == '': errors.append('Please set proper SOAR token') if str(DAYS_BACK).isdigit(): if int(DAYS_BACK) <= 0 or int(DAYS_BACK) > 100: errors.append('DAYS_BACK must be in range > 0 and <= 100') else: errors.append('DAYS_BACK has to be an integer') if str(ITEMS_TO_FETCH).isdigit(): if int(ITEMS_TO_FETCH) <= 0 or int(ITEMS_TO_FETCH) > 100: errors.append('ITEMS_TO_FETCH must be in range > 0 and <= 100') else: errors.append('ITEMS_TO_FETCH has to be an integer') if len(errors) > 0: return_results( {""Type"": entryTypes[""error""], ""ContentsFormat"": formats[""text""], ""Contents"": ""Errors:\n{}"".format(""\n"".join(errors))}) # So far so good, now test the API call data['test'] = True result = http_request('POST', '/artifacts/alerts', json_dict=data) if 'msg' in result and result['msg'] == ""Test OK"": return_results('ok') else: return_error(""Errors:\n{}"".format(repr(result))) " 26308,"def test_recursive_in_package_data_glob(tmpdir_cwd): """""" Files matching recursive globs (**) in package_data should be included in the package data. #1806 """""" dist = Distribution(dict( script_name='setup.py', script_args=['build_py'], packages=[''], package_data={'': ['path/**/data']}, )) os.makedirs('path/subpath/subsubpath') open('path/subpath/subsubpath/data', 'w').close() dist.parse_command_line() dist.run_commands() assert stat.S_IREG(os.stat('build/lib/path/subpath/subsubpath/data').st_mode), \ ""File is not included"" ","def test_recursive_in_package_data_glob(tmpdir_cwd): """""" Files matching recursive globs (**) in package_data should be included in the package data. 
#1806 """""" dist = Distribution(dict( script_name='setup.py', script_args=['build_py'], packages=[''], package_data={'': ['path/**/data']}, )) os.makedirs('path/subpath/subsubpath') open('path/subpath/subsubpath/data', 'w').close() dist.parse_command_line() dist.run_commands() assert stat.S_ISREG(os.stat('build/lib/path/subpath/subsubpath/data').st_mode), \ ""File is not included"" " 43439,"def CRotx(theta): r""""""Two-qubit controlled rotation about the x axis. Args: theta (float): rotation angle Returns: array: unitary 4x4 rotation matrix ` """""" return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta/2), -1*1j*np.sin(theta/2)], [0, 0, -1*1j*np.sin(theta/2), np.cos(theta/2)]]) ","def CRotx(theta): r""""""Two-qubit controlled rotation about the x axis. Args: theta (float): rotation angle Returns: array: unitary 4x4 rotation matrix """""" return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, np.cos(theta/2), -1*1j*np.sin(theta/2)], [0, 0, -1*1j*np.sin(theta/2), np.cos(theta/2)]]) " 1147,"def process_initializer(cwd): """"""Initializes the environment of the child process"""""" os.chdir(cwd) os.environ[""NO_ET""] = ""1"" ","def process_initializer(cwd): """"""Initializes the environment of the child process"""""" os.chdir(cwd) os.environ[""NO_NIPYPE_ET""] = ""1"" " 59925,"def detect_topology(): rt = runtime() detect_remote_systems = rt.get_option( 'general/0/detect_remote_system_topology' ) config_file = rt.site_config.filename if config_file == '': config_prefix = os.path.join( os.getenv('HOME'), '.reframe/topology' ) else: config_prefix = os.path.dirname(config_file) config_prefix = os.path.join(config_prefix, '_meta') for part in rt.system.partitions: getlogger().debug(f'detecting topology info for {part.fullname}') found_procinfo = False found_devinfo = False if part.processor.info != {}: # Processor info set up already in the configuration getlogger().debug( f'> topology found in configuration file; skipping...' ) found_procinfo = True if part.devices: # Devices set up already in the configuration getlogger().debug( f'> devices found in configuration file; skipping...' ) found_devinfo = True if found_procinfo and found_devinfo: continue topo_file = os.path.join( config_prefix, f'{rt.system.name}-{part.name}', 'processor.json' ) dev_file = os.path.join( config_prefix, f'{rt.system.name}-{part.name}', 'devices.json' ) if not found_procinfo and os.path.exists(topo_file): getlogger().debug( f'> found topology file {topo_file!r}; loading...' ) part.processor._info = _load_info(topo_file) found_procinfo = True if not found_devinfo and os.path.exists(dev_file): getlogger().debug( f'> found devices file {dev_file!r}; loading...' 
) part._devices = _load_info(dev_file) found_devinfo = True if found_procinfo and found_devinfo: continue if not found_procinfo: # No topology found, try to auto-detect it getlogger().debug(f'> no topology file found; auto-detecting...') if _is_part_local(part): # Unconditionally detect the system for fully local partitions part.processor._info = cpuinfo() _save_info(topo_file, part.processor.info) elif detect_remote_systems: part.processor._info = _remote_detect(part) _save_info(topo_file, part.processor.info) getlogger().debug(f'> saved topology in {topo_file!r}') if not found_devinfo: getlogger().debug(f'> device auto-detection is not supported') ","def detect_topology(): rt = runtime() detect_remote_systems = rt.get_option( 'general/0/detect_remote_system_topology' ) config_file = rt.site_config.filename if config_file == '': config_prefix = os.path.join( os.getenv('HOME'), '.reframe/topology' ) else: config_prefix = os.path.join( os.path.dirname(config_file), '_meta' ) for part in rt.system.partitions: getlogger().debug(f'detecting topology info for {part.fullname}') found_procinfo = False found_devinfo = False if part.processor.info != {}: # Processor info set up already in the configuration getlogger().debug( f'> topology found in configuration file; skipping...' ) found_procinfo = True if part.devices: # Devices set up already in the configuration getlogger().debug( f'> devices found in configuration file; skipping...' ) found_devinfo = True if found_procinfo and found_devinfo: continue topo_file = os.path.join( config_prefix, f'{rt.system.name}-{part.name}', 'processor.json' ) dev_file = os.path.join( config_prefix, f'{rt.system.name}-{part.name}', 'devices.json' ) if not found_procinfo and os.path.exists(topo_file): getlogger().debug( f'> found topology file {topo_file!r}; loading...' ) part.processor._info = _load_info(topo_file) found_procinfo = True if not found_devinfo and os.path.exists(dev_file): getlogger().debug( f'> found devices file {dev_file!r}; loading...' ) part._devices = _load_info(dev_file) found_devinfo = True if found_procinfo and found_devinfo: continue if not found_procinfo: # No topology found, try to auto-detect it getlogger().debug(f'> no topology file found; auto-detecting...') if _is_part_local(part): # Unconditionally detect the system for fully local partitions part.processor._info = cpuinfo() _save_info(topo_file, part.processor.info) elif detect_remote_systems: part.processor._info = _remote_detect(part) _save_info(topo_file, part.processor.info) getlogger().debug(f'> saved topology in {topo_file!r}') if not found_devinfo: getlogger().debug(f'> device auto-detection is not supported') " 29696,"def process( frame: FrameType, child, state: dict[str, Any], *, stop: str | None = None, omit: Collection[str] = (), depth: int | None = None, ) -> dict[str, Any] | None: """"""Add counts from a frame stack onto existing state This recursively adds counts to the existing state dictionary and creates new entries for new functions. 
Parameters ---------- frame: The frame to process onto the state child: For internal use only state: The profile state to accumulate this frame onto, see ``create`` stop: Filenames that should stop processing if we enounter them omit: Filenames that we should omit from processing depth: For internal use only, how deep we are in the call stack Used to prevent stack overflow Examples -------- >>> import sys, threading >>> ident = threading.get_ident() # replace with your thread of interest >>> frame = sys._current_frames()[ident] >>> state = create() >>> process(frame, None, state) >>> state {'count': 1, 'identifier': 'root', 'description': 'root', 'children': {'...'}} See also -------- create merge """""" if depth is None: depth = sys.getrecursionlimit() - 50 if depth <= 0: return None if any(frame.f_code.co_filename.endswith(o) for o in omit): return None prev = frame.f_back if prev is not None and ( stop is None or not prev.f_code.co_filename.endswith(stop) ): new_state = process(prev, frame, state, stop=stop, depth=depth - 1) if new_state is None: return None state = new_state ident = identifier(frame) try: d = state[""children""][ident] except KeyError: d = { ""count"": 0, ""description"": info_frame(frame), ""children"": {}, ""identifier"": ident, } state[""children""][ident] = d state[""count""] += 1 if child is not None: return d else: d[""count""] += 1 return None ","def process( frame: FrameType, child, state: dict[str, Any], *, stop: str | None = None, omit: Collection[str] = (), depth: int | None = None, ) -> dict[str, Any] | None: """"""Add counts from a frame stack onto existing state This recursively adds counts to the existing state dictionary and creates new entries for new functions. Parameters ---------- frame: The frame to process onto the state child: For internal use only state: The profile state to accumulate this frame onto, see ``create`` stop: Filename suffix that should stop processing if we encounter it omit: Filenames that we should omit from processing depth: For internal use only, how deep we are in the call stack Used to prevent stack overflow Examples -------- >>> import sys, threading >>> ident = threading.get_ident() # replace with your thread of interest >>> frame = sys._current_frames()[ident] >>> state = create() >>> process(frame, None, state) >>> state {'count': 1, 'identifier': 'root', 'description': 'root', 'children': {'...'}} See also -------- create merge """""" if depth is None: depth = sys.getrecursionlimit() - 50 if depth <= 0: return None if any(frame.f_code.co_filename.endswith(o) for o in omit): return None prev = frame.f_back if prev is not None and ( stop is None or not prev.f_code.co_filename.endswith(stop) ): new_state = process(prev, frame, state, stop=stop, depth=depth - 1) if new_state is None: return None state = new_state ident = identifier(frame) try: d = state[""children""][ident] except KeyError: d = { ""count"": 0, ""description"": info_frame(frame), ""children"": {}, ""identifier"": ident, } state[""children""][ident] = d state[""count""] += 1 if child is not None: return d else: d[""count""] += 1 return None " 39133,"def load_tedlium_item( fileid: str, line: int, path: str, ext_audio: str, ext_txt: str ) -> Tuple[Tensor, int, str, int, int, int]: transcript_path = os.path.join(path, ""stm/"", fileid) with open(transcript_path + ext_txt) as f: transcript = f.readlines()[line] talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split( "" "", 6 ) wave_path = os.path.join(path, ""sph/"", fileid) 
waveform, sample_rate = torchaudio.load(wave_path + ext_audio) print(wave_path + ext_audio) # Calculate indexes for start time and endtime start_time = int(float(start_time) * sample_rate) end_time = int(float(end_time) * sample_rate) print(start_time, end_time) waveform = waveform[:, start_time:end_time] return ( waveform, sample_rate, transcript, talk_id, speaker_id, identifier, transcript, ) ","def load_tedlium_item( fileid: str, line: int, path: str, ext_audio: str, ext_txt: str ) -> Tuple[Tensor, int, str, int, int, int]: transcript_path = os.path.join(path, ""stm"", fileid) with open(transcript_path + ext_txt) as f: transcript = f.readlines()[line] talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split( "" "", 6 ) wave_path = os.path.join(path, ""sph/"", fileid) waveform, sample_rate = torchaudio.load(wave_path + ext_audio) print(wave_path + ext_audio) # Calculate indexes for start time and endtime start_time = int(float(start_time) * sample_rate) end_time = int(float(end_time) * sample_rate) print(start_time, end_time) waveform = waveform[:, start_time:end_time] return ( waveform, sample_rate, transcript, talk_id, speaker_id, identifier, transcript, ) " 53850,"def set_lb_backend_address_pool(cmd, instance, resource_group_name, vnet=None, backend_addresses=None, backend_addresses_config_file=None): if backend_addresses and backend_addresses_config_file: raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long if backend_addresses_config_file: if not isinstance(backend_addresses_config_file, list): raise CLIError('Config file must be a list. Please see example as a reference.') for addr in backend_addresses_config_file: if not isinstance(addr, dict): raise CLIError('Each address in config file must be a dictionary. 
Please see example as a reference.') (LoadBalancerBackendAddress, Subnet, VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress', 'Subnet', 'VirtualNetwork') addresses_pool = [] if backend_addresses: addresses_pool.extend(backend_addresses) if backend_addresses_config_file: addresses_pool.extend(backend_addresses_config_file) for addr in addresses_pool: if 'virtual_network' not in addr and vnet: addr['virtual_network'] = vnet # pylint: disable=line-too-long if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks try: if addresses_pool: new_addresses = [] for addr in addresses_pool: # vnet | subnet | status # name/id | name/id/null | ok # null | id | ok if 'virtual_network' in addr: address = LoadBalancerBackendAddress(name=addr['name'], virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)), subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None, ip_address=addr['ip_address']) elif 'subnet' in addr and is_valid_resource_id(addr['subnet']): address = LoadBalancerBackendAddress(name=addr['name'], subnet=Subnet(id=addr['subnet']), ip_address=addr['ip_address']) else: raise KeyError new_addresses.append(address) else: new_addresses = None except KeyError: raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet ' 'name | subnet id) information.') else: try: new_addresses = [LoadBalancerBackendAddress(name=addr['name'], virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)), ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None except KeyError: raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.') instance.load_balancer_backend_addresses = new_addresses return instance ","def set_lb_backend_address_pool(cmd, instance, resource_group_name, vnet=None, backend_addresses=None, backend_addresses_config_file=None): if backend_addresses and backend_addresses_config_file: raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long if backend_addresses_config_file: if not isinstance(backend_addresses_config_file, list): raise CLIError('Config file must be a list. Please see example as a reference.') for addr in backend_addresses_config_file: if not isinstance(addr, dict): raise CLIError('Each address in config file must be a dictionary. 
Please see example as a reference.') (LoadBalancerBackendAddress, Subnet, VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress', 'Subnet', 'VirtualNetwork') addresses_pool = [] if backend_addresses: addresses_pool.extend(backend_addresses) if backend_addresses_config_file: addresses_pool.extend(backend_addresses_config_file) for addr in addresses_pool: if 'virtual_network' not in addr and vnet: addr['virtual_network'] = vnet # pylint: disable=line-too-long if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks try: if addresses_pool: new_addresses = [] for addr in addresses_pool: # vnet | subnet | status # name/id | name/id/null | ok # null | id | ok if 'virtual_network' in addr: address = LoadBalancerBackendAddress(name=addr['name'], virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)), subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None, ip_address=addr['ip_address']) elif 'subnet' in addr and is_valid_resource_id(addr['subnet']): address = LoadBalancerBackendAddress(name=addr['name'], subnet=Subnet(id=addr['subnet']), ip_address=addr['ip_address']) else: raise KeyError new_addresses.append(address) else: new_addresses = None except KeyError: raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet ' 'name | subnet id) information.') else: try: new_addresses = [LoadBalancerBackendAddress(name=addr['name'], virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)), ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None except KeyError: raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.') if new_addresses: instance.load_balancer_backend_addresses = new_addresses return instance " 19346,"def python_interpreter(args): """"""A python interpreter is the default interpreter """""" # Fake a main python shell by setting __name__ to __main__. console = code.InteractiveConsole({'__name__': '__main__', 'spack': spack}) if ""PYTHONSTARTUP"" in os.environ: startup_file = os.environ[""PYTHONSTARTUP""] if os.path.isfile(startup_file): with open(startup_file) as startup: console.runsource(startup.read(), startup_file, 'exec') if args.python_command: console.runsource(args.python_command) elif args.python_args: sys.argv = args.python_args with open(args.python_args[0]) as file: console.runsource(file.read(), args.python_args[0], 'exec') else: # Provides readline support, allowing user to use arrow keys console.push('import readline') # Provide tabcompletion console.push('from rlcompleter import Completer') console.push('readline.set_completer(Completer(locals()).complete)') console.interact(""Spack version %s\nPython %s, %s %s"" % (spack.spack_version, platform.python_version(), platform.system(), platform.machine())) ","def python_interpreter(args): """"""A python interpreter is the default interpreter """""" # Fake a main python shell by setting __name__ to __main__. 
console = code.InteractiveConsole({'__name__': '__main__', 'spack': spack}) if ""PYTHONSTARTUP"" in os.environ: startup_file = os.environ[""PYTHONSTARTUP""] if os.path.isfile(startup_file): with open(startup_file) as startup: console.runsource(startup.read(), startup_file, 'exec') if args.python_command: console.runsource(args.python_command) elif args.python_args: sys.argv = args.python_args with open(args.python_args[0]) as file: console.runsource(file.read(), args.python_args[0], 'exec') else: # Provides readline support, allowing user to use arrow keys console.push('import readline') # Provide tabcompletion console.push('import rlcompleter') console.push('readline.set_completer(Completer(locals()).complete)') console.interact(""Spack version %s\nPython %s, %s %s"" % (spack.spack_version, platform.python_version(), platform.system(), platform.machine())) " 4547,"def _sample_locations_between_surfaces(mesh, affine, inner_mesh, n_points=10): outer_vertices, _ = mesh inner_vertices, _ = inner_mesh # when we drop support for np 1.5 replace the next 2 lines with # sample_locations = np.linspace(inner_vertices, outer_vertices, n_points) steps = np.linspace(0, 1, n_points)[:, None, None] sample_locations = inner_vertices + steps * ( outer_vertices - inner_vertices) sample_locations = np.rollaxis(sample_locations, 1) sample_locations_voxel_space = np.asarray( resampling.coord_transform( *np.vstack(sample_locations).T, affine=np.linalg.inv(affine))).T.reshape(sample_locations.shape) return sample_locations_voxel_space ","def _sample_locations_between_surfaces(mesh, inner_mesh, affine, n_points=10): outer_vertices, _ = mesh inner_vertices, _ = inner_mesh # when we drop support for np 1.5 replace the next 2 lines with # sample_locations = np.linspace(inner_vertices, outer_vertices, n_points) steps = np.linspace(0, 1, n_points)[:, None, None] sample_locations = inner_vertices + steps * ( outer_vertices - inner_vertices) sample_locations = np.rollaxis(sample_locations, 1) sample_locations_voxel_space = np.asarray( resampling.coord_transform( *np.vstack(sample_locations).T, affine=np.linalg.inv(affine))).T.reshape(sample_locations.shape) return sample_locations_voxel_space " 24613,"def molecule(symbol: str, Z: Integral = None) -> Particle | CustomParticle: """""" Parses molecules symbols into a |CustomParticle| ot |Particle| if possible. Parameters ---------- symbol Symbol of the molecule to be parsed. Z charge number if not present in symbol. Returns ------- A |Particle| object if the input could be parsed as such, or a |CustomParticle| with the provided symbol, charge, and a mass corresponding to the sum of the molecule elements. """""" try: return Particle(symbol, Z=Z) except ParticleError: element_dict, bare_symbol, Z = _parse_and_check_molecule_input(symbol, Z) mass = 0 * u.kg for element_symbol, amount in element_dict.items(): try: element = Particle(element_symbol) except ParticleError as e: raise InvalidParticleError( f""Could not identify {element_symbol}."" ) from e mass += amount * element.mass if Z is None: charge = 0 * u.C else: charge = Z * const.e.si bare_symbol += f"" {-Z}-"" if Z < 0 else f"" {Z}+"" return CustomParticle(mass=mass, charge=charge, symbol=bare_symbol) ","def molecule(symbol: str, Z: Integral = None) -> Union[Particle, CustomParticle]: """""" Parses molecules symbols into a |CustomParticle| ot |Particle| if possible. Parameters ---------- symbol Symbol of the molecule to be parsed. Z charge number if not present in symbol. 
Returns ------- A |Particle| object if the input could be parsed as such, or a |CustomParticle| with the provided symbol, charge, and a mass corresponding to the sum of the molecule elements. """""" try: return Particle(symbol, Z=Z) except ParticleError: element_dict, bare_symbol, Z = _parse_and_check_molecule_input(symbol, Z) mass = 0 * u.kg for element_symbol, amount in element_dict.items(): try: element = Particle(element_symbol) except ParticleError as e: raise InvalidParticleError( f""Could not identify {element_symbol}."" ) from e mass += amount * element.mass if Z is None: charge = 0 * u.C else: charge = Z * const.e.si bare_symbol += f"" {-Z}-"" if Z < 0 else f"" {Z}+"" return CustomParticle(mass=mass, charge=charge, symbol=bare_symbol) " 34230,"def get_component_class(component_name: Text) -> Type[""Component""]: """"""Resolve component name to a registered components class."""""" if component_name not in registered_components: if component_name not in old_style_names: try: return class_from_module_path(component_name) except ModuleNotFoundError as e: # when component_name is a path to a class but that path is invalid raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n{}"".format( component_name, e.msg ) ) except AttributeError: # when component_name is a path to a class but the path does not contain that class module_name, _, class_name = component_name.rpartition(""."") raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n"" ""Cannot find class '{}' in module {}."".format( component_name, class_name, module_name ) ) except ImportError: # when component_name is a class name and not part of old_style_names raise Exception( ""Failed to find component class for '{0}'.Unknown component name.\n"" ""Cannot import class '{0}' from global namespace."".format( component_name ) ) else: # DEPRECATED ensures compatibility, remove in future versions logger.warning( ""DEPRECATION warning: your nlu config file "" ""contains old style component name `{}`, "" ""you should change it to its class name: `{}`."" """".format(component_name, old_style_names[component_name]) ) component_name = old_style_names[component_name] return registered_components[component_name] ","def get_component_class(component_name: Text) -> Type[""Component""]: """"""Resolve component name to a registered components class."""""" if component_name not in registered_components: if component_name not in old_style_names: try: return class_from_module_path(component_name) except ModuleNotFoundError as e: # when component_name is a path to a class but that path is invalid raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n{}"".format( component_name, e.msg ) ) except AttributeError: # when component_name is a path to a class but the path does not contain that class module_name, _, class_name = component_name.rpartition(""."") raise Exception( ""Failed to find component class for '{}'.Unknown component name.\n"" """".format( component_name, class_name, module_name ) ) except ImportError: # when component_name is a class name and not part of old_style_names raise Exception( ""Failed to find component class for '{0}'.Unknown component name.\n"" ""Cannot import class '{0}' from global namespace."".format( component_name ) ) else: # DEPRECATED ensures compatibility, remove in future versions logger.warning( ""DEPRECATION warning: your nlu config file "" ""contains old style component name `{}`, "" ""you should change it to its class name: 
`{}`."" """".format(component_name, old_style_names[component_name]) ) component_name = old_style_names[component_name] return registered_components[component_name] " 19631,"def find_preferably_prefixed_executable(executable, build_prefix=None, all_matches=False, host_subdir=None): if host_subdir == ""osx-64"": exe_prefix = ""x86_64-*"" elif host_subdir == ""osx-arm64"": exe_prefix = ""arm64-*"" elif not host_subdir: exe_prefix = ""*"" else: import warnings warnings.warn('unknown host_subdir' % host_subdir, UserWarning) exe_prefix = ""*"" found = find_executable(exe_prefix + executable, build_prefix, all_matches) if not found: # It is possible to force non-prefixed exes by passing os.sep as the # first character in executable. basename makes this work. found = find_executable(os.path.basename(executable), build_prefix) return found ","def find_preferably_prefixed_executable(executable, build_prefix=None, all_matches=False, host_subdir=None): if host_subdir == ""osx-64"": exe_prefix = ""x86_64-*"" elif host_subdir == ""osx-arm64"": exe_prefix = ""arm64-*"" elif not host_subdir: exe_prefix = ""*"" else: import warnings warnings.warn('unknown host_subdir %s' % host_subdir, UserWarning) exe_prefix = ""*"" found = find_executable(exe_prefix + executable, build_prefix, all_matches) if not found: # It is possible to force non-prefixed exes by passing os.sep as the # first character in executable. basename makes this work. found = find_executable(os.path.basename(executable), build_prefix) return found " 8121,"def sample_at_coords(smap, coordinates): """""" Samples the data in a map at given series of coordinates. Parameters ---------- smap : `~sunpy.map.GenericMap` A SunPy map. coordinates : `~astropy.coordinates.SkyCoord` A list of input coordinates Returns ------- `~numpy.array` A `numpy.array` corresponding to the data obtained from the map, at the input coordinates. """""" return smap.data[smap.wcs.world_to_array_index(coordinates)] ","def sample_at_coords(smap, coordinates): """""" Samples the data in a map at given series of coordinates. Parameters ---------- smap : `~sunpy.map.GenericMap` A SunPy map. coordinates : `~astropy.coordinates.SkyCoord` A list of input coordinates Returns ------- `numpy.array` A `numpy.array` corresponding to the data obtained from the map, at the input coordinates. 
"""""" return smap.data[smap.wcs.world_to_array_index(coordinates)] " 38947,"def copy_code(code: CodeType, **update: Any) -> CodeType: if PY38: # pragma: no cover return code.replace(**update) # type: ignore return CodeType(*[update.get(arg, getattr(code, arg)) for arg in CODE_ARGS]) ","def copy_code(code: CodeType, **update: Any) -> CodeType: if PY38: return code.replace(**update) # type: ignore return CodeType(*[update.get(arg, getattr(code, arg)) for arg in CODE_ARGS]) " 58072,"def get_whitelist_iocs_command( client: Client, args=Dict[str, Any] ) -> List[CommandResults]: """""" get_tags commands: Returns paginated list of tags """""" page = int(args.get(""page"", 1)) page_size = int(args.get(""page_size"", 10)) query = args.get(""q"") response = client.get_whitelist_iocs(page, page_size, query) ioc_list = response.get(""data"", {}).get(""results"", []) results = [] for ioc in ioc_list: results.append( CommandResults( readable_output=tableToMarkdown(""Whitelist IOC"", ioc, removeNull=True), outputs_prefix=""CTIX.IOC"", outputs_key_field=""value"", outputs=ioc, ) ) return results ","def get_whitelist_iocs_command( client: Client, args=Dict[str, Any] ) -> List[CommandResults]: """""" get_tags commands: Returns paginated list of tags """""" page = arg_to_number(args.get(""page"", 1)) page_size = arg_to_number(args.get(""page_size"", 10)) query = args.get(""q"") response = client.get_whitelist_iocs(page, page_size, query) ioc_list = response.get(""data"", {}).get(""results"", []) results = [] for ioc in ioc_list: results.append( CommandResults( readable_output=tableToMarkdown(""Whitelist IOC"", ioc, removeNull=True), outputs_prefix=""CTIX.IOC"", outputs_key_field=""value"", outputs=ioc, ) ) return results " 12345,"def encodeVarint(integer): """""""""""" if integer < 0: logger.error('varint cannot be < 0') raise SystemExit if integer < 253: return pack('>B', integer) if integer >= 253 and integer < 65536: return pack('>B', 253) + pack('>H', integer) if integer >= 65536 and integer < 4294967296: return pack('>B', 254) + pack('>I', integer) if integer >= 4294967296 and integer < 18446744073709551616: return pack('>B', 255) + pack('>Q', integer) if integer >= 18446744073709551616: logger.error('varint cannot be >= 18446744073709551616') raise SystemExit ","def encodeVarint(integer): """"""Convert integer into varint bytes"""""" if integer < 0: logger.error('varint cannot be < 0') raise SystemExit if integer < 253: return pack('>B', integer) if integer >= 253 and integer < 65536: return pack('>B', 253) + pack('>H', integer) if integer >= 65536 and integer < 4294967296: return pack('>B', 254) + pack('>I', integer) if integer >= 4294967296 and integer < 18446744073709551616: return pack('>B', 255) + pack('>Q', integer) if integer >= 18446744073709551616: logger.error('varint cannot be >= 18446744073709551616') raise SystemExit " 15348,"def library_payload(roon_server, zone_id, media_content_id): """"""Create response payload for the library."""""" opts = { ""hierarchy"": ""browse"", ""zone_or_output_id"": zone_id, ""count"": ITEM_LIMIT, } # Roon starts browsing for a zone where it left off - so start from the top unless otherwise specified if media_content_id is None or media_content_id == ""Explore"": opts[""pop_all""] = True content_id = ""Explore"" else: opts[""item_key""] = media_content_id content_id = media_content_id result_header = roon_server.roonapi.browse_browse(opts) _LOGGER.debug(""Result_header %s"", result_header) header = result_header[""list""] title = header.get(""title"") 
subtitle = header.get(""subtitle"") if subtitle is None: list_title = title else: list_title = f""{title} ({subtitle})"" total_count = header[""count""] library_image_id = header.get(""image_key"") library_info = BrowseMedia( title=list_title, media_content_id=content_id, media_content_type=""library"", media_class=MEDIA_CLASS_DIRECTORY, can_play=False, can_expand=True, children=[], ) result_detail = roon_server.roonapi.browse_load(opts) _LOGGER.debug(""result_detail %s"", result_detail) items = result_detail[""items""] count = len(items) if count < total_count: _LOGGER.debug( ""Exceeded limit of %d, loaded %d/%d"", ITEM_LIMIT, count, total_count ) for item in items: if item.get(""title"") in EXCLUDE_ITEMS: continue entry = item_payload(roon_server, item, library_image_id) library_info.children.append(entry) return library_info ","def library_payload(roon_server, zone_id, media_content_id): """"""Create response payload for the library."""""" opts = { ""hierarchy"": ""browse"", ""zone_or_output_id"": zone_id, ""count"": ITEM_LIMIT, } # Roon starts browsing for a zone where it left off - so start from the top unless otherwise specified if media_content_id is None or media_content_id == ""Explore"": opts[""pop_all""] = True content_id = ""Explore"" else: opts[""item_key""] = media_content_id content_id = media_content_id result_header = roon_server.roonapi.browse_browse(opts) _LOGGER.debug(""Result_header %s"", result_header) header = result_header[""list""] title = header.get(""title"") subtitle = header.get(""subtitle"") if subtitle is None: list_title = title else: list_title = f""{title} ({subtitle})"" total_count = header[""count""] library_image_id = header.get(""image_key"") library_info = BrowseMedia( title=list_title, media_content_id=content_id, media_content_type=""library"", media_class=MEDIA_CLASS_DIRECTORY, can_play=False, can_expand=True, children=[], ) result_detail = roon_server.roonapi.browse_load(opts) _LOGGER.debug(""Result detail %s"", result_detail) items = result_detail[""items""] count = len(items) if count < total_count: _LOGGER.debug( ""Exceeded limit of %d, loaded %d/%d"", ITEM_LIMIT, count, total_count ) for item in items: if item.get(""title"") in EXCLUDE_ITEMS: continue entry = item_payload(roon_server, item, library_image_id) library_info.children.append(entry) return library_info " 31942,"def get_identity_info() -> List[Dict]: context = demisto.context() alerts = demisto.get(context, 'PaloAltoNetworksXDR.OriginalAlert') users = demisto.get(context, 'AWS.IAM.Users') if not users: raise DemistoException('AWS users are not in context') access_keys = users[0].get('AccessKeys', []) if not isinstance(alerts, list): alerts = [alerts] results = [] for alert in alerts: alert_event = alert.get('event') username = alert_event.get('identity_orig').get('userName') access_keys_ids = list({access_key.get('AccessKeyId') for access_key in access_keys if access_key and access_key.get('UserName') == username}) res = {'Name': alert_event.get('identity_name'), 'Type': alert_event.get('identity_type'), 'Sub Type': alert_event.get('identity_sub_type'), 'Uuid': alert_event.get('identity_uuid'), 'Provider': alert_event.get('cloud_provider'), 'Access Keys': access_keys_ids} if res not in results: results.append(res) return results ","def get_identity_info() -> List[Dict]: context = demisto.context() alerts = demisto.get(context, 'PaloAltoNetworksXDR.OriginalAlert') users = demisto.get(context, 'AWS.IAM.Users') if not users: raise DemistoException('AWS users are not in context') 
access_keys = users[0].get('AccessKeys', []) if not isinstance(alerts, list): alerts = [alerts] results = [] for alert in alerts: alert_event = alert.get('event') username = alert_event.get('identity_orig').get('userName') access_keys_ids = list({access_key.get('AccessKeyId') for access_key in access_keys if isinstance(access_key, dict) and access_key.get('UserName') == username}) res = {'Name': alert_event.get('identity_name'), 'Type': alert_event.get('identity_type'), 'Sub Type': alert_event.get('identity_sub_type'), 'Uuid': alert_event.get('identity_uuid'), 'Provider': alert_event.get('cloud_provider'), 'Access Keys': access_keys_ids} if res not in results: results.append(res) return results " 7205,"def euler_number(image, connectivity=None): """"""Calculate the Euler characteristic in binary image. A neighbourhood configuration is constructed, and a LUT is applied for each configuration. Parameters ---------- image: (N, M) ndarray or (N, M, D) ndarray. 2D or 3D images. If image is not binary, all values strictly greater than zero are considered as the object. connectivity : int, optional Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. Accepted values are ranging from 1 to input.ndim. If ``None``, a full connectivity of ``input.ndim`` is used. 4 or 8 neighborhoods are defined for 2D images (connectivity 1 and 2, respectively). 6 or 26 neighborhoods are defined for 3D images, (connectivity 1 and 3, respectively). Connectivity 2 is not defined. Returns ------- euler_number : int Euler characteristic of the set of all objects in the image. Notes ----- The Euler characteristic is an integer number that describes the topology of the set of all objects in the input image. If object is 4-connected, then background is 8-connected, and conversely. References ---------- .. [1] S. Rivollier. Analyse d’image geometrique et morphometrique par diagrammes de forme et voisinages adaptatifs generaux. PhD thesis, 2010. Ecole Nationale Superieure des Mines de Saint-Etienne. https://tel.archives-ouvertes.fr/tel-00560838 .. [2] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of Discretized Sets - On the Choice of Adjacency in Homogeneous Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed Matter. Lecture Notes in Physics, vol 600. Springer, Berlin, Heidelberg. Examples -------- >>> import numpy as np >>> SAMPLE = np.zeros((100,100,100)); >>> SAMPLE[40:60, 40:60, 40:60]=1 >>> euler_number(SAMPLE) # doctest: +ELLIPSIS 1... >>> SAMPLE[45:55,45:55,45:55] = 0; >>> euler_number(SAMPLE) # doctest: +ELLIPSIS 2... >>> SAMPLE = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], ... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], ... [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], ... [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], ... 
[0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]) >>> euler_number(SAMPLE) # doctest: 0 >>> euler_number(SAMPLE, connectivity=1) # doctest: 2 """""" # as image can be a label image, transform it to binary image = (image > 0).astype(np.int) image = pad(image, ((1, 1),), mode='constant') # check connectivity if connectivity is None: connectivity = image.ndim if image.ndim == 3 and connectivity == 2: raise NotImplementedError('For 3D images, Euler number is implemented ' 'for connectivities 1 and 3 only') # config variable is an adjacency configuration. A coefficient given by # variable coefs is attributed to each configuration in order to get # the Euler characteristic. if image.ndim == 2: config = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]) if connectivity == 1: coefs = [0, 1, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0] else: coefs = [0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, -1, 0] bins = 16 else: # 3D images config = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 1, 4], [0, 2, 8]], [[0, 0, 0], [0, 16, 64], [0, 32, 128]]]) coefs26 = np.array([0, 1, 1, 0, 1, 0, -2, -1, 1, -2, 0, -1, 0, -1, -1, 0, 1, 0, -2, -1, -2, -1, -1, -2, -6, -3, -3, -2, -3, -2, 0, -1, 1, -2, 0, -1, -6, -3, -3, -2, -2, -1, -1, -2, -3, 0, -2, -1, 0, -1, -1, 0, -3, -2, 0, -1, -3, 0, -2, -1, 0, 1, 1, 0, 1, -2, -6, -3, 0, -1, -3, -2, -2, -1, -3, 0, -1, -2, -2, -1, 0, -1, -3, -2, -1, 0, 0, -1, -3, 0, 0, 1, -2, -1, 1, 0, -2, -1, -3, 0, -3, 0, 0, 1, -1, 4, 0, 3, 0, 3, 1, 2, -1, -2, -2, -1, -2, -1, 1, 0, 0, 3, 1, 2, 1, 2, 2, 1, 1, -6, -2, -3, -2, -3, -1, 0, 0, -3, -1, -2, -1, -2, -2, -1, -2, -3, -1, 0, -1, 0, 4, 3, -3, 0, 0, 1, 0, 1, 3, 2, 0, -3, -1, -2, -3, 0, 0, 1, -1, 0, 0, -1, -2, 1, -1, 0, -1, -2, -2, -1, 0, 1, 3, 2, -2, 1, -1, 0, 1, 2, 2, 1, 0, -3, -3, 0, -1, -2, 0, 1, -1, 0, -2, 1, 0, -1, -1, 0, -1, -2, 0, 1, -2, -1, 3, 2, -2, 1, 1, 2, -1, 0, 2, 1, -1, 0, -2, 1, -2, 1, 1, 2, -2, 3, -1, 2, -1, 2, 0, 1, 0, -1, -1, 0, -1, 0, 2, 1, -1, 2, 0, 1, 0, 1, 1, 0, ]) if connectivity == 1: coefs = coefs26[::-1] else: coefs = coefs26 bins = 256 XF = ndi.convolve(image, config, mode='constant', cval=0) h = np.bincount(XF.ravel(), minlength=bins) if image.ndim == 2: return coefs@h else: return np.int(1./8 * coefs@h) ","def euler_number(image, connectivity=None): """"""Calculate the Euler characteristic in binary image. A neighbourhood configuration is constructed, and a LUT is applied for each configuration. Parameters ---------- image: (N, M) ndarray or (N, M, D) ndarray. 2D or 3D images. If image is not binary, all values strictly greater than zero are considered as the object. connectivity : int, optional Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor. Accepted values are ranging from 1 to input.ndim. If ``None``, a full connectivity of ``input.ndim`` is used. 4 or 8 neighborhoods are defined for 2D images (connectivity 1 and 2, respectively). 6 or 26 neighborhoods are defined for 3D images, (connectivity 1 and 3, respectively). Connectivity 2 is not defined. Returns ------- euler_number : int Euler characteristic of the set of all objects in the image. Notes ----- The Euler characteristic is an integer number that describes the topology of the set of all objects in the input image. If object is 4-connected, then background is 8-connected, and conversely. References ---------- .. [1] S. Rivollier. Analyse d’image geometrique et morphometrique par diagrammes de forme et voisinages adaptatifs generaux. PhD thesis, 2010. Ecole Nationale Superieure des Mines de Saint-Etienne. 
https://tel.archives-ouvertes.fr/tel-00560838 .. [2] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of Discretized Sets - On the Choice of Adjacency in Homogeneous Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed Matter. Lecture Notes in Physics, vol 600. Springer, Berlin, Heidelberg. Examples -------- >>> import numpy as np >>> SAMPLE = np.zeros((100,100,100)); >>> SAMPLE[40:60, 40:60, 40:60]=1 >>> euler_number(SAMPLE) # doctest: +ELLIPSIS 1... >>> SAMPLE[45:55,45:55,45:55] = 0; >>> euler_number(SAMPLE) # doctest: +ELLIPSIS 2... >>> SAMPLE = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], ... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], ... [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], ... [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], ... [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]) >>> euler_number(SAMPLE) # doctest: 0 >>> euler_number(SAMPLE, connectivity=1) # doctest: 2 """""" # as image can be a label image, transform it to binary image = (image > 0).astype(np.int) image = pad(image, ((1, 1),), mode='constant') # check connectivity if connectivity is None: connectivity = image.ndim if image.ndim == 3 and connectivity == 2: raise NotImplementedError('For 3D images, Euler number is implemented ' 'for connectivities 1 and 3 only') # config variable is an adjacency configuration. A coefficient given by # variable coefs is attributed to each configuration in order to get # the Euler characteristic. 
if image.ndim == 2: config = np.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]) if connectivity == 1: coefs = [0, 1, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0] else: coefs = [0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, -1, 0] bins = 16 else: # 3D images config = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 1, 4], [0, 2, 8]], [[0, 0, 0], [0, 16, 64], [0, 32, 128]]]) coefs26 = np.array([0, 1, 1, 0, 1, 0, -2, -1, 1, -2, 0, -1, 0, -1, -1, 0, 1, 0, -2, -1, -2, -1, -1, -2, -6, -3, -3, -2, -3, -2, 0, -1, 1, -2, 0, -1, -6, -3, -3, -2, -2, -1, -1, -2, -3, 0, -2, -1, 0, -1, -1, 0, -3, -2, 0, -1, -3, 0, -2, -1, 0, 1, 1, 0, 1, -2, -6, -3, 0, -1, -3, -2, -2, -1, -3, 0, -1, -2, -2, -1, 0, -1, -3, -2, -1, 0, 0, -1, -3, 0, 0, 1, -2, -1, 1, 0, -2, -1, -3, 0, -3, 0, 0, 1, -1, 4, 0, 3, 0, 3, 1, 2, -1, -2, -2, -1, -2, -1, 1, 0, 0, 3, 1, 2, 1, 2, 2, 1, 1, -6, -2, -3, -2, -3, -1, 0, 0, -3, -1, -2, -1, -2, -2, -1, -2, -3, -1, 0, -1, 0, 4, 3, -3, 0, 0, 1, 0, 1, 3, 2, 0, -3, -1, -2, -3, 0, 0, 1, -1, 0, 0, -1, -2, 1, -1, 0, -1, -2, -2, -1, 0, 1, 3, 2, -2, 1, -1, 0, 1, 2, 2, 1, 0, -3, -3, 0, -1, -2, 0, 1, -1, 0, -2, 1, 0, -1, -1, 0, -1, -2, 0, 1, -2, -1, 3, 2, -2, 1, 1, 2, -1, 0, 2, 1, -1, 0, -2, 1, -2, 1, 1, 2, -2, 3, -1, 2, -1, 2, 0, 1, 0, -1, -1, 0, -1, 0, 2, 1, -1, 2, 0, 1, 0, 1, 1, 0, ]) if connectivity == 1: coefs = coefs26[::-1] else: coefs = coefs26 bins = 256 XF = ndi.convolve(image, config, mode='constant', cval=0) h = np.bincount(XF.ravel(), minlength=bins) if image.ndim == 2: return coefs@h else: return np.int(0.125 * coefs @ h) " 19543,"def diff(parser, args): env = ev.active_environment() if len(args.specs) != 2: tty.die(""You must provide two specs to diff."") specs = [] for spec in spack.cmd.parse_specs(args.specs): if spec.concrete: specs.append(spec) else: specs.append(spack.cmd.disambiguiate_spec(spec, env, first=args.load_first)) # Calculate the comparison (c) color = False if args.dump_json else get_color_when() c = compare_specs(specs[0], specs[1], to_string=True, color=color) # Default to all attributes attributes = args.attribute or [""all""] if args.dump_json: print(sjson.dump(c)) else: tty.warn(""This interface is subject to change.\n"") print_difference(c, attributes) ","def diff(parser, args): env = ev.active_environment() if len(args.specs) != 2: tty.die(""You must provide two specs to diff."") specs = [] for spec in spack.cmd.parse_specs(args.specs): if spec.concrete: specs.append(spec) else: specs.append(spack.cmd.disambiguate_spec(spec, env, first=args.load_first)) # Calculate the comparison (c) color = False if args.dump_json else get_color_when() c = compare_specs(specs[0], specs[1], to_string=True, color=color) # Default to all attributes attributes = args.attribute or [""all""] if args.dump_json: print(sjson.dump(c)) else: tty.warn(""This interface is subject to change.\n"") print_difference(c, attributes) " 5890,"def distutils_scheme( dist_name, user=False, home=None, root=None, isolated=False, prefix=None ): # type:(str, bool, str, str, bool, str) -> dict """""" Return a distutils install scheme """""" from distutils.dist import Distribution dist_args = {'name': dist_name} # type: Dict[str, Union[str, List[str]]] if isolated: dist_args[""script_args""] = [""--no-user-cfg""] d = Distribution(dist_args) d.parse_config_files() obj = d.get_command_obj('install', create=True) # type: Optional[Command] assert obj is not None i = cast(install, obj) # NOTE: setting user or home has the side-effect of creating the home dir # or user base for installations during 
finalize_options() # ideally, we'd prefer a scheme class that has no side-effects. assert not (user and prefix), ""user={} prefix={}"".format(user, prefix) assert not (home and prefix), ""home={} prefix={}"".format(home, prefix) i.user = user or i.user if user or home: i.prefix = """" i.prefix = prefix or i.prefix i.home = home or i.home i.root = root or i.root i.finalize_options() scheme = {} for key in SCHEME_KEYS: scheme[key] = getattr(i, 'install_' + key) # install_lib specified in setup.cfg should install *everything* # into there (i.e. it takes precedence over both purelib and # platlib). Note, i.install_lib is *always* set after # finalize_options(); we only want to override here if the user # has explicitly requested it hence going back to the config if 'install_lib' in d.get_option_dict('install'): scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) if running_under_virtualenv(): scheme['headers'] = os.path.join( sys.prefix, 'include', 'site', 'python{}'.format(get_major_minor_version()), dist_name, ) if root is not None: path_no_drive = os.path.splitdrive( os.path.abspath(scheme[""headers""]))[1] scheme[""headers""] = os.path.join( root, path_no_drive[1:], ) return scheme ","def distutils_scheme( dist_name, user=False, home=None, root=None, isolated=False, prefix=None ): # type:(str, bool, str, str, bool, str) -> dict """""" Return a distutils install scheme """""" from distutils.dist import Distribution dist_args = {'name': dist_name} # type: Dict[str, Union[str, List[str]]] if isolated: dist_args[""script_args""] = [""--no-user-cfg""] d = Distribution(dist_args) d.parse_config_files() obj = d.get_command_obj('install', create=True) # type: Optional[Command] assert obj is not None i = cast(distutils_install_command, obj) # NOTE: setting user or home has the side-effect of creating the home dir # or user base for installations during finalize_options() # ideally, we'd prefer a scheme class that has no side-effects. assert not (user and prefix), ""user={} prefix={}"".format(user, prefix) assert not (home and prefix), ""home={} prefix={}"".format(home, prefix) i.user = user or i.user if user or home: i.prefix = """" i.prefix = prefix or i.prefix i.home = home or i.home i.root = root or i.root i.finalize_options() scheme = {} for key in SCHEME_KEYS: scheme[key] = getattr(i, 'install_' + key) # install_lib specified in setup.cfg should install *everything* # into there (i.e. it takes precedence over both purelib and # platlib). 
Note, i.install_lib is *always* set after # finalize_options(); we only want to override here if the user # has explicitly requested it hence going back to the config if 'install_lib' in d.get_option_dict('install'): scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) if running_under_virtualenv(): scheme['headers'] = os.path.join( sys.prefix, 'include', 'site', 'python{}'.format(get_major_minor_version()), dist_name, ) if root is not None: path_no_drive = os.path.splitdrive( os.path.abspath(scheme[""headers""]))[1] scheme[""headers""] = os.path.join( root, path_no_drive[1:], ) return scheme " 1832,"def test_check_unknown_errors_both_missing_values(): # _check_unknown does not support both types of missing values = np.array(['a', 'c', 'c', None, np.nan], dtype=object) msg = (""Input wiith both types of missing, None and np.nan, is not "" ""supported"") with pytest.raises(ValueError, match=msg): _check_unknown(values, known_values=np.array(['a', 'c'])) ","def test_check_unknown_errors_both_missing_values(): # _check_unknown does not support both types of missing values = np.array(['a', 'c', 'c', None, np.nan], dtype=object) msg = (""Input with both types of missing, None and np.nan, is not "" ""supported"") with pytest.raises(ValueError, match=msg): _check_unknown(values, known_values=np.array(['a', 'c'])) " 31811,"def item_handle_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, None]: try: item_id = str(args.get('item_id')) action_res = client.action_on_item(item_id=item_id, action=""handled"") if isinstance(action_res, dict) and isinstance(action_res.get('data'), dict) \ and action_res['data'].get('value'): readable_output = 'Item marked as handled' return CommandResults(readable_output=readable_output) else: raise Exception(""Action failed!"") except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute command on {item_id}.\nError:\n {str(e)}') return None ","def item_handle_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, None]: try: item_id = args.get('item_id') action_res = client.action_on_item(item_id=item_id, action=""handled"") if isinstance(action_res, dict) and isinstance(action_res.get('data'), dict) \ and action_res['data'].get('value'): readable_output = 'Item marked as handled' return CommandResults(readable_output=readable_output) else: raise Exception(""Action failed!"") except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute command on {item_id}.\nError:\n {str(e)}') return None " 21242,"def group_required(*group_names): """""" Verify user group memebership :param group_names: Array of strings :return: user_passes_test """""" def in_groups(u): if u.is_authenticated(): if bool(u.groups.filter(name__in=group_names)) | u.is_superuser: return True return False return user_passes_test(in_groups) ","def group_required(*group_names): """""" Verify user group memebership :param group_names: Array of strings :return: Whether the user is in one of the groups """""" def in_groups(u): if u.is_authenticated(): if bool(u.groups.filter(name__in=group_names)) | u.is_superuser: return True return False return user_passes_test(in_groups) " 30244,"def prepare_fetch_query(fetch_timestamp): query = FETCH_QUERY_DICT[demisto.params().get('fetch_query', 'Traps Threats')] if 'tms' in query: query += f"" WHERE serverTime>'{fetch_timestamp}'"" FETCH_SEVERITY = demisto.params().get('traps_severity', ['all']) if 'all' not in 
FETCH_SEVERITY: query += ' AND (' for index, severity in enumerate(FETCH_SEVERITY): if index == (len(FETCH_SEVERITY) - 1): query += f""messageData.trapsSeverity='{severity}'"" else: query += f""messageData.trapsSeverity='{severity}' OR "" query += ')' if 'panw' in query: query += f' WHERE receive_time>{fetch_timestamp}' FETCH_SEVERITY = demisto.params().get('firewall_severity', ['all']) FETCH_SUBTYPE = demisto.params().get('firewall_subtype', ['all']) if 'all' not in FETCH_SUBTYPE: query += ' AND (' for index, subtype in enumerate(FETCH_SUBTYPE): if index == (len(FETCH_SUBTYPE) - 1): query += f""subtype='{subtype}'"" else: query += f""subtype='{subtype}' OR "" query += ')' if 'all' not in FETCH_SEVERITY: query += ' AND (' for index, severity in enumerate(FETCH_SEVERITY): if index == (len(FETCH_SEVERITY) - 1): query += f""severity='{severity}'"" else: query += f""severity='{severity}' OR "" query += ')' return query ","def prepare_fetch_query(fetch_timestamp): query = FETCH_QUERY_DICT[FETCH_QUERY] if 'tms' in query: query += f"" WHERE serverTime>'{fetch_timestamp}'"" FETCH_SEVERITY = demisto.params().get('traps_severity', ['all']) if 'all' not in FETCH_SEVERITY: query += ' AND (' for index, severity in enumerate(FETCH_SEVERITY): if index == (len(FETCH_SEVERITY) - 1): query += f""messageData.trapsSeverity='{severity}'"" else: query += f""messageData.trapsSeverity='{severity}' OR "" query += ')' if 'panw' in query: query += f' WHERE receive_time>{fetch_timestamp}' FETCH_SEVERITY = demisto.params().get('firewall_severity', ['all']) FETCH_SUBTYPE = demisto.params().get('firewall_subtype', ['all']) if 'all' not in FETCH_SUBTYPE: query += ' AND (' for index, subtype in enumerate(FETCH_SUBTYPE): if index == (len(FETCH_SUBTYPE) - 1): query += f""subtype='{subtype}'"" else: query += f""subtype='{subtype}' OR "" query += ')' if 'all' not in FETCH_SEVERITY: query += ' AND (' for index, severity in enumerate(FETCH_SEVERITY): if index == (len(FETCH_SEVERITY) - 1): query += f""severity='{severity}'"" else: query += f""severity='{severity}' OR "" query += ')' return query " 9902,"def main(): argument_spec = dict( state=dict(choices=['present', 'absent'], default='present'), identifier=dict(required=True), description=dict(required=True), subnetids=dict(type='list', required=True), ) global module module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True ) exit_message = None changed = False if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') state = module.params.get('state') aws_config_region, ec2_url, aws_connect_params = \ get_aws_connection_info(module, boto3=True) dmsclient = get_dms_client(aws_connect_params, aws_config_region, ec2_url) subnet_group = describe_subnet_group(dmsclient, module.params.get('identifier')) if state == 'present': if replication_subnet_exists(subnet_group): if compare_params(subnet_group[""ReplicationSubnetGroups""][0]): if not module.check_mode: exit_message = modify_replication_subnet_group(dmsclient) else: exit_message = dmsclient changed = True else: exit_message = ""No changes to Subnet group"" else: if not module.check_mode: exit_message = create_replication_subnet_group(dmsclient) changed = True else: exit_message = ""Check mode enabled"" elif state == 'absent': if replication_subnet_exists(subnet_group): replication_subnet_group_delete(dmsclient) changed = True exit_message = ""Replication subnet group Deleted"" else: changed = False exit_message = ""Replication subnet group does not exist"" 
module.exit_json(changed=changed, msg=exit_message) ","def main(): argument_spec = dict( state=dict(choices=['present', 'absent'], default='present'), identifier=dict(required=True), description=dict(required=True), subnetids=dict(type='list', required=True, elements='str'), ) global module module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True ) exit_message = None changed = False if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') state = module.params.get('state') aws_config_region, ec2_url, aws_connect_params = \ get_aws_connection_info(module, boto3=True) dmsclient = get_dms_client(aws_connect_params, aws_config_region, ec2_url) subnet_group = describe_subnet_group(dmsclient, module.params.get('identifier')) if state == 'present': if replication_subnet_exists(subnet_group): if compare_params(subnet_group[""ReplicationSubnetGroups""][0]): if not module.check_mode: exit_message = modify_replication_subnet_group(dmsclient) else: exit_message = dmsclient changed = True else: exit_message = ""No changes to Subnet group"" else: if not module.check_mode: exit_message = create_replication_subnet_group(dmsclient) changed = True else: exit_message = ""Check mode enabled"" elif state == 'absent': if replication_subnet_exists(subnet_group): replication_subnet_group_delete(dmsclient) changed = True exit_message = ""Replication subnet group Deleted"" else: changed = False exit_message = ""Replication subnet group does not exist"" module.exit_json(changed=changed, msg=exit_message) " 29723,"def get_device_index_and_uuid(device): """"""Get both device index and UUID from device index or UUID Parameters ---------- device: ``int``, ``bytes`` or``str`` An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID of a CUDA (either GPU or MIG) device. Returns ------- out: ``dict`` Dictionary containing ``""device-index""`` and ``""uuid""`` keys. Examples -------- >>> get_device_index_and_uuid(0) {'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'} >>> get_device_index_and_uuid('GPU-e1006a74-5836-264f-5c26-53d19d212dfe') {'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'} >>> get_device_index_and_uuid('MIG-7feb6df5-eccf-5faa-ab00-9a441867e237') {'device-index': 0, 'uuid': b'MIG-7feb6df5-eccf-5faa-ab00-9a441867e237'} """""" init_once() try: device_index = int(device) device_handle = pynvml.nvmlDeviceGetHandleByIndex(device_index) uuid = pynvml.nvmlDeviceGetUUID(device_handle) except ValueError: uuid = device if isinstance(device, bytes) else bytes(device, ""utf-8"") # Validate UUID, get index and UUID as seen with `nvidia-smi -L` uuid_handle = pynvml.nvmlDeviceGetHandleByUUID(uuid) device_index = pynvml.nvmlDeviceGetIndex(uuid_handle) uuid = pynvml.nvmlDeviceGetUUID(uuid_handle) return {""device-index"": device_index, ""uuid"": uuid} ","def get_device_index_and_uuid(device): """"""Get both device index and UUID from device index or UUID Parameters ---------- device: ``int``, ``bytes`` or``str`` An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID of a CUDA (either GPU or MIG) device. Returns ------- out : dict Dictionary containing ``""device-index""`` and ``""uuid""`` keys. 
Examples -------- >>> get_device_index_and_uuid(0) {'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'} >>> get_device_index_and_uuid('GPU-e1006a74-5836-264f-5c26-53d19d212dfe') {'device-index': 0, 'uuid': b'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'} >>> get_device_index_and_uuid('MIG-7feb6df5-eccf-5faa-ab00-9a441867e237') {'device-index': 0, 'uuid': b'MIG-7feb6df5-eccf-5faa-ab00-9a441867e237'} """""" init_once() try: device_index = int(device) device_handle = pynvml.nvmlDeviceGetHandleByIndex(device_index) uuid = pynvml.nvmlDeviceGetUUID(device_handle) except ValueError: uuid = device if isinstance(device, bytes) else bytes(device, ""utf-8"") # Validate UUID, get index and UUID as seen with `nvidia-smi -L` uuid_handle = pynvml.nvmlDeviceGetHandleByUUID(uuid) device_index = pynvml.nvmlDeviceGetIndex(uuid_handle) uuid = pynvml.nvmlDeviceGetUUID(uuid_handle) return {""device-index"": device_index, ""uuid"": uuid} " 44288,"def enable_return(): """"""Function that turns on the new return type system. The new system guarantees intuitive return types such that a sequence (e.g., list or tuple) is returned based on the `return` statement of the quantum function. This system avoids the creation of ragged arrays, where multiple measurements are stacked together. **Example** The following example shows that for multiple measurements the current PennyLane system is creating a ragged tensor. ```python dev = qml.device(""default.qubit"", wires=2) def circuit(x): qml.Hadamard(wires=[0]) qml.CRX(x, wires=[0, 1]) return qml.probs(wires=[0]), qml.vn_entropy(wires=[0]), qml.probs(wires=1), qml.expval(qml.PauliZ(wires=1)) qnode = qml.QNode(circuit, dev) ``` ```pycon >>> res = qnode(0.5) >>> res tensor([0.5 , 0.5 , 0.08014815, 0.96939564, 0.03060436, 0.93879128], requires_grad=True) ``` when you activate the new return type the results are simply a tuple containing each measurement. 
```python qml.enable_return() dev = qml.device(""default.qubit"", wires=2) def circuit(x): qml.Hadamard(wires=[0]) qml.CRX(x, wires=[0, 1]) return qml.probs(wires=[0]), qml.vn_entropy(wires=[0]), qml.probs(wires=1), qml.expval(qml.PauliZ(wires=1)) qnode = qml.QNode(circuit, dev) ``` ```pycon >>> res = qnode(0.5) >>> res (tensor([0.5, 0.5], requires_grad=True), tensor(0.08014815, requires_grad=True), tensor([0.96939564, 0.03060436], requires_grad=True), tensor(0.93879128, requires_grad=True)) ``` The new return types unlocks the use of `probs` mixed with diffrent measurements in backpropagation with Jax: ``` import jax qml.enable_return() dev = qml.device(""default.qubit"", wires=2) qml.enable_return() @qml.qnode(dev, interface=""jax"") def circuit(a): qml.RX(a[0], wires=0) qml.CNOT(wires=(0, 1)) qml.RY(a[1], wires=1) qml.RZ(a[2], wires=1) return qml.expval(qml.PauliZ(wires=0)), qml.probs(wires=[0, 1]), qml.vn_entropy(wires=1) x = jax.numpy.array([0.1, 0.2, 0.3]) ``` ```pycon res = jax.jacobian(circuit)(x) >>> res (DeviceArray([-9.9833414e-02, -7.4505806e-09, -3.9932679e-10], dtype=float32), DeviceArray([[-4.9419206e-02, -9.9086545e-02, 3.4938008e-09], [-4.9750542e-04, 9.9086538e-02, 1.2768372e-10], [ 4.9750548e-04, 2.4812977e-04, 4.8371929e-13], [ 4.9419202e-02, -2.4812980e-04, 2.6696912e-11]], dtype=float32), DeviceArray([ 2.9899091e-01, -4.4703484e-08, 9.5104014e-10], dtype=float32)) ``` where before the following error was raised: ```ValueError: All input arrays must have the same shape.``` """""" global __activated __activated = True ","def enable_return(): """"""Function that turns on the new return type system. The new system guarantees intuitive return types such that a sequence (e.g., list or tuple) is returned based on the `return` statement of the quantum function. This system avoids the creation of ragged arrays, where multiple measurements are stacked together. **Example** The following example shows that for multiple measurements the current PennyLane system is creating a ragged tensor. ```python dev = qml.device(""default.qubit"", wires=2) def circuit(x): qml.Hadamard(wires=[0]) qml.CRX(x, wires=[0, 1]) return qml.probs(wires=[0]), qml.vn_entropy(wires=[0]), qml.probs(wires=1), qml.expval(qml.PauliZ(wires=1)) qnode = qml.QNode(circuit, dev) ``` ```pycon >>> res = qnode(0.5) >>> res tensor([0.5 , 0.5 , 0.08014815, 0.96939564, 0.03060436, 0.93879128], requires_grad=True) ``` when you activate the new return type the results are simply a tuple containing each measurement. 
```python qml.enable_return() dev = qml.device(""default.qubit"", wires=2) def circuit(x): qml.Hadamard(wires=[0]) qml.CRX(x, wires=[0, 1]) return qml.probs(wires=[0]), qml.vn_entropy(wires=[0]), qml.probs(wires=1), qml.expval(qml.PauliZ(wires=1)) qnode = qml.QNode(circuit, dev) ``` ```pycon >>> res = qnode(0.5) >>> res (tensor([0.5, 0.5], requires_grad=True), tensor(0.08014815, requires_grad=True), tensor([0.96939564, 0.03060436], requires_grad=True), tensor(0.93879128, requires_grad=True)) ``` The new return types system unlocks the use of `probs` mixed with diffrent measurements in backpropagation with Jax: ``` import jax qml.enable_return() dev = qml.device(""default.qubit"", wires=2) qml.enable_return() @qml.qnode(dev, interface=""jax"") def circuit(a): qml.RX(a[0], wires=0) qml.CNOT(wires=(0, 1)) qml.RY(a[1], wires=1) qml.RZ(a[2], wires=1) return qml.expval(qml.PauliZ(wires=0)), qml.probs(wires=[0, 1]), qml.vn_entropy(wires=1) x = jax.numpy.array([0.1, 0.2, 0.3]) ``` ```pycon res = jax.jacobian(circuit)(x) >>> res (DeviceArray([-9.9833414e-02, -7.4505806e-09, -3.9932679e-10], dtype=float32), DeviceArray([[-4.9419206e-02, -9.9086545e-02, 3.4938008e-09], [-4.9750542e-04, 9.9086538e-02, 1.2768372e-10], [ 4.9750548e-04, 2.4812977e-04, 4.8371929e-13], [ 4.9419202e-02, -2.4812980e-04, 2.6696912e-11]], dtype=float32), DeviceArray([ 2.9899091e-01, -4.4703484e-08, 9.5104014e-10], dtype=float32)) ``` where before the following error was raised: ```ValueError: All input arrays must have the same shape.``` """""" global __activated __activated = True " 27464,"def export_dataset(project_id, dataset_id, gcs_uri): """"""Export a dataset."""""" # [START automl_export_dataset] from google.cloud import automl # TODO(developer): Uncomment and set the following variables # project_id = 'YOUR_PROJECT_ID' # dataset_id = 'YOUR_DATASET_ID' # gcs_uri = 'gs://BUCKET_ID/path_to_export/' client = automl.AutoMlClient() # Get the full path of the dataset dataset_full_id = client.dataset_path( project_id, ""us-central1"", dataset_id ) gcs_destination = automl.types.GcsDestination(output_uri_prefix=gcs_uri) output_config = automl.types.OutputConfig(gcs_destination=gcs_destination) response = client.export_data(dataset_full_id, output_config) print(u""Dataset exported. {}"".format(response.result())) # [END automl_export_dataset] ","def export_dataset(project_id, dataset_id, gcs_uri): """"""Export a dataset."""""" # [START automl_export_dataset] from google.cloud import automl # TODO(developer): Uncomment and set the following variables # project_id = 'YOUR_PROJECT_ID' # dataset_id = 'YOUR_DATASET_ID' # gcs_uri = 'gs://YOUR_BUCKET_ID/path/to/export/' client = automl.AutoMlClient() # Get the full path of the dataset dataset_full_id = client.dataset_path( project_id, ""us-central1"", dataset_id ) gcs_destination = automl.types.GcsDestination(output_uri_prefix=gcs_uri) output_config = automl.types.OutputConfig(gcs_destination=gcs_destination) response = client.export_data(dataset_full_id, output_config) print(u""Dataset exported. {}"".format(response.result())) # [END automl_export_dataset] " 59144,"def confusion_matrix( orig: Union[pd.Series, np.ndarray, Sequence], new: Union[pd.Series, np.ndarray, Sequence], data: Optional[pd.DataFrame] = None, *, normalize: bool = True, ) -> pd.DataFrame: """"""Given an original and new set of labels, create a labelled confusion matrix. Params ------ orig Original labels new New labels normalize Should the confusion matrix be normalized? 
Usage ----- >>> import scanpy as sc; import seaborn as sns >>> pbmc = sc.datasets.pbmc68k_reduced() >>> cmtx = sc.metrics.confusion_matrix(""bulk_labels"", ""louvain"", pbmc.obs) >>> sns.heatmap(cmtx) """""" from sklearn.metrics import confusion_matrix as _confusion_matrix if data is not None: orig = data[orig] new = data[new] # Coercing so I don't have to deal with it later orig, new = pd.Series(orig), pd.Series(new) assert len(orig) == len(new) unique_labels = pd.unique(np.concatenate((orig.values, new.values))) # Compute mtx = _confusion_matrix(orig, new, labels=unique_labels) if normalize: sums = mtx.sum(axis=1)[:, np.newaxis] mtx = np.divide(mtx, sums, where=sums != 0) # Label if orig.name is None: orig_name = ""Original labels"" else: orig_name = orig.name if new.name is None: new_name = ""New labels"" else: new_name = new.name df = pd.DataFrame( mtx, index=pd.Index(unique_labels, name=orig_name), columns=pd.Index(unique_labels, name=new_name), ) # Filter if is_categorical(orig): orig_idx = pd.Series(orig).cat.categories else: orig_idx = natsorted(pd.unique(orig)) if is_categorical(new): new_idx = pd.Series(new).cat.categories else: new_idx = natsorted(pd.unique(new)) df = df.loc[np.array(orig_idx), np.array(new_idx)] return df ","def confusion_matrix( orig: Union[pd.Series, np.ndarray, Sequence], new: Union[pd.Series, np.ndarray, Sequence], data: Optional[pd.DataFrame] = None, *, normalize: bool = True, ) -> pd.DataFrame: """"""\ Given an original and new set of labels, create a labelled confusion matrix. Params ------ orig Original labels new New labels normalize Should the confusion matrix be normalized? Usage ----- >>> import scanpy as sc; import seaborn as sns >>> pbmc = sc.datasets.pbmc68k_reduced() >>> cmtx = sc.metrics.confusion_matrix(""bulk_labels"", ""louvain"", pbmc.obs) >>> sns.heatmap(cmtx) """""" from sklearn.metrics import confusion_matrix as _confusion_matrix if data is not None: orig = data[orig] new = data[new] # Coercing so I don't have to deal with it later orig, new = pd.Series(orig), pd.Series(new) assert len(orig) == len(new) unique_labels = pd.unique(np.concatenate((orig.values, new.values))) # Compute mtx = _confusion_matrix(orig, new, labels=unique_labels) if normalize: sums = mtx.sum(axis=1)[:, np.newaxis] mtx = np.divide(mtx, sums, where=sums != 0) # Label if orig.name is None: orig_name = ""Original labels"" else: orig_name = orig.name if new.name is None: new_name = ""New labels"" else: new_name = new.name df = pd.DataFrame( mtx, index=pd.Index(unique_labels, name=orig_name), columns=pd.Index(unique_labels, name=new_name), ) # Filter if is_categorical(orig): orig_idx = pd.Series(orig).cat.categories else: orig_idx = natsorted(pd.unique(orig)) if is_categorical(new): new_idx = pd.Series(new).cat.categories else: new_idx = natsorted(pd.unique(new)) df = df.loc[np.array(orig_idx), np.array(new_idx)] return df " 33544,"def look_for_global_table( context: RequestContext, table_name: str, region: str | None = None ) -> None: region: str | None = get_global_table_region(table_name=table_name, target_region=region) if region: # modify the context to query the original region where the table has been created context.region = region ","def look_for_global_table( context: RequestContext, table_name: str, region: str | None = None ) -> None: region = get_global_table_region(table_name=table_name, target_region=region) if region: # modify the context to query the original region where the table has been created context.region = region " 7149,"def 
convex_hull_image(image, offset_coordinates=True, tolerance=1e-10): """"""Compute the convex hull image of a binary image. The convex hull is the set of pixels included in the smallest convex polygon that surround all white pixels in the input image. Parameters ---------- image : array Binary input image. This array is cast to bool before processing. offset_coordinates : bool, optional If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds some ""extent"" to a pixel when computing the hull. tolerance : float, optional Tolerance when determining whether a point is inside the hull. Due to numerical floating point errors, a tolerance of 0 can result in some points erroneously being classified as being outside the hull. Returns ------- hull : (M, N) array of bool Binary image with pixels in convex hull set to True. References ---------- .. [1] https://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/ """""" ndim = image.ndim if np.count_nonzero(image) == 0: warn(""Input image is entirely zero, no valid convex hull. "" ""Returning empty image"", UserWarning) return np.zeros(image.shape, dtype=np.bool_) # In 2D, we do an optimisation by choosing only pixels that are # the starting or ending pixel of a row or column. This vastly # limits the number of coordinates to examine for the virtual hull. if ndim == 2: coords = possible_hull(np.ascontiguousarray(image.astype(np.uint8))) else: coords = np.transpose(np.nonzero(image)) if offset_coordinates: # when offsetting, we multiply number of vertices by 2 * ndim. # therefore, we reduce the number of coordinates by using a # convex hull on the original set, before offsetting. hull0 = ConvexHull(coords) coords = hull0.points[hull0.vertices] # Add a vertex for the middle of each pixel edge if offset_coordinates: offsets = _offsets_diamond(image.ndim) coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim) # repeated coordinates can *sometimes* cause problems in # scipy.spatial.ConvexHull, so we remove them. coords = unique_rows(coords) # Find the convex hull hull = ConvexHull(coords) vertices = hull.points[hull.vertices] # If 2D, use fast Cython function to locate convex hull pixels if ndim == 2: mask = grid_points_in_poly(image.shape, vertices) else: gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))], (ndim, -1)) # A point is in the hull if it satisfies all of the hull's inequalities coords_in_hull = np.all(hull.equations[:, :ndim].dot(gridcoords) + hull.equations[:, ndim:] < tolerance, axis=0) mask = np.reshape(coords_in_hull, image.shape) return mask ","def convex_hull_image(image, offset_coordinates=True, tolerance=1e-10): """"""Compute the convex hull image of a binary image. The convex hull is the set of pixels included in the smallest convex polygon that surround all white pixels in the input image. Parameters ---------- image : array Binary input image. This array is cast to bool before processing. offset_coordinates : bool, optional If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds some ""extent"" to a pixel when computing the hull. tolerance : float, optional Tolerance when determining whether a point is inside the hull. Due to numerical floating point errors, a tolerance of 0 can result in some points erroneously being classified as being outside the hull. 
Returns ------- hull : (M, N) array of bool Binary image with pixels in convex hull set to True. References ---------- .. [1] https://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/ """""" ndim = image.ndim if np.count_nonzero(image) == 0: warn(""Input image is entirely zero, no valid convex hull. "" ""Returning empty image"", UserWarning) return np.zeros(image.shape, dtype=np.bool_) # In 2D, we do an optimisation by choosing only pixels that are # the starting or ending pixel of a row or column. This vastly # limits the number of coordinates to examine for the virtual hull. if ndim == 2: coords = possible_hull(np.ascontiguousarray(image, dtype=np.uint8)) else: coords = np.transpose(np.nonzero(image)) if offset_coordinates: # when offsetting, we multiply number of vertices by 2 * ndim. # therefore, we reduce the number of coordinates by using a # convex hull on the original set, before offsetting. hull0 = ConvexHull(coords) coords = hull0.points[hull0.vertices] # Add a vertex for the middle of each pixel edge if offset_coordinates: offsets = _offsets_diamond(image.ndim) coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim) # repeated coordinates can *sometimes* cause problems in # scipy.spatial.ConvexHull, so we remove them. coords = unique_rows(coords) # Find the convex hull hull = ConvexHull(coords) vertices = hull.points[hull.vertices] # If 2D, use fast Cython function to locate convex hull pixels if ndim == 2: mask = grid_points_in_poly(image.shape, vertices) else: gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))], (ndim, -1)) # A point is in the hull if it satisfies all of the hull's inequalities coords_in_hull = np.all(hull.equations[:, :ndim].dot(gridcoords) + hull.equations[:, ndim:] < tolerance, axis=0) mask = np.reshape(coords_in_hull, image.shape) return mask " 31079,"def get_paid_packs(client: demisto_client, request_timeout: int = 999999): """"""Get premium packs from client. Trigger an API request to demisto server. Request is identical to checking the premium box through the marketplace GUI. Args: client: The demisto client to preform request on. request_timeout: Timeout of API request Returns: Dict of premium packs as found in the server. Return None if no premium packs were found. """""" request_data = \ { 'page': 0, 'size': 50, 'sort': [{ 'field': 'updated', 'asc': False }], 'general': [""generalFieldPaid""] } logging.info(f'Getting premium packs from server {client.api_client.configuration.host}:\n') # make the pack installation request response_data, status_code, _ = demisto_client.generic_request_func(client, path='/contentpacks/marketplace/search', method='POST', body=request_data, accept='application/json', _request_timeout=request_timeout) if status_code == 200: logging.debug(f'Got response data {response_data}') response = ast.literal_eval(response_data) logging.debug(f'Response dict is {response}') logging.info('Got premium packs from server.') return response[""packs""] result_object = ast.literal_eval(response_data) message = result_object.get('message', '') logging.error(f'Failed to retrieve premium packs - with status code {status_code}\n{message}\n') return None ","def get_paid_packs(client: demisto_client, request_timeout: int = 999999): """"""Get premium packs from client. Trigger an API request to demisto server. Request is identical to checking the premium box through the marketplace GUI. Args: client: The demisto client to perform the request on. 
request_timeout: Timeout of API request Returns: Dict of premium packs as found in the server. Return None if no premium packs were found. """""" request_data = \ { 'page': 0, 'size': 50, 'sort': [{ 'field': 'updated', 'asc': False }], 'general': [""generalFieldPaid""] } logging.info(f'Getting premium packs from server {client.api_client.configuration.host}:\n') # make the pack installation request response_data, status_code, _ = demisto_client.generic_request_func(client, path='/contentpacks/marketplace/search', method='POST', body=request_data, accept='application/json', _request_timeout=request_timeout) if status_code == 200: logging.debug(f'Got response data {response_data}') response = ast.literal_eval(response_data) logging.debug(f'Response dict is {response}') logging.info('Got premium packs from server.') return response[""packs""] result_object = ast.literal_eval(response_data) message = result_object.get('message', '') logging.error(f'Failed to retrieve premium packs - with status code {status_code}\n{message}\n') return None " 28035,"def get_argparser_ctor_args(): """""" This method returns a dict containing the kwargs for constructing an argparse.ArgumentParser (either directly or as a subparser). """""" package_root = analyzer_context.get_context().package_root return { 'prog': 'CodeChecker checkers', 'formatter_class': arg.RawDescriptionDefaultHelpFormatter, # Description is shown when the command's help is queried directly 'description': ""Get the list of checkers available and their enabled "" ""status in the supported analyzers."", # Epilogue is shown after the arguments when the help is queried # directly. 'epilog': """""" The list of checkers that are enabled or disabled by default can be edited by editing ""profile:default"" labels in the file '{}'. Environment variables ------------------------------------------------ CC_CHECKER_LABELS_FILE Path of the checker-label mapping config file. Default: '{}' """""".format(os.path.join(package_root, 'config', 'checker_labels.json'), os.path.join(package_root, 'config', 'checker_labels.json')), # Help is shown when the ""parent"" CodeChecker command lists the # individual subcommands. 'help': ""List the checkers available for code analysis."" } ","def get_argparser_ctor_args(): """""" This method returns a dict containing the kwargs for constructing an argparse.ArgumentParser (either directly or as a subparser). """""" package_root = analyzer_context.get_context().package_root return { 'prog': 'CodeChecker checkers', 'formatter_class': arg.RawDescriptionDefaultHelpFormatter, # Description is shown when the command's help is queried directly 'description': ""Get the list of checkers available and their enabled "" ""status in the supported analyzers."", # Epilogue is shown after the arguments when the help is queried # directly. 'epilog': """""" The list of checkers that are enabled or disabled by default can be edited by editing ""profile:default"" labels in the file '{}'. Environment variables ------------------------------------------------ CC_CHECKER_LABELS_FILE Path of the checker-label mapping config file. Default: '{}' """""".format(os.path.join(package_root, 'config', 'checker_labels.json')), # Help is shown when the ""parent"" CodeChecker command lists the # individual subcommands. 
'help': ""List the checkers available for code analysis."" } " 9117,"def parse_yaml_file(driver_yaml_file): driver = os.path.splitext(driver_yaml_file)[0] + "".py"" driver_template = None with open(driver_yaml_file, 'r', encoding='utf-8') as yaml_file: driver_template = yaml.load(yaml_file, Loader=yaml.Loader) sets, queries = parse_branch(driver_template[""COMMAND_TREE""]) driver_str = ""\n\n\n"".join((header_string, string_converter, scpi_preprocessor, query_processor)) + ""\n\n\n"" driver_str += class_header for s in sorted(sets, key=str.lower): driver_str += ""\n"" + indent(s, 1) + ""\n"" for q in sorted(queries, key=str.lower): driver_str += ""\n"" + indent(q, 1) + ""\n"" with open(driver, 'w', encoding='utf8') as scpi_driver: scpi_driver.write(driver_str) ","def parse_yaml_file(driver_yaml_file): driver = os.path.splitext(driver_yaml_file)[0] + "".py"" driver_template = None with open(driver_yaml_file, 'r', encoding='utf-8') as yaml_file: driver_template = yaml.safe_load(yaml_file) sets, queries = parse_branch(driver_template[""COMMAND_TREE""]) driver_str = ""\n\n\n"".join((header_string, string_converter, scpi_preprocessor, query_processor)) + ""\n\n\n"" driver_str += class_header for s in sorted(sets, key=str.lower): driver_str += ""\n"" + indent(s, 1) + ""\n"" for q in sorted(queries, key=str.lower): driver_str += ""\n"" + indent(q, 1) + ""\n"" with open(driver, 'w', encoding='utf8') as scpi_driver: scpi_driver.write(driver_str) " 56588,"def plot_pair( ax, infdata_group, numvars, figsize, textsize, kind, scatter_kwargs, # pylint: disable=unused-argument kde_kwargs, hexbin_kwargs, gridsize, # pylint: disable=unused-argument colorbar, # pylint: disable=unused-argument divergences, diverging_mask, divergences_kwargs, # pylint: disable=unused-argument flat_var_names, backend_kwargs, marginal_kwargs, show, marginals, point_estimate, point_estimate_kwargs, point_estimate_marker_kwargs, reference_values, reference_values_kwargs, ): """"""Bokeh pair plot."""""" if backend_kwargs is None: backend_kwargs = {} backend_kwargs = { **backend_kwarg_defaults((""dpi"", ""plot.bokeh.figure.dpi""),), **backend_kwargs, } if hexbin_kwargs is None: hexbin_kwargs = {} hexbin_kwargs.setdefault(""size"", 0.5) if marginal_kwargs is None: marginal_kwargs = {} if point_estimate_kwargs is None: point_estimate_kwargs = {} if kde_kwargs is None: kde_kwargs = {} if kind != ""kde"": kde_kwargs.setdefault(""contourf_kwargs"", {""fill_alpha"": 0}) kde_kwargs.setdefault(""contour_kwargs"", {}) kde_kwargs[""contour_kwargs""].setdefault(""line_color"", ""black"") kde_kwargs[""contour_kwargs""].setdefault(""line_alpha"", 1) if reference_values: reference_values_copy = {} label = [] for variable in list(reference_values.keys()): if "" "" in variable: variable_copy = variable.replace("" "", ""\n"", 1) else: variable_copy = variable label.append(variable_copy) reference_values_copy[variable_copy] = reference_values[variable] difference = set(flat_var_names).difference(set(label)) if difference: warn = [diff.replace(""\n"", "" "", 1) for diff in difference] warnings.warn( ""Argument reference_values does not include reference value for: {}"".format( "", "".join(warn) ), UserWarning, ) if reference_values: reference_values_copy = {} label = [] for variable in list(reference_values.keys()): if "" "" in variable: variable_copy = variable.replace("" "", ""\n"", 1) else: variable_copy = variable label.append(variable_copy) reference_values_copy[variable_copy] = reference_values[variable] difference = 
set(flat_var_names).difference(set(label)) for dif in difference: reference_values_copy[dif] = None if difference: warn = [dif.replace(""\n"", "" "", 1) for dif in difference] warnings.warn( ""Argument reference_values does not include reference value for: {}"".format( "", "".join(warn) ), UserWarning, ) if reference_values_kwargs is None: reference_values_kwargs = {} reference_values_kwargs.setdefault(""line_color"", ""red"") reference_values_kwargs.setdefault(""line_width"", 5) dpi = backend_kwargs.pop(""dpi"") max_plots = ( numvars ** 2 if rcParams[""plot.max_subplots""] is None else rcParams[""plot.max_subplots""] ) vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots) if vars_to_plot < numvars: warnings.warn( ""rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "" ""of resulting pair plots with these variables, generating only a "" ""{side}x{side} grid"".format(max_plots=max_plots, side=vars_to_plot), UserWarning, ) numvars = vars_to_plot if numvars == 2: offset = 1 else: offset = 2 (figsize, _, _, _, _, markersize) = _scale_fig_size( figsize, textsize, numvars - offset, numvars - offset ) if point_estimate_marker_kwargs is None: point_estimate_marker_kwargs = {} point_estimate_marker_kwargs.setdefault(""line_width"", markersize) point_estimate_kwargs.setdefault(""line_color"", ""orange"") point_estimate_kwargs.setdefault(""line_width"", 3) point_estimate_kwargs.setdefault(""line_dash"", ""solid"") tmp_flat_var_names = None if len(flat_var_names) == len(list(set(flat_var_names))): source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group])) else: tmp_flat_var_names = [""{}__{}"".format(name, str(uuid4())) for name in flat_var_names] source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group])) if divergences: divergenve_name = ""divergences_{}"".format(str(uuid4())) source_dict[divergenve_name] = np.array(diverging_mask).astype(bool).astype(int).astype(str) source = ColumnDataSource(data=source_dict) if divergences: source_nondiv = CDSView( source=source, filters=[GroupFilter(column_name=divergenve_name, group=""0"")] ) source_div = CDSView( source=source, filters=[GroupFilter(column_name=divergenve_name, group=""1"")] ) def get_width_and_height(jointplot, rotate): """"""Compute subplots dimensions for two or more variables."""""" if jointplot: if rotate: width = int(figsize[0] / (numvars - 1) + 2 * dpi) height = int(figsize[1] / (numvars - 1) * dpi) else: width = int(figsize[0] / (numvars - 1) * dpi) height = int(figsize[1] / (numvars - 1) + 2 * dpi) else: width = int(figsize[0] / (numvars - 1) * dpi) height = int(figsize[1] / (numvars - 1) * dpi) return width, height if marginals: marginals_offset = 0 else: marginals_offset = 1 if ax is None: ax = [] backend_kwargs.setdefault(""width"", int(figsize[0] / (numvars - 1) * dpi)) backend_kwargs.setdefault(""height"", int(figsize[1] / (numvars - 1) * dpi)) for row in range(numvars - marginals_offset): row_ax = [] var1 = ( flat_var_names[row + marginals_offset] if tmp_flat_var_names is None else tmp_flat_var_names[row + marginals_offset] ) for col in range(numvars - marginals_offset): var2 = ( flat_var_names[col] if tmp_flat_var_names is None else tmp_flat_var_names[col] ) backend_kwargs_copy = backend_kwargs.copy() if ""scatter"" in kind: tooltips = [ (var2, ""@{{{}}}"".format(var2)), (var1, ""@{{{}}}"".format(var1)), ] backend_kwargs_copy.setdefault(""tooltips"", tooltips) else: tooltips = None if row < col: row_ax.append(None) else: jointplot = row == col and numvars 
== 2 and marginals rotate = col == 1 width, height = get_width_and_height(jointplot, rotate) if jointplot: ax_ = bkp.figure(width=width, height=height, tooltips=tooltips) else: ax_ = bkp.figure(**backend_kwargs_copy) row_ax.append(ax_) ax.append(row_ax) ax = np.array(ax) else: assert ax.shape == (numvars - marginals_offset, numvars - marginals_offset) # pylint: disable=too-many-nested-blocks for i in range(0, numvars - marginals_offset): var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i] for j in range(0, numvars - marginals_offset): var2 = ( flat_var_names[j + marginals_offset] if tmp_flat_var_names is None else tmp_flat_var_names[j + marginals_offset] ) if j == i and marginals: rotate = numvars == 2 and j == 1 var1_dist = infdata_group[i] plot_dist( var1_dist, ax=ax[j, i], show=False, backend=""bokeh"", rotated=rotate, **marginal_kwargs, ) ax[j, i].xaxis.axis_label = flat_var_names[i] ax[j, i].yaxis.axis_label = flat_var_names[j + marginals_offset] elif j + marginals_offset > i: if ""scatter"" in kind: if divergences: ax[j, i].circle(var1, var2, source=source, view=source_nondiv) else: ax[j, i].circle(var1, var2, source=source) if ""kde"" in kind: var1_kde = infdata_group[i] var2_kde = infdata_group[j + marginals_offset] plot_kde( var1_kde, var2_kde, ax=ax[j, i], backend=""bokeh"", backend_kwargs={}, show=False, **kde_kwargs, ) if ""hexbin"" in kind: var1_hexbin = infdata_group[i] var2_hexbin = infdata_group[j + marginals_offset] ax[j, i].grid.visible = False ax[j, i].hexbin( var1_hexbin, var2_hexbin, **hexbin_kwargs, ) if divergences: ax[j, i].circle( var1, var2, line_color=""black"", fill_color=""orange"", line_width=1, size=10, source=source, view=source_div, ) if point_estimate: var1_pe = infdata_group[i] var2_pe = infdata_group[j] pe_x = calculate_point_estimate(point_estimate, var1_pe) pe_y = calculate_point_estimate(point_estimate, var2_pe) ax[j, i].square(pe_x, pe_y, **point_estimate_marker_kwargs) ax_hline = Span(location=pe_y, dimension=""width"", **point_estimate_kwargs,) ax_vline = Span(location=pe_x, dimension=""height"", **point_estimate_kwargs,) ax[j, i].add_layout(ax_hline) ax[j, i].add_layout(ax_vline) if marginals: ax[j - 1, i].add_layout(ax_vline) pe_last = calculate_point_estimate(point_estimate, infdata_group[-1]) ax_pe_vline = Span( location=pe_last, dimension=""height"", **point_estimate_kwargs, ) ax[-1, -1].add_layout(ax_pe_vline) if numvars == 2: ax_pe_hline = Span( location=pe_last, dimension=""width"", **point_estimate_kwargs, ) ax[-1, -1].add_layout(ax_pe_hline) if reference_values: x = reference_values_copy[flat_var_names[j + marginals_offset]] y = reference_values_copy[flat_var_names[i]] if x and y: ax[j, i].circle(y, x, **reference_values_kwargs) ax[j, i].xaxis.axis_label = flat_var_names[i] ax[j, i].yaxis.axis_label = flat_var_names[j + marginals_offset] show_layout(ax, show) return ax ","def plot_pair( ax, infdata_group, numvars, figsize, textsize, kind, scatter_kwargs, # pylint: disable=unused-argument kde_kwargs, hexbin_kwargs, gridsize, # pylint: disable=unused-argument colorbar, # pylint: disable=unused-argument divergences, diverging_mask, divergences_kwargs, # pylint: disable=unused-argument flat_var_names, backend_kwargs, marginal_kwargs, show, marginals, point_estimate, point_estimate_kwargs, point_estimate_marker_kwargs, reference_values, reference_values_kwargs, ): """"""Bokeh pair plot."""""" if backend_kwargs is None: backend_kwargs = {} backend_kwargs = { **backend_kwarg_defaults((""dpi"", 
""plot.bokeh.figure.dpi""),), **backend_kwargs, } if hexbin_kwargs is None: hexbin_kwargs = {} hexbin_kwargs.setdefault(""size"", 0.5) if marginal_kwargs is None: marginal_kwargs = {} if point_estimate_kwargs is None: point_estimate_kwargs = {} if kde_kwargs is None: kde_kwargs = {} if kind != ""kde"": kde_kwargs.setdefault(""contourf_kwargs"", {}) kde_kwargs[""contourf_kwargs""].setdefaults(""fill_alpha"", 0) kde_kwargs.setdefault(""contour_kwargs"", {}) kde_kwargs[""contour_kwargs""].setdefault(""line_color"", ""black"") kde_kwargs[""contour_kwargs""].setdefault(""line_alpha"", 1) if reference_values: reference_values_copy = {} label = [] for variable in list(reference_values.keys()): if "" "" in variable: variable_copy = variable.replace("" "", ""\n"", 1) else: variable_copy = variable label.append(variable_copy) reference_values_copy[variable_copy] = reference_values[variable] difference = set(flat_var_names).difference(set(label)) if difference: warn = [diff.replace(""\n"", "" "", 1) for diff in difference] warnings.warn( ""Argument reference_values does not include reference value for: {}"".format( "", "".join(warn) ), UserWarning, ) if reference_values: reference_values_copy = {} label = [] for variable in list(reference_values.keys()): if "" "" in variable: variable_copy = variable.replace("" "", ""\n"", 1) else: variable_copy = variable label.append(variable_copy) reference_values_copy[variable_copy] = reference_values[variable] difference = set(flat_var_names).difference(set(label)) for dif in difference: reference_values_copy[dif] = None if difference: warn = [dif.replace(""\n"", "" "", 1) for dif in difference] warnings.warn( ""Argument reference_values does not include reference value for: {}"".format( "", "".join(warn) ), UserWarning, ) if reference_values_kwargs is None: reference_values_kwargs = {} reference_values_kwargs.setdefault(""line_color"", ""red"") reference_values_kwargs.setdefault(""line_width"", 5) dpi = backend_kwargs.pop(""dpi"") max_plots = ( numvars ** 2 if rcParams[""plot.max_subplots""] is None else rcParams[""plot.max_subplots""] ) vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots) if vars_to_plot < numvars: warnings.warn( ""rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "" ""of resulting pair plots with these variables, generating only a "" ""{side}x{side} grid"".format(max_plots=max_plots, side=vars_to_plot), UserWarning, ) numvars = vars_to_plot if numvars == 2: offset = 1 else: offset = 2 (figsize, _, _, _, _, markersize) = _scale_fig_size( figsize, textsize, numvars - offset, numvars - offset ) if point_estimate_marker_kwargs is None: point_estimate_marker_kwargs = {} point_estimate_marker_kwargs.setdefault(""line_width"", markersize) point_estimate_kwargs.setdefault(""line_color"", ""orange"") point_estimate_kwargs.setdefault(""line_width"", 3) point_estimate_kwargs.setdefault(""line_dash"", ""solid"") tmp_flat_var_names = None if len(flat_var_names) == len(list(set(flat_var_names))): source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group])) else: tmp_flat_var_names = [""{}__{}"".format(name, str(uuid4())) for name in flat_var_names] source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group])) if divergences: divergenve_name = ""divergences_{}"".format(str(uuid4())) source_dict[divergenve_name] = np.array(diverging_mask).astype(bool).astype(int).astype(str) source = ColumnDataSource(data=source_dict) if divergences: source_nondiv = CDSView( source=source, 
filters=[GroupFilter(column_name=divergenve_name, group=""0"")] ) source_div = CDSView( source=source, filters=[GroupFilter(column_name=divergenve_name, group=""1"")] ) def get_width_and_height(jointplot, rotate): """"""Compute subplots dimensions for two or more variables."""""" if jointplot: if rotate: width = int(figsize[0] / (numvars - 1) + 2 * dpi) height = int(figsize[1] / (numvars - 1) * dpi) else: width = int(figsize[0] / (numvars - 1) * dpi) height = int(figsize[1] / (numvars - 1) + 2 * dpi) else: width = int(figsize[0] / (numvars - 1) * dpi) height = int(figsize[1] / (numvars - 1) * dpi) return width, height if marginals: marginals_offset = 0 else: marginals_offset = 1 if ax is None: ax = [] backend_kwargs.setdefault(""width"", int(figsize[0] / (numvars - 1) * dpi)) backend_kwargs.setdefault(""height"", int(figsize[1] / (numvars - 1) * dpi)) for row in range(numvars - marginals_offset): row_ax = [] var1 = ( flat_var_names[row + marginals_offset] if tmp_flat_var_names is None else tmp_flat_var_names[row + marginals_offset] ) for col in range(numvars - marginals_offset): var2 = ( flat_var_names[col] if tmp_flat_var_names is None else tmp_flat_var_names[col] ) backend_kwargs_copy = backend_kwargs.copy() if ""scatter"" in kind: tooltips = [ (var2, ""@{{{}}}"".format(var2)), (var1, ""@{{{}}}"".format(var1)), ] backend_kwargs_copy.setdefault(""tooltips"", tooltips) else: tooltips = None if row < col: row_ax.append(None) else: jointplot = row == col and numvars == 2 and marginals rotate = col == 1 width, height = get_width_and_height(jointplot, rotate) if jointplot: ax_ = bkp.figure(width=width, height=height, tooltips=tooltips) else: ax_ = bkp.figure(**backend_kwargs_copy) row_ax.append(ax_) ax.append(row_ax) ax = np.array(ax) else: assert ax.shape == (numvars - marginals_offset, numvars - marginals_offset) # pylint: disable=too-many-nested-blocks for i in range(0, numvars - marginals_offset): var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i] for j in range(0, numvars - marginals_offset): var2 = ( flat_var_names[j + marginals_offset] if tmp_flat_var_names is None else tmp_flat_var_names[j + marginals_offset] ) if j == i and marginals: rotate = numvars == 2 and j == 1 var1_dist = infdata_group[i] plot_dist( var1_dist, ax=ax[j, i], show=False, backend=""bokeh"", rotated=rotate, **marginal_kwargs, ) ax[j, i].xaxis.axis_label = flat_var_names[i] ax[j, i].yaxis.axis_label = flat_var_names[j + marginals_offset] elif j + marginals_offset > i: if ""scatter"" in kind: if divergences: ax[j, i].circle(var1, var2, source=source, view=source_nondiv) else: ax[j, i].circle(var1, var2, source=source) if ""kde"" in kind: var1_kde = infdata_group[i] var2_kde = infdata_group[j + marginals_offset] plot_kde( var1_kde, var2_kde, ax=ax[j, i], backend=""bokeh"", backend_kwargs={}, show=False, **kde_kwargs, ) if ""hexbin"" in kind: var1_hexbin = infdata_group[i] var2_hexbin = infdata_group[j + marginals_offset] ax[j, i].grid.visible = False ax[j, i].hexbin( var1_hexbin, var2_hexbin, **hexbin_kwargs, ) if divergences: ax[j, i].circle( var1, var2, line_color=""black"", fill_color=""orange"", line_width=1, size=10, source=source, view=source_div, ) if point_estimate: var1_pe = infdata_group[i] var2_pe = infdata_group[j] pe_x = calculate_point_estimate(point_estimate, var1_pe) pe_y = calculate_point_estimate(point_estimate, var2_pe) ax[j, i].square(pe_x, pe_y, **point_estimate_marker_kwargs) ax_hline = Span(location=pe_y, dimension=""width"", **point_estimate_kwargs,) ax_vline = 
Span(location=pe_x, dimension=""height"", **point_estimate_kwargs,) ax[j, i].add_layout(ax_hline) ax[j, i].add_layout(ax_vline) if marginals: ax[j - 1, i].add_layout(ax_vline) pe_last = calculate_point_estimate(point_estimate, infdata_group[-1]) ax_pe_vline = Span( location=pe_last, dimension=""height"", **point_estimate_kwargs, ) ax[-1, -1].add_layout(ax_pe_vline) if numvars == 2: ax_pe_hline = Span( location=pe_last, dimension=""width"", **point_estimate_kwargs, ) ax[-1, -1].add_layout(ax_pe_hline) if reference_values: x = reference_values_copy[flat_var_names[j + marginals_offset]] y = reference_values_copy[flat_var_names[i]] if x and y: ax[j, i].circle(y, x, **reference_values_kwargs) ax[j, i].xaxis.axis_label = flat_var_names[i] ax[j, i].yaxis.axis_label = flat_var_names[j + marginals_offset] show_layout(ax, show) return ax " 30831,"def get_security_profiles_command(): """""" Get information about profiles. """""" security_profile = demisto.args().get('security_profile') if security_profile: xpath = f'{XPATH_RULEBASE}profiles/{security_profile}' else: xpath = f'{XPATH_RULEBASE}profiles' result = get_security_profile(xpath) if security_profile: security_profiles = result.get('response', {}).get('result', {}) else: security_profiles = result.get('response', {}).get('result', {}).get('profiles', {}) if '@dirtyId' in security_profiles: LOG(f'Found uncommitted item:\n{security_profiles}') raise Exception('Please commit the instance prior to getting the security profiles.') human_readable = '' content: List[Dict[str, Any]] = [] context = {} if 'spyware' in security_profiles: profiles = security_profiles.get('spyware').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': spyware_rules }) else: rules = profiles.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': spyware_rules } human_readable = tableToMarkdown('Anti Spyware Profiles', content) context.update({""Panorama.Spyware(val.Name == obj.Name)"": content}) if 'virus' in security_profiles: profiles = security_profiles.get('virus').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Decoder': antivirus_rules }) else: rules = profiles.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': antivirus_rules } human_readable += tableToMarkdown('Antivirus Profiles', content) context.update({""Panorama.Antivirus(val.Name == obj.Name)"": content}) if 'file-blocking' in security_profiles: profiles = security_profiles.get('file-blocking').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': file_blocking_rules }) else: rules = profiles.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': file_blocking_rules } human_readable += tableToMarkdown('File Blocking Profiles', content) context.update({""Panorama.FileBlocking(val.Name == obj.Name)"": content}) if 'vulnerability' in security_profiles: profiles = 
security_profiles.get('vulnerability').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': vulnerability_rules }) else: rules = profiles.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': vulnerability_rules } human_readable += tableToMarkdown('vulnerability Protection Profiles', content) context.update({""Panorama.Vulnerability(val.Name == obj.Name)"": content}) if 'data-filtering' in security_profiles: profiles = security_profiles.get('data-filtering').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': data_filtering_rules }) else: rules = profiles.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content = { 'Name': profiles['@name'], 'Rules': data_filtering_rules } human_readable += tableToMarkdown('Data Filtering Profiles', content) context.update({""Panorama.DataFiltering(val.Name == obj.Name)"": content}) if 'url-filtering' in security_profiles: profiles = security_profiles.get('url-filtering').get('entry', []) if isinstance(profiles, list): for profile in profiles: url_filtering_rules = prettify_get_url_filter(profile) content.append({ 'Name': profile['@name'], 'Rules': url_filtering_rules }) else: url_filtering_rules = prettify_get_url_filter(profiles) content = { 'Name': profiles['@name'], 'Rules': url_filtering_rules } human_readable += tableToMarkdown('URL Filtering Profiles', content) context.update({""Panorama.URLFilter(val.Name == obj.Name)"": content}) if 'wildfire-analysis' in security_profiles: profiles = security_profiles.get('wildfire-analysis').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': wildfire_rules }) else: rules = profiles.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content = { 'Name': profiles['@name'], 'Rules': wildfire_rules } human_readable += tableToMarkdown('WildFire Profiles', content) context.update({""Panorama.WildFire(val.Name == obj.Name)"": content}) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': context }) ","def get_security_profiles_command(): """""" Get information about profiles. 
"""""" security_profile = demisto.args().get('security_profile') if security_profile: xpath = f'{XPATH_RULEBASE}profiles/{security_profile}' else: xpath = f'{XPATH_RULEBASE}profiles' result = get_security_profile(xpath) if security_profile: security_profiles = result.get('response', {}).get('result', {}) else: security_profiles = result.get('response', {}).get('result', {}).get('profiles', {}) if '@dirtyId' in security_profiles: LOG(f'Found uncommitted item:\n{security_profiles}') raise Exception('Please commit the instance prior to getting the security profiles.') human_readable = '' content: List[Dict[str, Any]] = [] context = {} if 'spyware' in security_profiles: profiles = security_profiles.get('spyware').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': spyware_rules }) else: rules = profiles.get('rules', {}).get('entry', []) spyware_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': spyware_rules } human_readable = tableToMarkdown('Anti Spyware Profiles', content) context.update({""Panorama.Spyware(val.Name == obj.Name)"": content}) if 'virus' in security_profiles: profiles = security_profiles.get('virus').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Decoder': antivirus_rules }) else: rules = profiles.get('decoder', {}).get('entry', []) antivirus_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': antivirus_rules } human_readable += tableToMarkdown('Antivirus Profiles', content) context.update({""Panorama.Antivirus(val.Name == obj.Name)"": content}) if 'file-blocking' in security_profiles: profiles = security_profiles.get('file-blocking').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': file_blocking_rules }) else: rules = profiles.get('rules', {}).get('entry', []) file_blocking_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': file_blocking_rules } human_readable += tableToMarkdown('File Blocking Profiles', content) context.update({""Panorama.FileBlocking(val.Name == obj.Name)"": content}) if 'vulnerability' in security_profiles: profiles = security_profiles.get('vulnerability').get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': vulnerability_rules }) else: rules = profiles.get('rules', {}).get('entry', []) vulnerability_rules = prettify_profiles_rules(rules) content = { 'Name': profiles['@name'], 'Rules': vulnerability_rules } human_readable += tableToMarkdown('vulnerability Protection Profiles', content) context.update({""Panorama.Vulnerability(val.Name == obj.Name)"": content}) if 'data-filtering' in security_profiles: profiles = security_profiles.get('data-filtering', {}).get('entry', {}) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content.append({ 'Name': 
profile['@name'], 'Rules': data_filtering_rules }) else: rules = profiles.get('rules', {}).get('entry', []) data_filtering_rules = prettify_data_filtering_rules(rules) content = { 'Name': profiles['@name'], 'Rules': data_filtering_rules } human_readable += tableToMarkdown('Data Filtering Profiles', content) context.update({""Panorama.DataFiltering(val.Name == obj.Name)"": content}) if 'url-filtering' in security_profiles: profiles = security_profiles.get('url-filtering').get('entry', []) if isinstance(profiles, list): for profile in profiles: url_filtering_rules = prettify_get_url_filter(profile) content.append({ 'Name': profile['@name'], 'Rules': url_filtering_rules }) else: url_filtering_rules = prettify_get_url_filter(profiles) content = { 'Name': profiles['@name'], 'Rules': url_filtering_rules } human_readable += tableToMarkdown('URL Filtering Profiles', content) context.update({""Panorama.URLFilter(val.Name == obj.Name)"": content}) if 'wildfire-analysis' in security_profiles: profiles = security_profiles.get('wildfire-analysis').get('entry', []) if isinstance(profiles, list): for profile in profiles: rules = profile.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content.append({ 'Name': profile['@name'], 'Rules': wildfire_rules }) else: rules = profiles.get('rules', {}).get('entry', []) wildfire_rules = prettify_wildfire_rules(rules) content = { 'Name': profiles['@name'], 'Rules': wildfire_rules } human_readable += tableToMarkdown('WildFire Profiles', content) context.update({""Panorama.WildFire(val.Name == obj.Name)"": content}) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': context }) " 23620,"def fit_sandia(curves, p_ac_0, p_nt): r''' Determine parameters for the Sandia inverter model from efficiency curves. Parameters ---------- curves : DataFrame Columns must be ``'fraction_of_rated_power'``, ``'dc_voltage_level'``, ``'dc_voltage'``, ``'ac_power'``, ``'efficiency'``. See notes for the definition and unit for each column. p_ac_0 : numeric Rated AC power of the inverter [W]. p_nt : numeric Night tare, i.e., power consumed while inverter is not delivering AC power. [W] Returns ------- dict with parameters for the Sandia inverter model. See :py:func:`snl_inverter` for a description of entries in the returned dict. See Also -------- snlinverter Notes ----- An inverter efficiency curve comprises a series of pairs ('fraction_of_rated_power', 'efficiency'), e.g. (0.1, 0.5), (0.2, 0.7), etc. at a specified DC voltage level and AC power level. The DataFrame `curves` must contain at least one efficiency curve for each combination of DC voltage level and AC power level. Columns in `curves` must be the following: ================ ======================================== Column name Description ================ ======================================== 'fraction_of_rated_power' Fraction of rated AC power `p_ac_0`. The CEC inverter test protocol specifies values of 0.1, 0.2, 0.3, 0.5, 0.75 and 1.0. [unitless] 'dc_voltage_level' Must be 'Vmin', 'Vnom', or 'Vmax'. Curves must be provided for all three voltage levels. At least one curve must be provided for each combination of fraction_of_rated_power and dc_voltage_level. 'dc_voltage' DC input voltage. [V] 'ac_power' Output AC power. [W] 'efficiency' Ratio of AC output power to DC input power. 
[unitless] For each curve, DC input power is calculated from AC power and efficiency. References ---------- .. [1] SAND2007-5036, ""Performance Model for Grid-Connected Photovoltaic Inverters by D. King, S. Gonzalez, G. Galbraith, W. Boyson .. [2] Sandia Inverter Model page, PV Performance Modeling Collaborative https://pvpmc.sandia.gov/modeling-steps/dc-to-ac-conversion/sandia-inverter-model/ # noqa: E501 ''' voltage_levels = ['Vmin', 'Vnom', 'Vmax'] # average dc input voltage at each voltage level v_d = np.array( [curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmin'].mean(), curves['dc_voltage'][curves['dc_voltage_level'] == 'Vnom'].mean(), curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmax'].mean()]) v_nom = v_d[1] # model parameter # independent variable for regressions, x_d x_d = v_d - v_nom curves['dc_power'] = curves['ac_power'] / curves['efficiency'] # empty dataframe to contain intermediate variables coeffs = pd.DataFrame(index=voltage_levels, columns=['a', 'b', 'c', 'p_dc', 'p_s0'], data=np.nan) def solve_quad(a, b, c): return (-b + (b**2 - 4 * a * c)**.5) / (2 * a) # [2] STEP 3E, fit a line to (DC voltage, model_coefficient) def extract_c(x_d, add): test = polyfit(x_d, add, 1) beta0, beta1 = test c = beta1 / beta0 return beta0, beta1, c for d in voltage_levels: x = curves['dc_power'][curves['dc_voltage_level'] == d] y = curves['ac_power'][curves['dc_voltage_level'] == d] # [2] STEP 3B # fit a quadratic to (DC power, AC power) c, b, a = polyfit(x, y, 2) # [2] STEP 3D, solve for p_dc and p_s0 p_dc = solve_quad(a, b, (c - p_ac_0)) p_s0 = solve_quad(a, b, c) # Add values to dataframe at index d coeffs['a'][d] = a coeffs['b'][d] = b coeffs['c'][d] = c coeffs['p_dc'][d] = p_dc coeffs['p_s0'][d] = p_s0 b_dc0, b_dc1, c1 = extract_c(x_d, coeffs['p_dc']) b_s0, b_s1, c2 = extract_c(x_d, coeffs['p_s0']) b_c0, b_c1, c3 = extract_c(x_d, coeffs['a']) p_dc0 = b_dc0 p_s0 = b_s0 c0 = b_c0 # prepare dict and return return {'Paco': p_ac_0, 'Pdco': p_dc0, 'Vdco': v_nom, 'Pso': p_s0, 'C0': c0, 'C1': c1, 'C2': c2, 'C3': c3, 'Pnt': p_nt} ","def fit_sandia(curves, p_ac_0, p_nt): r''' Determine parameters for the Sandia inverter model from efficiency curves. Parameters ---------- curves : DataFrame Columns must be ``'fraction_of_rated_power'``, ``'dc_voltage_level'``, ``'dc_voltage'``, ``'ac_power'``, ``'efficiency'``. See notes for the definition and unit for each column. p_ac_0 : numeric Rated AC power of the inverter [W]. p_nt : numeric Night tare, i.e., power consumed while inverter is not delivering AC power. [W] Returns ------- dict with parameters for the Sandia inverter model. See :py:func:`snl_inverter` for a description of entries in the returned dict. See Also -------- pvlib.inverter.sandia Notes ----- An inverter efficiency curve comprises a series of pairs ('fraction_of_rated_power', 'efficiency'), e.g. (0.1, 0.5), (0.2, 0.7), etc. at a specified DC voltage level and AC power level. The DataFrame `curves` must contain at least one efficiency curve for each combination of DC voltage level and AC power level. Columns in `curves` must be the following: ================ ======================================== Column name Description ================ ======================================== 'fraction_of_rated_power' Fraction of rated AC power `p_ac_0`. The CEC inverter test protocol specifies values of 0.1, 0.2, 0.3, 0.5, 0.75 and 1.0. [unitless] 'dc_voltage_level' Must be 'Vmin', 'Vnom', or 'Vmax'. Curves must be provided for all three voltage levels. 
At least one curve must be provided for each combination of fraction_of_rated_power and dc_voltage_level. 'dc_voltage' DC input voltage. [V] 'ac_power' Output AC power. [W] 'efficiency' Ratio of AC output power to DC input power. [unitless] For each curve, DC input power is calculated from AC power and efficiency. References ---------- .. [1] SAND2007-5036, ""Performance Model for Grid-Connected Photovoltaic Inverters by D. King, S. Gonzalez, G. Galbraith, W. Boyson .. [2] Sandia Inverter Model page, PV Performance Modeling Collaborative https://pvpmc.sandia.gov/modeling-steps/dc-to-ac-conversion/sandia-inverter-model/ # noqa: E501 ''' voltage_levels = ['Vmin', 'Vnom', 'Vmax'] # average dc input voltage at each voltage level v_d = np.array( [curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmin'].mean(), curves['dc_voltage'][curves['dc_voltage_level'] == 'Vnom'].mean(), curves['dc_voltage'][curves['dc_voltage_level'] == 'Vmax'].mean()]) v_nom = v_d[1] # model parameter # independent variable for regressions, x_d x_d = v_d - v_nom curves['dc_power'] = curves['ac_power'] / curves['efficiency'] # empty dataframe to contain intermediate variables coeffs = pd.DataFrame(index=voltage_levels, columns=['a', 'b', 'c', 'p_dc', 'p_s0'], data=np.nan) def solve_quad(a, b, c): return (-b + (b**2 - 4 * a * c)**.5) / (2 * a) # [2] STEP 3E, fit a line to (DC voltage, model_coefficient) def extract_c(x_d, add): test = polyfit(x_d, add, 1) beta0, beta1 = test c = beta1 / beta0 return beta0, beta1, c for d in voltage_levels: x = curves['dc_power'][curves['dc_voltage_level'] == d] y = curves['ac_power'][curves['dc_voltage_level'] == d] # [2] STEP 3B # fit a quadratic to (DC power, AC power) c, b, a = polyfit(x, y, 2) # [2] STEP 3D, solve for p_dc and p_s0 p_dc = solve_quad(a, b, (c - p_ac_0)) p_s0 = solve_quad(a, b, c) # Add values to dataframe at index d coeffs['a'][d] = a coeffs['b'][d] = b coeffs['c'][d] = c coeffs['p_dc'][d] = p_dc coeffs['p_s0'][d] = p_s0 b_dc0, b_dc1, c1 = extract_c(x_d, coeffs['p_dc']) b_s0, b_s1, c2 = extract_c(x_d, coeffs['p_s0']) b_c0, b_c1, c3 = extract_c(x_d, coeffs['a']) p_dc0 = b_dc0 p_s0 = b_s0 c0 = b_c0 # prepare dict and return return {'Paco': p_ac_0, 'Pdco': p_dc0, 'Vdco': v_nom, 'Pso': p_s0, 'C0': c0, 'C1': c1, 'C2': c2, 'C3': c3, 'Pnt': p_nt} " 13422,"def test_player_corner_cases(): n, m = 3, 4 player = Player(np.zeros((n, m))) for action in range(n): assert_(player.is_best_response(action, [1 / m] * m)) for method in LP_METHODS: assert_(player.is_dominated(action, method=method) == False) e = 1e-8 * 2 player = Player([[-e, -e], [1, -1], [-1, 1]]) action = 0 assert_(player.is_best_response(action, [1 / 2, 1 / 2], tol=e)) assert_(not player.is_best_response(action, [1 / 2, 1 / 2], tol=e / 2)) for method in LP_METHODS: assert_(player.is_dominated(action, tol=2 * e, method=method) == False) assert_(player.dominated_actions(tol=2 * e, method=method) == []) assert_(player.is_dominated(action, tol=e / 2, method=method)) assert_(player.dominated_actions(tol=e / 2, method=method) == [action]) ","def test_player_corner_cases(): n, m = 3, 4 player = Player(np.zeros((n, m))) for action in range(n): assert_(player.is_best_response(action, [1 / m] * m)) for method in LP_METHODS: assert_(player.is_dominated(action, method=method) is False) e = 1e-8 * 2 player = Player([[-e, -e], [1, -1], [-1, 1]]) action = 0 assert_(player.is_best_response(action, [1 / 2, 1 / 2], tol=e)) assert_(not player.is_best_response(action, [1 / 2, 1 / 2], tol=e / 2)) for method in LP_METHODS: 
assert_(player.is_dominated(action, tol=2 * e, method=method) == False) assert_(player.dominated_actions(tol=2 * e, method=method) == []) assert_(player.is_dominated(action, tol=e / 2, method=method)) assert_(player.dominated_actions(tol=e / 2, method=method) == [action]) " 34371,"def export_trackers(args: argparse.Namespace) -> None: """"""Export events for a connected tracker store using an event broker. Args: args: Command-line arguments to process. """""" _inspect_timestamp_options(args) endpoints = _get_available_endpoints(args.endpoints) rasa_tracker_store = _get_rasa_tracker_store(endpoints) event_broker = _get_event_broker(endpoints) _prepare_pika_producer(event_broker) requested_conversation_ids = _get_requested_conversation_ids(args.conversation_ids) conversation_ids_to_process = _get_conversation_ids_to_process( rasa_tracker_store, requested_conversation_ids ) _publish_events( rasa_tracker_store, event_broker, conversation_ids_to_process, args.minimum_timestamp, args.maximum_timestamp, args.endpoints, requested_conversation_ids, ) ","def export_trackers(args: argparse.Namespace) -> None: """"""Export events for a connected tracker store using an event broker. Args: args: Command-line arguments to process. """""" _validate_timestamp_options(args) endpoints = _get_available_endpoints(args.endpoints) rasa_tracker_store = _get_rasa_tracker_store(endpoints) event_broker = _get_event_broker(endpoints) _prepare_pika_producer(event_broker) requested_conversation_ids = _get_requested_conversation_ids(args.conversation_ids) conversation_ids_to_process = _get_conversation_ids_to_process( rasa_tracker_store, requested_conversation_ids ) _publish_events( rasa_tracker_store, event_broker, conversation_ids_to_process, args.minimum_timestamp, args.maximum_timestamp, args.endpoints, requested_conversation_ids, ) " 32263,"def main(command: str, demisto_params: dict): box_credentials = BoxCredentials.parse_raw( demisto_params['credentials_json']['password'] ) request = BoxEventsRequest( params=BoxEventsParams.parse_obj(demisto_params), **demisto_params, ) # If you're not using basic auth or Bearer __token_, you should implement your own # set_authorization(request, demisto_params['auth_credendtials']) options = IntegrationOptions.parse_obj(demisto_params) client = BoxEventsClient(request, options, box_credentials) get_events = BoxGetEvents(client, options) if command == 'test-module': get_events.client.request.params.limit = 1 get_events.run() demisto.results('ok') return demisto.debug('not in test module, running box-get-events') events = get_events.run() demisto.debug(f'got {len(events)=} from api') if command == 'box-get-events': demisto.debug('box-get-events, publishing events to incident') return_results(CommandResults('BoxEvents', 'event_id', events)) else: demisto.debug('in event collection') if events: demisto.debug('publishing events') demisto.setLastRun(get_events.get_last_run()) send_events_to_xsiam(events, 'box', 'box') else: demisto.debug('no events found, finishing script.') ","def main(command: str, demisto_params: dict): box_credentials = BoxCredentials.parse_raw( demisto_params['credentials_json']['password'] ) request = BoxEventsRequest( params=BoxEventsParams.parse_obj(demisto_params), **demisto_params, ) # If you're not using basic auth or Bearer __token__, you should implement your own # set_authorization(request, demisto_params['auth_credendtials']) options = IntegrationOptions.parse_obj(demisto_params) client = BoxEventsClient(request, options, box_credentials) get_events = 
BoxGetEvents(client, options) if command == 'test-module': get_events.client.request.params.limit = 1 get_events.run() demisto.results('ok') return demisto.debug('not in test module, running box-get-events') events = get_events.run() demisto.debug(f'got {len(events)=} from api') if command == 'box-get-events': demisto.debug('box-get-events, publishing events to incident') return_results(CommandResults('BoxEvents', 'event_id', events)) else: demisto.debug('in event collection') if events: demisto.debug('publishing events') demisto.setLastRun(get_events.get_last_run()) send_events_to_xsiam(events, 'box', 'box') else: demisto.debug('no events found, finishing script.') " 23288,"def copy_and_keep_build(site): """""" Copies each site into the target location and keep last ""n"" builts as backups """""" global error_count for wiki in ALL_WIKIS: if site=='common': continue if not site==None and not site==wiki: continue debug('copy: %s' % wiki) targetdir = os.path.join(args.destdir, wiki) debug(""Creating temporary folders"") previousdir = os.path.join(args.backupdestdir) debug('Recreating %s' % previousdir ) if not os.path.exists(previousdir): os.mkdir(previousdir) olddir = os.path.join(previousdir, str(building_time) ) debug('Recreating %s' % olddir ) if not os.path.exists(olddir): os.mkdir(olddir) if os.path.exists(targetdir): debug('Moving %s into %s' % (targetdir,olddir) ) shutil.move(targetdir, olddir) sourcedir='./%s/build/html/' % wiki html_moved_dir = os.path.join(args.destdir, 'html') try: subprocess.check_call(['mv', sourcedir, html_moved_dir]) # Rename move! (single move to html/* failed) subprocess.check_call(['mv', html_moved_dir ,targetdir]) debug(""Moved to %s"" % targetdir) except: error(""FAIL moving output to %s"" % targetdir) finally: debug(""Creating a backup in %s"" % olddir) subprocess.check_call(['cp', '-r', targetdir ,olddir]) delete_old_wiki_backups(previousdir, N_BACKUPS_RETAIM) ","def copy_and_keep_build(site): """""" Copies each site into the target location and keep last ""n"" builts as backups """""" global error_count for wiki in ALL_WIKIS: if site=='common': continue if site is not None and site != wiki: continue debug('copy: %s' % wiki) targetdir = os.path.join(args.destdir, wiki) debug(""Creating temporary folders"") previousdir = os.path.join(args.backupdestdir) debug('Recreating %s' % previousdir ) if not os.path.exists(previousdir): os.mkdir(previousdir) olddir = os.path.join(previousdir, str(building_time) ) debug('Recreating %s' % olddir ) if not os.path.exists(olddir): os.mkdir(olddir) if os.path.exists(targetdir): debug('Moving %s into %s' % (targetdir,olddir) ) shutil.move(targetdir, olddir) sourcedir='./%s/build/html/' % wiki html_moved_dir = os.path.join(args.destdir, 'html') try: subprocess.check_call(['mv', sourcedir, html_moved_dir]) # Rename move! (single move to html/* failed) subprocess.check_call(['mv', html_moved_dir ,targetdir]) debug(""Moved to %s"" % targetdir) except: error(""FAIL moving output to %s"" % targetdir) finally: debug(""Creating a backup in %s"" % olddir) subprocess.check_call(['cp', '-r', targetdir ,olddir]) delete_old_wiki_backups(previousdir, N_BACKUPS_RETAIM) " 32186,"def fetch_incidents(client: AzureSentinelClient, last_run: dict, first_fetch_time: str, min_severity: int): """"""Fetching incidents. Args: first_fetch_time: The first fetch time. client: An AzureSentinelClient client. last_run: An dictionary of the last run. min_severity: A minimum severity of incidents to fetch. Returns: (tuple): 1. 
The LastRun object updated with the last run details. 2. An array of incidents. """""" # Get the last fetch details, if exist last_fetch_time = last_run.get('last_fetch_time') last_fetch_ids = last_run.get('last_fetch_ids', []) last_incident_number = last_run.get('last_incident_number') demisto.debug(f""{last_fetch_time=}, {last_fetch_ids=}, {last_incident_number=}"") if last_fetch_time is None or last_incident_number is None: demisto.debug(""handle via timestamp"") if last_fetch_time is None: last_fetch_time_str, _ = parse_date_range(first_fetch_time, DATE_FORMAT) latest_created_time = dateparser.parse(last_fetch_time_str) else: latest_created_time = dateparser.parse(last_fetch_time) assert latest_created_time is not None latest_created_time_str = latest_created_time.strftime(DATE_FORMAT) command_args = { 'filter': f'properties/createdTimeUtc ge {latest_created_time_str}', 'orderby': 'properties/createdTimeUtc asc', } raw_incidents = list_incidents_command(client, command_args, is_fetch_incidents=True).outputs else: demisto.debug(""handle via id"") latest_created_time = dateparser.parse(last_fetch_time) assert latest_created_time is not None, f""dateparser.parse(last_fetch_time):"" \ f"" {dateparser.parse(last_fetch_time)} couldnt be parsed"" command_args = { 'filter': f'properties/incidentNumber gt {last_incident_number}', 'orderby': 'properties/incidentNumber asc', } raw_incidents = list_incidents_command(client, command_args, is_fetch_incidents=True).outputs return process_incidents(raw_incidents, last_fetch_ids, min_severity, latest_created_time, last_incident_number) # type: ignore ","def fetch_incidents(client: AzureSentinelClient, last_run: dict, first_fetch_time: str, min_severity: int): """"""Fetching incidents. Args: first_fetch_time: The first fetch time. client: An AzureSentinelClient client. last_run: An dictionary of the last run. min_severity: A minimum severity of incidents to fetch. Returns: (tuple): 1. The LastRun object updated with the last run details. 2. An array of incidents. """""" # Get the last fetch details, if exist last_fetch_time = last_run.get('last_fetch_time') last_fetch_ids = last_run.get('last_fetch_ids', []) last_incident_number = last_run.get('last_incident_number') demisto.debug(f""{last_fetch_time=}, {last_fetch_ids=}, {last_incident_number=}"") if last_fetch_time is None or last_incident_number is None: demisto.debug(""handle via timestamp"") if last_fetch_time is None: last_fetch_time_str, _ = parse_date_range(first_fetch_time, DATE_FORMAT) latest_created_time = dateparser.parse(last_fetch_time_str) else: latest_created_time = dateparser.parse(last_fetch_time) assert latest_created_time, f'Got empty latest_created_time. 
{last_fetch_time_str=} {last_fetch_time=}' latest_created_time_str = latest_created_time.strftime(DATE_FORMAT) command_args = { 'filter': f'properties/createdTimeUtc ge {latest_created_time_str}', 'orderby': 'properties/createdTimeUtc asc', } raw_incidents = list_incidents_command(client, command_args, is_fetch_incidents=True).outputs else: demisto.debug(""handle via id"") latest_created_time = dateparser.parse(last_fetch_time) assert latest_created_time is not None, f""dateparser.parse(last_fetch_time):"" \ f"" {dateparser.parse(last_fetch_time)} couldnt be parsed"" command_args = { 'filter': f'properties/incidentNumber gt {last_incident_number}', 'orderby': 'properties/incidentNumber asc', } raw_incidents = list_incidents_command(client, command_args, is_fetch_incidents=True).outputs return process_incidents(raw_incidents, last_fetch_ids, min_severity, latest_created_time, last_incident_number) # type: ignore " 57670,"def build_where_clause(args: dict) -> str: """""" This function transforms the relevant entries of dict into the where part of a SQL query Args: args: The arguments dict Returns: A string represents the where part of a SQL query """""" args_dict = { 'source_ip': 'source_ip.value', 'dest_ip': 'dest_ip.value', 'rule_matched': 'rule_matched', 'from_zone': 'from_zone', 'to_zone': 'to_zone', 'source_port': 'source_port', 'dest_port': 'dest_port', 'action': 'action.value', 'file_sha_256': 'file_sha_256', 'file_name': 'file_name', } if args.get('ip') and args.get('source_ip') or args.get('ip') and args.get('dest_ip'): raise DemistoException('Error: You cant enter the ""ip"" argument with either ""source_ip"" nor ""dest_ip"" ' 'arguments') if args.get('port') and args.get('source_port') or args.get('port') and args.get('dest_port'): raise DemistoException('Error: You cant enter the ""port"" argument with either ""source_port"" nor ""dest_port"" ' 'arguments') non_string_keys = {'dest_port', 'source_port'} if 'query' in args: # if query arg is supplied than we just need to parse it and only it return args['query'].strip() where_clause = '' if args.get('ip'): ips = argToList(args.pop('ip')) where_clause += '(' + ' OR '.join(f'source_ip.value = ""{ip}"" OR dest_ip.value = ""{ip}""' for ip in ips) + ')' if any(args.get(key) for key in args_dict) or args.get('port'): where_clause += ' AND ' if args.get('port'): ports = argToList(args.pop('port')) where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')' if any(args.get(key) for key in args_dict): where_clause += ' AND ' # We want to add only keys that are part of the query string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys} or_statements = [] for key, values in string_query_fields.items(): string_values_list: list = argToList(values) field = args_dict[key] or_statements.append(' OR '.join([f'{field} = ""{value}""' for value in string_values_list])) # ports are digested as ints and cannot be sent as strings non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys} for key, values in non_string_query_fields.items(): non_string_values_list: list = argToList(values) field = args_dict[key] or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list])) where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement]) return where_clause ","def build_where_clause(args: dict) -> str: """""" This function transforms the relevant 
entries of dict into the where part of a SQL query Args: args: The arguments dict Returns: A string represents the where part of a SQL query """""" args_dict = { 'source_ip': 'source_ip.value', 'dest_ip': 'dest_ip.value', 'rule_matched': 'rule_matched', 'from_zone': 'from_zone', 'to_zone': 'to_zone', 'source_port': 'source_port', 'dest_port': 'dest_port', 'action': 'action.value', 'file_sha_256': 'file_sha_256', 'file_name': 'file_name', } if args.get('ip') and args.get('source_ip') or args.get('ip') and args.get('dest_ip'): raise DemistoException('Error: You cant enter the ""ip"" argument with either ""source_ip"" nor ""dest_ip"" ' 'arguments') if args.get('port') and args.get('source_port') or args.get('port') and args.get('dest_port'): raise DemistoException('Error: ""ip"" argument cannot appear with either ""source_ip"" nor ""dest_ip"" ' 'arguments') non_string_keys = {'dest_port', 'source_port'} if 'query' in args: # if query arg is supplied than we just need to parse it and only it return args['query'].strip() where_clause = '' if args.get('ip'): ips = argToList(args.pop('ip')) where_clause += '(' + ' OR '.join(f'source_ip.value = ""{ip}"" OR dest_ip.value = ""{ip}""' for ip in ips) + ')' if any(args.get(key) for key in args_dict) or args.get('port'): where_clause += ' AND ' if args.get('port'): ports = argToList(args.pop('port')) where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')' if any(args.get(key) for key in args_dict): where_clause += ' AND ' # We want to add only keys that are part of the query string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys} or_statements = [] for key, values in string_query_fields.items(): string_values_list: list = argToList(values) field = args_dict[key] or_statements.append(' OR '.join([f'{field} = ""{value}""' for value in string_values_list])) # ports are digested as ints and cannot be sent as strings non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys} for key, values in non_string_query_fields.items(): non_string_values_list: list = argToList(values) field = args_dict[key] or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list])) where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement]) return where_clause " 59401,"def crz(self, theta, ctl, tgt): """"""Apply cRz gate from a specified control (ctl) to target (tgt) qubit with angle theta. The cRz gate applies Rz gate on the target qubit when the control qubit is in state |1>. Examples: Construct a circuit with cRz gate. .. jupyter-execute:: from qiskit import QuantumCircuit circuit = QuantumCircuit(2) circuit.x(0) # This brings the quantum state from |0> to |1> circuit.ry(numpy.pi/2,1) # This brings the quantum state from |0> to |+> theta = numpy.pi/2 circuit.crz(theta,0,1) circuit.draw() Resulting Statevector: [ 0+0j, 0.5-0.5j, 0+0j, 0.5+0.5j ] """""" return self.append(CrzGate(theta), [ctl, tgt], []) ","def crz(self, theta, ctl, tgt): """"""Apply cRz gate from a specified control (ctl) to target (tgt) qubit with angle theta. The cRz gate applies Rz gate on the target qubit when the control qubit is in state |1>. Examples: Construct a circuit with cRz gate. .. 
jupyter-execute:: import numpy from qiskit import QuantumCircuit circuit = QuantumCircuit(2) circuit.x(0) # This brings the quantum state from |0> to |1> circuit.ry(numpy.pi/2,1) # This brings the quantum state from |0> to |+> theta = numpy.pi/2 circuit.crz(theta,0,1) circuit.draw() Resulting Statevector: [ 0+0j, 0.5-0.5j, 0+0j, 0.5+0.5j ] """""" return self.append(CrzGate(theta), [ctl, tgt], []) " 3521,"def rtd_parse_tags(tag_string): """""" Parses a string into its tags. - Lowercases all tags - Converts underscores to hyphens - Slugifies tags - Removes empty tags :see: https://django-taggit.readthedocs.io/page/custom_tagging.html :param tag_string: a delimited string of tags :return: a sorted list of tag strings """""" if tag_string: tag_string = tag_string.lower().replace('_', '-') tags = [slugify(tag) for tag in _parse_tags(tag_string)] return sorted([tag for tag in tags if tag]) ","def rtd_parse_tags(tag_string): """""" Parses a string into its tags. - Lowercases all tags - Converts underscores to hyphens - Slugifies tags - Removes empty tags :see: https://django-taggit.readthedocs.io/page/custom_tagging.html :param tag_string: a delimited string of tags :return: a sorted list of tag strings """""" if tag_string: tag_string = tag_string.lower().replace('_', '-') tags = (slugify(tag) for tag in _parse_tags(tag_string)) return sorted([tag for tag in tags if tag]) " 40373,"def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]], edge_index: Tensor, edge_attr: Optional[Tensor] = None, relabel_nodes: bool = False, num_nodes: Tuple[int, int] = None, return_edge_mask: bool = False): r""""""Returns the induced subgraph of :obj:`(edge_index, edge_attr)` containing the nodes in :obj:`subset`, for a bipartite graph. Args: subset (PairTensor or tuple([int],[int])): The nodes to keep. edge_index (LongTensor): The edge indices. edge_attr (Tensor, optional): Edge weights or multi-dimensional edge features. (default: :obj:`None`) relabel_nodes (bool, optional): If set to :obj:`True`, the resulting :obj:`edge_index` will be relabeled to hold consecutive indices starting from zero. (default: :obj:`False`) num_nodes (tuple, optional): The number of nodes. (default: :obj:`None`) return_edge_mask (bool, optional): If set to :obj:`True`, will return the edge mask to filter out additional edge features. 
(default: :obj:`False`) :rtype: (:class:`LongTensor`, :class:`Tensor`) """""" device = edge_index.device if isinstance(subset[0], (list, tuple)): subset = (torch.tensor(subset[0], dtype=torch.long, device=device), torch.tensor(subset[1], dtype=torch.long, device=device)) if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8: num_nodes = subset[0].size(0), subset[1].size(0) else: if num_nodes is None: num_nodes = (maybe_num_nodes(edge_index[0]), maybe_num_nodes(edge_index[1])) subset = (index_to_mask(subset[0], size=num_nodes[0]), index_to_mask(subset[1], size=num_nodes[1])) node_mask_i, node_mask_j = subset[0], subset[1] edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long, device=device) node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long, device=device) node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(), device=device) node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(), device=device) edge_index = torch.stack( [node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]]) if return_edge_mask: return edge_index, edge_attr, edge_mask else: return edge_index, edge_attr ","def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]], edge_index: Tensor, edge_attr: Optional[Tensor] = None, relabel_nodes: bool = False, num_nodes: Tuple[int, int] = None, return_edge_mask: bool = False): r""""""Returns the induced subgraph of :obj:`(edge_index, edge_attr)` containing the nodes in :obj:`subset`, for a bipartite graph. Args: subset (Tuple[Tensor, Tensor] or tuple([int],[int])): The nodes to keep. edge_index (LongTensor): The edge indices. edge_attr (Tensor, optional): Edge weights or multi-dimensional edge features. (default: :obj:`None`) relabel_nodes (bool, optional): If set to :obj:`True`, the resulting :obj:`edge_index` will be relabeled to hold consecutive indices starting from zero. (default: :obj:`False`) num_nodes (tuple, optional): The number of nodes. (default: :obj:`None`) return_edge_mask (bool, optional): If set to :obj:`True`, will return the edge mask to filter out additional edge features. 
(default: :obj:`False`) :rtype: (:class:`LongTensor`, :class:`Tensor`) """""" device = edge_index.device if isinstance(subset[0], (list, tuple)): subset = (torch.tensor(subset[0], dtype=torch.long, device=device), torch.tensor(subset[1], dtype=torch.long, device=device)) if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8: num_nodes = subset[0].size(0), subset[1].size(0) else: if num_nodes is None: num_nodes = (maybe_num_nodes(edge_index[0]), maybe_num_nodes(edge_index[1])) subset = (index_to_mask(subset[0], size=num_nodes[0]), index_to_mask(subset[1], size=num_nodes[1])) node_mask_i, node_mask_j = subset[0], subset[1] edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long, device=device) node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long, device=device) node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(), device=device) node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(), device=device) edge_index = torch.stack( [node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]]) if return_edge_mask: return edge_index, edge_attr, edge_mask else: return edge_index, edge_attr " 39569,"def command_line_test_helper(remote_path, branch, pusher_path): work_dir = ""/"".join(os.path.dirname(os.path.abspath(__file__)).split(""/"")[:-1]) + ""/nbgitpuller"" try: cmd = ['python3', 'pull.py', remote_path] if branch is not None: cmd += ['--branch_name', branch] if pusher_path is not None: cmd += ['--repo_dir', pusher_path] sp.check_output( cmd, cwd=work_dir ).decode() return True except Exception: return False ","def command_line_test_helper(remote_path, branch, pusher_path): work_dir = ""/"".join(os.path.dirname(os.path.abspath(__file__)).split(""/"")[:-1]) + ""/nbgitpuller"" try: cmd = ['python3', 'pull.py', remote_path] if branch is not None: cmd += ['--branch_name', branch] if pusher_path is not None: cmd += ['--repo-dir', pusher_path] sp.check_output( cmd, cwd=work_dir ).decode() return True except Exception: return False " 30516,"def main(): params = {k: v for k, v in demisto.params().items() if v is not None} params['indicator_type'] = FeedIndicatorType.File params['indicator'] = json.dumps({ ""regex"": r""^.+,(.+),"", ""transform"": ""\\1"" }) params['fields'] = json.dumps({ ""creationdate"": { ""regex"": r""^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})"", ""transform"": ""\\1"" }, ""malwarefamily"": { ""regex"": r""^.+,.+,(.+)"", ""transform"": ""\\1"" } }) params['ignore_regex'] = '#' params['custom_fields_mapping'] = { ""creationdate"": ""creationdate"", ""malwarefamily"": ""malwarefamily"" } params['url'] = ""https://feodotracker.abuse.ch/downloads/malware_hashes.csv"" # Call the main execution of the HTTP API module. 
feed_main('Feodo Tracker Hashes Feed', params, 'feodotrackerhashes-') ","def main(): params = {k: v for k, v in demisto.params().items() if v is not None} params['indicator_type'] = FeedIndicatorType.File params['indicator'] = json.dumps({ ""regex"": r""^.+,(.+),"", ""transform"": ""\\1"" }) params['fields'] = json.dumps({ ""creationdate"": { ""regex"": r""^(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})"", ""transform"": ""\\1"" }, ""malwarefamily"": { ""regex"": r""^.+,.+,(.+)"", ""transform"": ""\\1"" } }) params['ignore_regex'] = '#' params['custom_fields_mapping'] = { ""creationdate"": ""creationdate"", ""malwarefamily"": ""malwarefamily"" } params['url'] = ""https://feodotracker.abuse.ch/downloads/malware_hashes.csv"" # Call the main execution of the HTTP API module. feed_main('Feodo Tracker Hashes Feed', params, 'feodotracker-hashes-') " 55653,"def get_hanning_kernel2d(kernel_size: Tuple[int, int], device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor: r""""""Returns 2d Hanning kernel, used in signal processing and KCF tracker Args: kernel_size: It should be positive. Returns: 2D tensor with Hanning filter coefficients. .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) Shape: - Output: math:`(\text{kernel_size[0], kernel_size[1]})` """""" if kernel_size[0] <= 2 or kernel_size[1] <= 2: raise TypeError(f""ksize must be an tuple of positive integers > 2. Got {kernel_size}"") ky: torch.Tensor = get_hanning_kernel1d(kernel_size[0], device, dtype)[None].T kx: torch.Tensor = get_hanning_kernel1d(kernel_size[1], device, dtype)[None] kernel2d = ky @ kx return kernel2d ","def get_hanning_kernel2d(kernel_size: Tuple[int, int], device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor: r""""""Returns 2d Hanning kernel, used in signal processing and KCF tracker. Args: kernel_size: It should be positive. Returns: 2D tensor with Hanning filter coefficients. .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) Shape: - Output: math:`(\text{kernel_size[0], kernel_size[1]})` """""" if kernel_size[0] <= 2 or kernel_size[1] <= 2: raise TypeError(f""ksize must be an tuple of positive integers > 2. Got {kernel_size}"") ky: torch.Tensor = get_hanning_kernel1d(kernel_size[0], device, dtype)[None].T kx: torch.Tensor = get_hanning_kernel1d(kernel_size[1], device, dtype)[None] kernel2d = ky @ kx return kernel2d " 52287,"def get_parser(): parser = SCTArgumentParser( description='Compute SNR using methods described in [Dietrich et al., Measurement of' ' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel ' 'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].' ) mandatoryArguments = parser.add_argument_group(""\nMANDATORY ARGUMENTS"") mandatoryArguments.add_argument( '-i', required=True, help='3D or 4D data to compute the SNR on (along the 4th dimension). Example: b0s.nii.gz', metavar=Metavar.file) optional = parser.add_argument_group(""\nOPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit"") optional.add_argument( '-m', help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz', metavar=Metavar.file, default='') optional.add_argument( '-m-noise', help=""Binary (or weighted) mask within which noise will be calculated. 
Only valid for '-method single'."", metavar=Metavar.file, default='') optional.add_argument( '-method', help='R|Method to use to compute the SNR (default: diff):\n' ""- diff: Substract two volumes (defined by -vol) and estimate noise variance within the ROI "" ""(flag '-m' is required). Requires a 4D volume.\n"" ""- mult: Estimate noise variance over time across volumes specified with '-vol'. Requires a 4D volume.\n"" ""- single: Estimates noise variance in a 5x5 square at the corner of the image, and average the mean "" ""signal inside the ROI specified by flag '-m'. The variance and mean are corrected for Rayleigh "" ""distributions. This corresponds to the cases SNRstd and SNRmean in the Dietrich et al. article. Uses a "" ""3D or a 4D volume. If a 4D volume is input, the volume to compute SNR on is specified by '-vol'."", choices=('diff', 'mult', 'single'), default='diff') optional.add_argument( '-vol', help=""Volumes to compute SNR from. Separate with ',' (Example: '-vol 0,1'), or select range "" ""using ':' (Example: '-vol 2:50'). By default, all volumes in are selected, except if '-method single' "" ""in which case the first volume is selected."", metavar=Metavar.str, default='') optional.add_argument( '-r', type=int, help='Remove temporary files.', default=1, choices=(0, 1)) optional.add_argument( '-v', metavar=Metavar.int, type=int, choices=[0, 1, 2], default=1, # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as ""if verbose == #"" in API help=""Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"") optional.add_argument( '-o', metavar=Metavar.str, type=str, default=None, help=""File name to write the computed SNR to."" ) return parser ","def get_parser(): parser = SCTArgumentParser( description='Compute SNR using methods described in [Dietrich et al., Measurement of' ' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel ' 'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].' ) mandatoryArguments = parser.add_argument_group(""\nMANDATORY ARGUMENTS"") mandatoryArguments.add_argument( '-i', required=True, help='3D or 4D data to compute the SNR on (along the 4th dimension). Example: b0s.nii.gz', metavar=Metavar.file) optional = parser.add_argument_group(""\nOPTIONAL ARGUMENTS"") optional.add_argument( ""-h"", ""--help"", action=""help"", help=""Show this help message and exit"") optional.add_argument( '-m', help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz', metavar=Metavar.file, default='') optional.add_argument( '-m-noise', help=""Binary (or weighted) mask within which noise will be calculated. Only valid for '-method single'."", metavar=Metavar.file, default='') optional.add_argument( '-method', help='R|Method to use to compute the SNR (default: diff):\n' ""- diff: Substract two volumes (defined by -vol) and estimate noise variance within the ROI "" ""(flag '-m' is required). Requires a 4D volume.\n"" ""- mult: Estimate noise variance over time across volumes specified with '-vol'. Requires a 4D volume.\n"" ""- single: Estimates noise variance in a 5x5 square at the corner of the image, and average the mean "" ""signal inside the ROI specified by flag '-m'. The variance and mean are corrected for Rayleigh "" ""distributions. This corresponds to the cases SNRstd and SNRmean in the Dietrich et al. article. Uses a "" ""3D or a 4D volume. 
If a 4D volume is input, the volume to compute SNR on is specified by '-vol'."", choices=('diff', 'mult', 'single'), default='diff') optional.add_argument( '-vol', help=""R|Volumes to compute SNR from. Separate with ',' (Example: '-vol 0,1'), or select range "" ""using ':' (Example: '-vol 2:50'). If this argument is not passed:\n"" "" - For '-method mult' and '-method diff', all volumes will be used by default.\n"" "" - For '-method single', the first volume (index [0]) will be used by default."", metavar=Metavar.str, default='') optional.add_argument( '-r', type=int, help='Remove temporary files.', default=1, choices=(0, 1)) optional.add_argument( '-v', metavar=Metavar.int, type=int, choices=[0, 1, 2], default=1, # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as ""if verbose == #"" in API help=""Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"") optional.add_argument( '-o', metavar=Metavar.str, type=str, default=None, help=""File name to write the computed SNR to."" ) return parser " 31547,"def main(): if demisto.args().get(""playbook""): # If we get a playbook, we generate a use case document, instead of teh platform as build r = get_playbook_dependencies(demisto.args().get(""playbook"")) doc = UseCaseDocument( playbook_name=demisto.args().get(""playbook""), dependencies=r ) fr = fileResult(""usecase.html"", doc.html()) return_results(fr) return_results(CommandResults( readable_output=doc.markdown(), )) return # If no playbook is passed, we generate a platform as built. open_incidents = get_open_incidents(MAX_DAYS, MAX_REQUEST_SIZE) closed_incidents = get_closed_incidents(MAX_DAYS, MAX_REQUEST_SIZE) system_config = get_system_config() integrations = get_enabled_integrations() installed_packs = get_installed_packs() playbooks = get_custom_playbooks() automations = get_custom_automations() playbook_stats = get_playbook_stats(playbooks, MAX_DAYS, MAX_REQUEST_SIZE) reports = get_custom_reports() dashboards = get_custom_dashboards() d = Document( MD_DOCUMENT_TEMPLATE, system_config=system_config, integrations_table=integrations, installed_packs_table=installed_packs, playbooks_table=playbooks, automations_table=automations, open_incidents=open_incidents, closed_incidents=closed_incidents, playbook_stats=playbook_stats, reports=reports, dashboards=dashboards ) fr = fileResult(""asbuilt.html"", d.html()) return_results(CommandResults( readable_output=d.markdown(), )) return_results(fr) ","def main(): if demisto.args().get(""playbook""): # If we get a playbook, we generate a use case document, instead of teh platform as build r = get_playbook_dependencies(demisto.args().get(""playbook"")) doc = UseCaseDocument( playbook_name=demisto.args().get(""playbook""), dependencies=r ) fr = fileResult(""usecase.html"", doc.html()) return_results(fr) return_results(CommandResults( readable_output=doc.markdown(), )) return # If no playbook is passed, we generate a platform as built. 
open_incidents = get_open_incidents(MAX_DAYS, MAX_REQUEST_SIZE) closed_incidents = get_closed_incidents(MAX_DAYS, MAX_REQUEST_SIZE) system_config = get_system_config() integrations = get_enabled_integrations() installed_packs = get_installed_packs() playbooks = get_custom_playbooks() automations = get_custom_automations() playbook_stats = get_playbook_stats(playbooks, MAX_DAYS, MAX_REQUEST_SIZE) reports = get_custom_reports() dashboards = get_custom_dashboards() d = Document( MD_DOCUMENT_TEMPLATE, system_config=system_config, integrations_table=integrations, installed_packs_table=installed_packs, playbooks_table=playbooks, automations_table=automations, open_incidents=open_incidents, closed_incidents=closed_incidents, playbook_stats=playbook_stats, reports=reports, dashboards=dashboards ) fr = fileResult(""asbuilt.html"", d.html(), file_type=EntryType.ENTRY_INFO_FILE) return_results(CommandResults( readable_output=d.markdown(), )) return_results(fr) " 35042,"def _get_name_static(canonical, dtype, shape, batch_dim=None): """"""Get name for static shape tensor array op corresponding to the canonical name"""""" dim_names = [] for dim in shape: if isinstance(dim, Any): dim_names.append(""any"") else: dim_names.append(str(dim)) shape_str = ""_"".join(dim_names) if len(shape_str) == 0: shape_str = ""scalar"" if canonical == ""tensor_t"": return ""static_tensor_{}_{}_t"".format(dtype, shape_str) if not batch_dim or canonical == ""tensor_constructor"" or canonical == ""tensor_nil"": return ""{}_{}_{}"".format(canonical, dtype, shape_str) if batch_dim != 1: return ""{}_{}_{}"".format(canonical, dtype, shape_str) else: return ""{}_{}_batch{}_{}"".format(canonical, dtype, str(batch_dim), shape_str) ","def _get_name_static(canonical, dtype, shape, batch_dim=None): """"""Get name for static shape tensor array op corresponding to the canonical name"""""" dim_names = [] for dim in shape: if isinstance(dim, Any): dim_names.append(""any"") else: dim_names.append(str(dim)) shape_str = ""_"".join(dim_names) if len(shape_str) == 0: shape_str = ""scalar"" if canonical == ""tensor_t"": return ""static_tensor_{}_{}_t"".format(dtype, shape_str) if batch_dim is None or canonical in [""tensor_constructor"", ""tensor_nil""]: return ""{}_{}_{}"".format(canonical, dtype, shape_str) if batch_dim != 1: return ""{}_{}_{}"".format(canonical, dtype, shape_str) else: return ""{}_{}_batch{}_{}"".format(canonical, dtype, str(batch_dim), shape_str) " 30442,"def main(circle_artifacts): print('Starting to create content artifact...') print('creating dir for bundles...') for bundle_dir in [BUNDLE_POST, BUNDLE_TEST, PACKS_BUNDLE]: os.mkdir(bundle_dir) add_tools_to_bundle(BUNDLE_POST) convert_incident_fields_to_array() for package_dir in DIR_TO_PREFIX: # handles nested package directories create_unifieds_and_copy(package_dir) for content_dir in CONTENT_DIRS: print(f'Copying dir {content_dir} to bundles...') copy_dir_files(content_dir, BUNDLE_POST) copy_test_files(BUNDLE_TEST) # handle copying packs content to content_new.zip and content_test.zip packs = get_child_directories(PACKS_DIR) for pack in packs: # each pack directory has it's own content subdirs, 'Integrations', 'Scripts', 'TestPlaybooks', 'Layouts' etc. 
sub_dirs_paths = get_child_directories(pack) for sub_dir_path in sub_dirs_paths: dir_name = os.path.basename(sub_dir_path) if dir_name == 'TestPlaybooks': copy_test_files(BUNDLE_TEST, sub_dir_path) else: # handle one-level deep content copy_dir_files(sub_dir_path, BUNDLE_POST) if dir_name in DIR_TO_PREFIX.keys(): # then it's a directory with nested packages that need to be handled # handle nested packages create_unifieds_and_copy(sub_dir_path) # handle copying packs content to packs_bundle for zipping to `content_packs.zip` for pack in packs: pack_name = os.path.basename(pack) pack_dst = os.path.join(PACKS_BUNDLE, pack_name) os.mkdir(pack_dst) pack_dirs = get_child_directories(pack) pack_files = get_child_files(pack) # copy first level pack files over for file_path in pack_files: shutil.copy(file_path, os.path.join(pack_dst, os.path.basename(file_path))) # handle content directories in the pack for content_dir in pack_dirs: dir_name = os.path.basename(content_dir) dest_dir = os.path.join(pack_dst, dir_name) os.mkdir(dest_dir) if dir_name in DIR_TO_PREFIX.keys(): packages_dirs = get_child_directories(content_dir) for package_dir in packages_dirs: package_dir_name = os.path.basename(package_dir) dest_package_dir = os.path.join(dest_dir, package_dir_name) os.mkdir(dest_package_dir) package_dir_with_slash = package_dir + '/' merge_script_package_to_yml(package_dir_with_slash, dir_name, dest_path=dest_package_dir) # also copy CHANGELOG markdown files over package_files = get_child_files(package_dir) changelog_files = [ file_path for file_path in package_files if ('CHANGELOG' in file_path and file_path.endswith('.md')) ] for md_file_path in changelog_files: shutil.copyfile(md_file_path, os.path.join(dest_package_dir, os.path.basename(md_file_path))) else: if dir_name == INCIDENT_FIELDS_DIR: convert_incident_fields_to_array(content_dir) copy_dir_files(content_dir, dest_dir) print('Copying content descriptor to bundles') for bundle_dir in [BUNDLE_POST, BUNDLE_TEST]: shutil.copyfile('content-descriptor.json', os.path.join(bundle_dir, 'content-descriptor.json')) print('copying common server doc to bundles') shutil.copyfile('./Documentation/doc-CommonServer.json', os.path.join(BUNDLE_POST, 'doc-CommonServer.json')) print('Compressing bundles...') shutil.make_archive(ZIP_POST, 'zip', BUNDLE_POST) shutil.make_archive(ZIP_TEST, 'zip', BUNDLE_TEST) shutil.make_archive(ZIP_PACKS, 'zip', PACKS_BUNDLE) shutil.copyfile(ZIP_POST + '.zip', os.path.join(circle_artifacts, ZIP_POST + '.zip')) shutil.copyfile(ZIP_TEST + '.zip', os.path.join(circle_artifacts, ZIP_TEST + '.zip')) shutil.copyfile(ZIP_PACKS + '.zip', os.path.join(circle_artifacts, ZIP_PACKS + '.zip')) shutil.copyfile(""./Tests/id_set.json"", os.path.join(circle_artifacts, ""id_set.json"")) shutil.copyfile('release-notes.md', os.path.join(circle_artifacts, 'release-notes.md')) print(f'finished create content artifact at {circle_artifacts}') ","def main(circle_artifacts): print('Starting to create content artifact...') print('creating dir for bundles...') for bundle_dir in [BUNDLE_POST, BUNDLE_TEST, PACKS_BUNDLE]: os.mkdir(bundle_dir) add_tools_to_bundle(BUNDLE_POST) convert_incident_fields_to_array() for package_dir in DIR_TO_PREFIX: # handles nested package directories create_unifieds_and_copy(package_dir) for content_dir in CONTENT_DIRS: print(f'Copying dir {content_dir} to bundles...') copy_dir_files(content_dir, BUNDLE_POST) copy_test_files(BUNDLE_TEST) # handle copying packs content to content_new.zip and content_test.zip packs = 
get_child_directories(PACKS_DIR) for pack in packs: # each pack directory has it's own content subdirs, 'Integrations', 'Scripts', 'TestPlaybooks', 'Layouts' etc. sub_dirs_paths = get_child_directories(pack) for sub_dir_path in sub_dirs_paths: dir_name = os.path.basename(sub_dir_path) if dir_name == 'TestPlaybooks': copy_test_files(BUNDLE_TEST, sub_dir_path) else: # handle one-level deep content copy_dir_files(sub_dir_path, BUNDLE_POST) if dir_name in DIR_TO_PREFIX: # then it's a directory with nested packages that need to be handled # handle nested packages create_unifieds_and_copy(sub_dir_path) # handle copying packs content to packs_bundle for zipping to `content_packs.zip` for pack in packs: pack_name = os.path.basename(pack) pack_dst = os.path.join(PACKS_BUNDLE, pack_name) os.mkdir(pack_dst) pack_dirs = get_child_directories(pack) pack_files = get_child_files(pack) # copy first level pack files over for file_path in pack_files: shutil.copy(file_path, os.path.join(pack_dst, os.path.basename(file_path))) # handle content directories in the pack for content_dir in pack_dirs: dir_name = os.path.basename(content_dir) dest_dir = os.path.join(pack_dst, dir_name) os.mkdir(dest_dir) if dir_name in DIR_TO_PREFIX.keys(): packages_dirs = get_child_directories(content_dir) for package_dir in packages_dirs: package_dir_name = os.path.basename(package_dir) dest_package_dir = os.path.join(dest_dir, package_dir_name) os.mkdir(dest_package_dir) package_dir_with_slash = package_dir + '/' merge_script_package_to_yml(package_dir_with_slash, dir_name, dest_path=dest_package_dir) # also copy CHANGELOG markdown files over package_files = get_child_files(package_dir) changelog_files = [ file_path for file_path in package_files if ('CHANGELOG' in file_path and file_path.endswith('.md')) ] for md_file_path in changelog_files: shutil.copyfile(md_file_path, os.path.join(dest_package_dir, os.path.basename(md_file_path))) else: if dir_name == INCIDENT_FIELDS_DIR: convert_incident_fields_to_array(content_dir) copy_dir_files(content_dir, dest_dir) print('Copying content descriptor to bundles') for bundle_dir in [BUNDLE_POST, BUNDLE_TEST]: shutil.copyfile('content-descriptor.json', os.path.join(bundle_dir, 'content-descriptor.json')) print('copying common server doc to bundles') shutil.copyfile('./Documentation/doc-CommonServer.json', os.path.join(BUNDLE_POST, 'doc-CommonServer.json')) print('Compressing bundles...') shutil.make_archive(ZIP_POST, 'zip', BUNDLE_POST) shutil.make_archive(ZIP_TEST, 'zip', BUNDLE_TEST) shutil.make_archive(ZIP_PACKS, 'zip', PACKS_BUNDLE) shutil.copyfile(ZIP_POST + '.zip', os.path.join(circle_artifacts, ZIP_POST + '.zip')) shutil.copyfile(ZIP_TEST + '.zip', os.path.join(circle_artifacts, ZIP_TEST + '.zip')) shutil.copyfile(ZIP_PACKS + '.zip', os.path.join(circle_artifacts, ZIP_PACKS + '.zip')) shutil.copyfile(""./Tests/id_set.json"", os.path.join(circle_artifacts, ""id_set.json"")) shutil.copyfile('release-notes.md', os.path.join(circle_artifacts, 'release-notes.md')) print(f'finished create content artifact at {circle_artifacts}') " 30036,"def check_reset_seed(env: gym.Env): """"""Check that the environment can be reset with a seed. Args: env: The environment to check Raises: AssertionError: The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. 
"""""" signature = inspect.signature(env.reset) if ""seed"" in signature.parameters or ""kwargs"" in signature.parameters: try: obs_1 = env.reset(seed=123) assert ( obs_1 in env.observation_space ), ""The observation returns by `env.reset(seed=123)` is not within the observation space"" seed_123_rng = deepcopy(env.unwrapped.np_random) obs_2 = env.reset(seed=123) assert ( obs_2 in env.observation_space ), ""The observation returns by `env.reset(seed=123)` is not within the observation space"" if env.spec is not None and env.spec.nondeterministic is False: assert data_equivalence( obs_1, obs_2 ), ""`env.reset(seed=123)` is not deterministic as the observations are not equivalent"" assert ( env.unwrapped.np_random.bit_generator.state == seed_123_rng.bit_generator.state ), ( ""Mostly likely the environment reset function does not call `super().reset(seed=seed)` "" ""as the random generates are not same when the same seeds are passed to `env.reset`."" ) obs_3 = env.reset(seed=456) assert ( obs_3 in env.observation_space ), ""The observation returns by `env.reset(seed=456)` is not within the observation space"" assert ( env.unwrapped.np_random.bit_generator.state != seed_123_rng.bit_generator.state ), ( ""Mostly likely the environment reset function does not call `super().reset(seed=seed)` "" ""as the random generates are not different when different seeds are passed to `env.reset`."" ) except TypeError as e: raise AssertionError( ""The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. "" ""This should never happen, please report this issue. "" f""The error was: {e}"" ) if env.unwrapped._np_random is None: logger.warn( ""Resetting the environment did not result in seeding its random number generator. "" ""This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. "" ""If you do not use the python-level random number generator, this is not a problem."" ) seed_param = signature.parameters.get(""seed"") # Check the default value is None if seed_param is not None and seed_param.default is not None: logger.warn( ""The default seed argument in reset should be `None`, "" ""otherwise the environment will by default always be deterministic. "" f""Actual default: {seed_param.default}"" ) else: raise gym.error.Error( ""The `reset` method does not provide the `seed` keyword argument"" ) ","def check_reset_seed(env: gym.Env): """"""Check that the environment can be reset with a seed. Args: env: The environment to check Raises: AssertionError: The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. 
"""""" signature = inspect.signature(env.reset) if ""seed"" in signature.parameters or ""kwargs"" in signature.parameters: try: obs_1 = env.reset(seed=123) assert ( obs_1 in env.observation_space ), ""The observation returns by `env.reset(seed=123)` is not within the observation space"" seed_123_rng = deepcopy(env.unwrapped.np_random) obs_2 = env.reset(seed=123) assert ( obs_2 in env.observation_space ), ""The observation returns by `env.reset(seed=123)` is not within the observation space"" if env.spec is not None and env.spec.nondeterministic is False: assert data_equivalence( obs_1, obs_2 ), ""`env.reset(seed=123)` is not deterministic as the observations are not equivalent"" assert ( env.unwrapped.np_random.bit_generator.state == seed_123_rng.bit_generator.state ), ( ""Mostly likely the environment reset function does not call `super().reset(seed=seed)` "" ""as the random generates are not same when the same seeds are passed to `env.reset`."" ) obs_3 = env.reset(seed=456) assert ( obs_3 in env.observation_space ), ""The observation returns by `env.reset(seed=456)` is not within the observation space"" assert ( env.unwrapped.np_random.bit_generator.state != seed_123_rng.bit_generator.state ), ( ""Mostly likely the environment reset function does not call `super().reset(seed=seed)` "" ""as the random number generators are not different when different seeds are passed to `env.reset`."" ) except TypeError as e: raise AssertionError( ""The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. "" ""This should never happen, please report this issue. "" f""The error was: {e}"" ) if env.unwrapped._np_random is None: logger.warn( ""Resetting the environment did not result in seeding its random number generator. "" ""This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. "" ""If you do not use the python-level random number generator, this is not a problem."" ) seed_param = signature.parameters.get(""seed"") # Check the default value is None if seed_param is not None and seed_param.default is not None: logger.warn( ""The default seed argument in reset should be `None`, "" ""otherwise the environment will by default always be deterministic. 
"" f""Actual default: {seed_param.default}"" ) else: raise gym.error.Error( ""The `reset` method does not provide the `seed` keyword argument"" ) " 30334,"def get_group(): """""" retrieve a single Group """""" group_type = demisto.args().get('group_type') group_id = int(demisto.args().get('group_id')) response = get_group_request(group_type, group_id) if group_type == 'adversaries': data = response.get('data', {}).get('adversarie', {}) if group_type == 'campaigns': data = response.get('data', {}).get('campaign', {}) if group_type == 'documents': data = response.get('data', {}).get('document', {}) if group_type == 'emails': data = response.get('data', {}).get('email', {}) if group_type == 'events': data = response.get('data', {}).get('event', {}) if group_type == 'incidents': data = response.get('data', {}).get('incident', {}) if group_type == 'intrusionSets': data = response.get('data', {}).get('intrusionSet', {}) if group_type == 'reports': data = response.get('data', {}).get('report', {}) if group_type == 'signatures': data = response.get('data', {}).get('signature', {}) if group_type == 'threats': data = response.get('data', {}).get('threat', {}) if response.get('status') == 'Success': contents = { 'ID': data.get('id'), 'Name': data.get('name'), 'Owner': data.get('owner'), 'DateAdded': data.get('dateAdded'), 'EventDate': data.get('eventDate'), 'Status': data.get('status') } else: return_error(response.get('message')) context = { 'TC.Group(val.ID && val.ID === obj.ID)': contents } return_outputs( tableToMarkdown('Group information', contents, removeNull=True), context ) ","def get_group(): """""" retrieve a single Group """""" group_type = demisto.args().get('group_type') group_id = int(demisto.args().get('group_id')) response = get_group_request(group_type, group_id).get('data', {}) if group_type == 'adversaries': data = response.get('data', {}).get('adversarie', {}) if group_type == 'campaigns': data = response.get('data', {}).get('campaign', {}) if group_type == 'documents': data = response.get('data', {}).get('document', {}) if group_type == 'emails': data = response.get('data', {}).get('email', {}) if group_type == 'events': data = response.get('data', {}).get('event', {}) if group_type == 'incidents': data = response.get('data', {}).get('incident', {}) if group_type == 'intrusionSets': data = response.get('data', {}).get('intrusionSet', {}) if group_type == 'reports': data = response.get('data', {}).get('report', {}) if group_type == 'signatures': data = response.get('data', {}).get('signature', {}) if group_type == 'threats': data = response.get('data', {}).get('threat', {}) if response.get('status') == 'Success': contents = { 'ID': data.get('id'), 'Name': data.get('name'), 'Owner': data.get('owner'), 'DateAdded': data.get('dateAdded'), 'EventDate': data.get('eventDate'), 'Status': data.get('status') } else: return_error(response.get('message')) context = { 'TC.Group(val.ID && val.ID === obj.ID)': contents } return_outputs( tableToMarkdown('Group information', contents, removeNull=True), context ) " 31425,"def main(): """""" Main function """""" params = demisto.params() server: str = (params.get('url')[:-1] if (params.get('url') and params.get('url').endswith('/')) else params.get('url')) client = Client( base_url=str(server) + '/api/v1/', user_name=params.get('credentials', {}).get('identifier'), password=params.get('credentials', {}).get('password'), use_ssl=not params.get('insecure', False), reliability=params.get('integrationReliability') ) args = { 'fetch_time': 
params.get('fetch_time', '').strip(), 'fetch_limit': params.get('fetch_limit', '10') } global RAISE_EXCEPTION_ON_ERROR LOG('Command being called is {}'.format(demisto.command())) handle_proxy() command_dict = { 'test-module': test_module, 'fetch-incidents': fetch_incidents, 'phishlabs-global-feed': get_global_feed_command, 'phishlabs-get-incident-indicators': get_incident_indicators_command } try: command_func: Callable = command_dict[demisto.command()] # type:ignore[assignment] if demisto.command() == 'fetch-incidents': RAISE_EXCEPTION_ON_ERROR = True command_func(client, **args) else: command_func(client) except Exception as e: if RAISE_EXCEPTION_ON_ERROR: LOG(str(e)) LOG.print_log() raise else: return_error(str(e)) ","def main(): """""" Main function """""" params = demisto.params() server: str = (params.get('url')[:-1] if (params.get('url') and params.get('url').endswith('/')) else params.get('url')) client = Client( base_url=f'{str(server)}/api/v1/', user_name=params.get('credentials', {}).get('identifier'), password=params.get('credentials', {}).get('password'), use_ssl=not params.get('insecure', False), reliability=params.get('integrationReliability') ) args = { 'fetch_time': params.get('fetch_time', '').strip(), 'fetch_limit': params.get('fetch_limit', '10') } global RAISE_EXCEPTION_ON_ERROR LOG('Command being called is {}'.format(demisto.command())) handle_proxy() command_dict = { 'test-module': test_module, 'fetch-incidents': fetch_incidents, 'phishlabs-global-feed': get_global_feed_command, 'phishlabs-get-incident-indicators': get_incident_indicators_command } try: command_func: Callable = command_dict[demisto.command()] # type:ignore[assignment] if demisto.command() == 'fetch-incidents': RAISE_EXCEPTION_ON_ERROR = True command_func(client, **args) else: command_func(client) except Exception as e: if RAISE_EXCEPTION_ON_ERROR: LOG(str(e)) LOG.print_log() raise else: return_error(str(e)) " 11273,"def test_error_map(): request = HttpRequest(""GET"", ""https://bing.com"") response = HttpResponse(request, None) error_map = { 404: ResourceNotFoundError } with pytest.raises(ResourceNotFoundError): map_error(404, response, error_map) ","def test_error_map(): request = HttpRequest(""GET"", """") response = HttpResponse(request, None) error_map = { 404: ResourceNotFoundError } with pytest.raises(ResourceNotFoundError): map_error(404, response, error_map) " 5716,"def _kpp(data, k, rng): """""" Picks k points in the data based on the kmeans++ method. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : `numpy.random.Generator` or `numpy.random.RandomState` Random number generator Returns ------- init : ndarray A 'k' by 'N' containing the initial centroids. References ---------- .. [1] D. Arthur and S. Vassilvitskii, ""k-means++: the advantages of careful seeding"", Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, 2007. 
"""""" dims = data.shape[1] if len(data.shape) > 1 else 1 init = np.ndarray((k, dims)) # get random generator API try: rng_integers = rng.integers except AttributeError: rng_integers = rng.randint # legacy API try: rng_random = rng.random except AttributeError: rng_random = rng.rand # legacy API for i in range(k): if i == 0: init[i, :] = data[rng_integers(data.shape[0])] else: D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0) probs = D2/D2.sum() cumprobs = probs.cumsum() r = rng_random() init[i, :] = data[np.searchsorted(cumprobs, r)] return init ","def _kpp(data, k, rng): """""" Picks k points in the data based on the kmeans++ method. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : `numpy.random.Generator` or `numpy.random.RandomState` Random number generator. Returns ------- init : ndarray A 'k' by 'N' containing the initial centroids. References ---------- .. [1] D. Arthur and S. Vassilvitskii, ""k-means++: the advantages of careful seeding"", Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, 2007. """""" dims = data.shape[1] if len(data.shape) > 1 else 1 init = np.ndarray((k, dims)) # get random generator API try: rng_integers = rng.integers except AttributeError: rng_integers = rng.randint # legacy API try: rng_random = rng.random except AttributeError: rng_random = rng.rand # legacy API for i in range(k): if i == 0: init[i, :] = data[rng_integers(data.shape[0])] else: D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0) probs = D2/D2.sum() cumprobs = probs.cumsum() r = rng_random() init[i, :] = data[np.searchsorted(cumprobs, r)] return init " 32136,"def main(): res = demisto.executeCommand('addEntitlement', {'persistent': demisto.get(demisto.args(), 'persistent'), 'replyEntriesTag': demisto.get(demisto.args(), 'replyEntriesTag')}) if isError(res[0]): demisto.results(res) sys.exit(0) entitlement = demisto.get(res[0], 'Contents') lifetime = '1 day' try: expiry = datetime.strftime(dateparser.parse('in ' + lifetime, settings={'TIMEZONE': 'UTC'}), DATE_FORMAT) except Exception: expiry = datetime.strftime(dateparser.parse('in 1 day', settings={'TIMEZONE': 'UTC'}), DATE_FORMAT) entitlement_string = entitlement + '@' + demisto.investigation()['id'] if demisto.get(demisto.args(), 'task'): entitlement_string += '|' + demisto.get(demisto.args(), 'task') args = demisto.args() file_name = args.get('file_name') policy_name = args.get('policy_name') feedback_options = demisto.get(demisto.args(), 'feedback_options') options = [] if feedback_options: for feedback in feedback_options.split(','): options.append({ 'text': feedback, 'style': 'primary' }) args = { 'ignoreAddURL': 'true', 'using-brand': 'SlackV3' } reply = ""Thank you for your response."" blocks = json.dumps(create_blocks(file_name, policy_name, entitlement_string, options, reply)) args['blocks'] = json.dumps({ 'blocks': blocks, 'entitlement': entitlement_string, 'reply': reply, 'expiry': expiry, 'default_response': 'NoResponse' }) to = demisto.get(demisto.args(), 'user') if to: args['to'] = to else: return_error('A user must be provided.') try: demisto.results(demisto.executeCommand('send-notification', args)) except ValueError as e: if 'Unsupported Command' in str(e): return_error( 'The command is unsupported by any integration instance. 
If you have SlackV3 or above enabled, ' 'please use SlackAskV2 instead.') else: return_error('An error has occurred while executing the send-notification command', error=e) ","def main(): res = demisto.executeCommand('addEntitlement', {'persistent': demisto.get(demisto.args(), 'persistent'), 'replyEntriesTag': demisto.get(demisto.args(), 'replyEntriesTag')}) if isError(res[0]): demisto.results(res) sys.exit(0) entitlement = demisto.get(res[0], 'Contents') lifetime = '1 day' try: expiry = datetime.strftime(dateparser.parse('in ' + lifetime, settings={'TIMEZONE': 'UTC'}), DATE_FORMAT) except Exception: expiry = datetime.strftime(dateparser.parse('in 1 day', settings={'TIMEZONE': 'UTC'}), DATE_FORMAT) entitlement_string = entitlement + '@' + demisto.investigation()['id'] if demisto.get(demisto.args(), 'task'): entitlement_string += f'|{demisto.get(demisto.args(), ""task"")}' args = demisto.args() file_name = args.get('file_name') policy_name = args.get('policy_name') feedback_options = demisto.get(demisto.args(), 'feedback_options') options = [] if feedback_options: for feedback in feedback_options.split(','): options.append({ 'text': feedback, 'style': 'primary' }) args = { 'ignoreAddURL': 'true', 'using-brand': 'SlackV3' } reply = ""Thank you for your response."" blocks = json.dumps(create_blocks(file_name, policy_name, entitlement_string, options, reply)) args['blocks'] = json.dumps({ 'blocks': blocks, 'entitlement': entitlement_string, 'reply': reply, 'expiry': expiry, 'default_response': 'NoResponse' }) to = demisto.get(demisto.args(), 'user') if to: args['to'] = to else: return_error('A user must be provided.') try: demisto.results(demisto.executeCommand('send-notification', args)) except ValueError as e: if 'Unsupported Command' in str(e): return_error( 'The command is unsupported by any integration instance. If you have SlackV3 or above enabled, ' 'please use SlackAskV2 instead.') else: return_error('An error has occurred while executing the send-notification command', error=e) " 6865,"def get_site_config(sites_path=None, site_path=None): """"""Returns `site_config.json` combined with `sites/common_site_config.json`. `site_config` is a set of site wide settings like database name, password, email etc."""""" config = {} sites_path = sites_path or getattr(local, ""sites_path"", None) site_path = site_path or getattr(local, ""site_path"", None) if sites_path: common_site_config = os.path.join(sites_path, ""common_site_config.json"") if os.path.exists(common_site_config): try: config.update(get_file_json(common_site_config)) except Exception as error: click.secho(""Common Site Config may be corrupted"", fg=""red"") print(error) if site_path: site_config = os.path.join(site_path, ""site_config.json"") if os.path.exists(site_config): try: config.update(get_file_json(site_config)) except Exception as error: click.secho(""Site Config for {0} may be corrupted"".format(local.site), fg=""red"") print(error) elif local.site and not local.flags.new_site: raise IncorrectSitePath(""{0} does not exist"".format(local.site)) return _dict(config) ","def get_site_config(sites_path=None, site_path=None): """"""Returns `site_config.json` combined with `sites/common_site_config.json`. 
`site_config` is a set of site wide settings like database name, password, email etc."""""" config = {} sites_path = sites_path or getattr(local, ""sites_path"", None) site_path = site_path or getattr(local, ""site_path"", None) if sites_path: common_site_config = os.path.join(sites_path, ""common_site_config.json"") if os.path.exists(common_site_config): try: config.update(get_file_json(common_site_config)) except Exception as error: click.secho(""Common Site Config may be corrupted"", fg=""red"") print(error) if site_path: site_config = os.path.join(site_path, ""site_config.json"") if os.path.exists(site_config): try: config.update(get_file_json(site_config)) except Exception as error: click.secho(""{0}/site_config.json is invalid"".format(local.site), fg=""red"") print(error) elif local.site and not local.flags.new_site: raise IncorrectSitePath(""{0} does not exist"".format(local.site)) return _dict(config) " 30922,"def main(): LOG('command is %s' % (demisto.command(),)) try: if command == 'test-module': response = requests.get(baseURL) if response is not None: demisto.results(""ok"") else: demisto.results('Test Failed: {}'.format(res.text)) elif demisto.command() == 'get-all-lights': demisto.results(get_all_lights_command()) elif demisto.command() == 'get-new-lights': demisto.results(get_new_lights_command()) elif demisto.command() == 'search-new-lights': demisto.results(search_new_lights_command()) elif demisto.command() == 'get-light-state': demisto.results(get_light_state_command()) elif demisto.command() == 'set-light-state': demisto.results(set_light_state_command()) elif demisto.command() == 'get-all-groups': demisto.results(get_all_groups_command()) elif demisto.command() == 'create-group': demisto.results(create_group_command()) elif demisto.command() == 'get-group-state': demisto.results(get_group_state_command()) elif demisto.command() == 'set-group-state': demisto.results(set_group_state_command()) except Exception as e: return_error(str(e)) ","def main(): LOG('command is %s' % (demisto.command(),)) try: if command == 'test-module': response = requests.get(baseURL) if response is not None: demisto.results(""ok"") else: demisto.results('Test Failed: {}'.format(response.text)) elif demisto.command() == 'get-all-lights': demisto.results(get_all_lights_command()) elif demisto.command() == 'get-new-lights': demisto.results(get_new_lights_command()) elif demisto.command() == 'search-new-lights': demisto.results(search_new_lights_command()) elif demisto.command() == 'get-light-state': demisto.results(get_light_state_command()) elif demisto.command() == 'set-light-state': demisto.results(set_light_state_command()) elif demisto.command() == 'get-all-groups': demisto.results(get_all_groups_command()) elif demisto.command() == 'create-group': demisto.results(create_group_command()) elif demisto.command() == 'get-group-state': demisto.results(get_group_state_command()) elif demisto.command() == 'set-group-state': demisto.results(set_group_state_command()) except Exception as e: return_error(str(e)) " 37038,"def job_monitor(job, interval=None, monitor_async=False, quiet=False, to_file=None): """"""Monitor the status of a IBMQJob instance. Args: job (BaseJob): Job to monitor. interval (int): Time interval between status queries. monitor_async (bool): Monitor asyncronously (in Jupyter only). quiet (bool): If True, do not print status messages. to_file (file): If file print status messages to it, else to stdout. 
Raises: QiskitError: When trying to run async outside of Jupyter ImportError: ipywidgets not available for notebook. """""" if interval is None: _interval_set = False interval = 2 else: _interval_set = True if _NOTEBOOK_ENV: if monitor_async: try: import ipywidgets as widgets # pylint: disable=import-error except ImportError: raise ImportError('These functions need ipywidgets. ' 'Run ""pip install ipywidgets"" before.') from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412 style = ""font-size:16px;"" header = ""
<p style='{style}'>Job Status: %s </p>
"".format( style=style) status = widgets.HTML(value=header % job.status().value) display(status) thread = threading.Thread(target=_html_checker, args=(job, interval, status, header)) thread.start() else: _text_checker(job, interval, _interval_set, quiet=quiet, to_file=to_file) else: if monitor_async: raise QiskitError( 'monitor_async only available in Jupyter notebooks.') _text_checker(job, interval, _interval_set, quiet=quiet, to_file=to_file) ","def job_monitor(job, interval=None, monitor_async=False, quiet=False, to_file=None): """"""Monitor the status of a IBMQJob instance. Args: job (BaseJob): Job to monitor. interval (int): Time interval between status queries. monitor_async (bool): Monitor asyncronously (in Jupyter only). quiet (bool): If True, do not print status messages. to_file (file): If file print status messages to it, else to stdout. Raises: QiskitError: When trying to run async outside of Jupyter ImportError: ipywidgets not available for notebook. """""" if interval is None: _interval_set = False interval = 2 else: _interval_set = True if _NOTEBOOK_ENV: if monitor_async: try: import ipywidgets as widgets # pylint: disable=import-error except ImportError: raise ImportError('These functions need ipywidgets. ' 'Run ""pip install ipywidgets"" before.') from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412 style = ""font-size:16px;"" header = ""
<p style='{style}'>Job Status: %s </p>
"".format( style=style) status = widgets.HTML(value=header % job.status().value) display(status) thread = threading.Thread(target=_html_checker, args=(job, interval, status, header)) thread.start() else: _text_checker(job, interval, _interval_set, quiet=quiet, output=output) else: if monitor_async: raise QiskitError( 'monitor_async only available in Jupyter notebooks.') _text_checker(job, interval, _interval_set, quiet=quiet, to_file=to_file) " 17750,"def relative_import(parent_name, rel_modules=(), rel_classes=()): """""" Helper function to import submodules lazily in Pythong 3.7+ Parameters ---------- rel_modules: list of str list of submodules to import, of the form .submodule rel_classes: list of str list of submodule classes/variables to import, of the form ._submodule.Foo Returns ------- tuple Tuple that should be assigned to __all__, __getattr__ in the caller """""" module_names = {rel_module.split(""."")[-1]: rel_module for rel_module in rel_modules} class_names = {rel_path.split(""."")[-1]: rel_path for rel_path in rel_classes} def __getattr__(import_name): # In Python 3.7+, lazy import submodules # Check for submodule if import_name in module_names: # print(parent_name, import_name) rel_import = module_names[import_name] return importlib.import_module(rel_import, parent_name) # Check for submodule class if import_name in class_names: # print(parent_name, import_name) rel_path_parts = class_names[import_name].split(""."") rel_module = ""."".join(rel_path_parts[:-1]) class_name = import_name class_module = importlib.import_module(rel_module, parent_name) return getattr(class_module, class_name) raise AttributeError( ""module {__name__!r} has no attribute {name!r}"".format( name=import_name, __name__=parent_name ) ) __all__ = list(module_names) + list(class_names) return __all__, __getattr__ ","def relative_import(parent_name, rel_modules=(), rel_classes=()): """""" Helper function to import submodules lazily in Python 3.7+ Parameters ---------- rel_modules: list of str list of submodules to import, of the form .submodule rel_classes: list of str list of submodule classes/variables to import, of the form ._submodule.Foo Returns ------- tuple Tuple that should be assigned to __all__, __getattr__ in the caller """""" module_names = {rel_module.split(""."")[-1]: rel_module for rel_module in rel_modules} class_names = {rel_path.split(""."")[-1]: rel_path for rel_path in rel_classes} def __getattr__(import_name): # In Python 3.7+, lazy import submodules # Check for submodule if import_name in module_names: # print(parent_name, import_name) rel_import = module_names[import_name] return importlib.import_module(rel_import, parent_name) # Check for submodule class if import_name in class_names: # print(parent_name, import_name) rel_path_parts = class_names[import_name].split(""."") rel_module = ""."".join(rel_path_parts[:-1]) class_name = import_name class_module = importlib.import_module(rel_module, parent_name) return getattr(class_module, class_name) raise AttributeError( ""module {__name__!r} has no attribute {name!r}"".format( name=import_name, __name__=parent_name ) ) __all__ = list(module_names) + list(class_names) return __all__, __getattr__ " 44216,"def _add_op(op, layer_str, wire_map, decimals, cache): """"""Updates ``layer_str`` with ``op`` operation."""""" layer_str = _add_grouping_symbols(op, layer_str, wire_map) control_wires = op.control_wires control_values = op.hyperparameters.get(""control_values"", None) for i, w in enumerate(control_wires): if control_values: if 
control_values[i] == ""1"": layer_str[wire_map[w]] += ""C"" elif control_values[i] == ""0"": layer_str[wire_map[w]] += ""O"" else: layer_str[wire_map[w]] += ""C"" label = op.label(decimals=decimals, cache=cache).replace(""\n"", """") if len(op.wires) == 0: # operation (e.g. barrier, snapshot) across all wires for i, s in enumerate(layer_str): layer_str[i] = s + label else: for w in op.wires: if w not in control_wires: layer_str[wire_map[w]] += label return layer_str ","def _add_op(op, layer_str, wire_map, decimals, cache): """"""Updates ``layer_str`` with ``op`` operation."""""" layer_str = _add_grouping_symbols(op, layer_str, wire_map) control_wires = op.control_wires control_values = op.hyperparameters.get(""control_values"", None) for w, val in zip(control_wires, control_values): if control_values: if control_values[i] == ""1"": layer_str[wire_map[w]] += ""C"" elif control_values[i] == ""0"": layer_str[wire_map[w]] += ""O"" else: layer_str[wire_map[w]] += ""C"" label = op.label(decimals=decimals, cache=cache).replace(""\n"", """") if len(op.wires) == 0: # operation (e.g. barrier, snapshot) across all wires for i, s in enumerate(layer_str): layer_str[i] = s + label else: for w in op.wires: if w not in control_wires: layer_str[wire_map[w]] += label return layer_str " 1326,"def compute_optics_graph(X, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs): """"""Computes the OPTICS reachability graph. Read more in the :ref:`User Guide `. Parameters ---------- X : array, shape (n_samples, n_features), or (n_samples, n_samples) \ if metric=’precomputed’. A feature array, or array of distances between samples if metric='precomputed' min_samples : int (default=5) The number of samples in a neighborhood for a point to be considered as a core point. Expressed as an absolute number or a fraction of the number of samples (rounded to be at least 2). max_eps : float, optional (default=np.inf) The maximum distance between two samples for one to be considered as in the neighborhood of the other. Default value of ``np.inf`` will identify clusters across all scales; reducing ``max_eps`` will result in shorter run times. metric : string or callable, optional (default='minkowski') Metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. If metric is ""precomputed"", X is assumed to be a distance matrix and must be square. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. p : integer, optional (default=2) Parameter for the Minkowski metric from :class:`sklearn.metrics.pairwise_distances`. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. 
metric_params : dict, optional (default=None) Additional keyword arguments for the metric function. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. (default) Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default=30) Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. n_jobs : int or None, optional (default=None) The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. Returns ------- ordering_ : array, shape (n_samples,) The cluster ordered list of sample indices. core_distances_ : array, shape (n_samples,) Distance at which each sample becomes a core point, indexed by object order. Points which will never be core have a distance of inf. Use ``clust.core_distances_[clust.ordering_]`` to access in cluster order. reachability_ : array, shape (n_samples,) Reachability distances per sample, indexed by object order. Use ``clust.reachability_[clust.ordering_]`` to access in cluster order. predecessor_ : array, shape (n_samples,) Point that a sample was reached from, indexed by object order. Seed points have a predecessor of -1. References ---------- .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, and Jörg Sander. ""OPTICS: ordering points to identify the clustering structure."" ACM SIGMOD Record 28, no. 2 (1999): 49-60. """""" n_samples = X.shape[0] _validate_size(min_samples, n_samples, 'min_samples') if min_samples <= 1: min_samples = max(2, min_samples * n_samples) # Start all points as 'unprocessed' ## reachability_ = np.empty(n_samples) reachability_.fill(np.inf) predecessor_ = np.empty(n_samples, dtype=int) predecessor_.fill(-1) nbrs = NearestNeighbors(n_neighbors=min_samples, algorithm=algorithm, leaf_size=leaf_size, metric=metric, metric_params=metric_params, p=p, n_jobs=n_jobs) nbrs.fit(X) # Here we first do a kNN query for each point, this differs from # the original OPTICS that only used epsilon range queries. # TODO: handle working_memory somehow? core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None) # OPTICS puts an upper limit on these, use inf for undefined. core_distances_[core_distances_ > max_eps] = np.inf # Main OPTICS loop. Not parallelizable. The order that entries are # written to the 'ordering_' list is important! # Note that this implementation is O(n^2) theoretically, but # supposedly with very low constant factors. processed = np.zeros(X.shape[0], dtype=bool) ordering = np.zeros(X.shape[0], dtype=int) for ordering_idx in range(X.shape[0]): # Choose next based on smallest reachability distance # (And prefer smaller ids on ties, possibly np.inf!) 
index = np.where(processed == 0)[0] point = index[np.argmin(reachability_[index])] processed[point] = True ordering[ordering_idx] = point if core_distances_[point] != np.inf: _set_reach_dist(core_distances_=core_distances_, reachability_=reachability_, predecessor_=predecessor_, point_index=point, processed=processed, X=X, nbrs=nbrs, metric=metric, metric_params=metric_params, p=p, max_eps=max_eps) if np.all(np.isinf(reachability_)): warnings.warn(""All reachability values are inf. Set a larger"" "" max_eps or all data will be considered outliers."", UserWarning) return ordering, core_distances_, reachability_, predecessor_ ","def compute_optics_graph(X, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs): """"""Computes the OPTICS reachability graph. Read more in the :ref:`User Guide `. Parameters ---------- X : array, shape (n_samples, n_features), or (n_samples, n_samples) \ if metric=’precomputed’. A feature array, or array of distances between samples if metric='precomputed' min_samples : int (default=5) The number of samples in a neighborhood for a point to be considered as a core point. Expressed as an absolute number or a fraction of the number of samples (rounded to be at least 2). max_eps : float, optional (default=np.inf) The maximum distance between two samples for one to be considered as in the neighborhood of the other. Default value of ``np.inf`` will identify clusters across all scales; reducing ``max_eps`` will result in shorter run times. metric : string or callable, optional (default='minkowski') Metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. If metric is ""precomputed"", X is assumed to be a distance matrix and must be square. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. p : integer, optional (default=2) Parameter for the Minkowski metric from :class:`sklearn.metrics.pairwise_distances`. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params : dict, optional (default=None) Additional keyword arguments for the metric function. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. (default) Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default=30) Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can affect the speed of the construction and query, as well as the memory required to store the tree. 
The optimal value depends on the nature of the problem. n_jobs : int or None, optional (default=None) The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. Returns ------- ordering_ : array, shape (n_samples,) The cluster ordered list of sample indices. core_distances_ : array, shape (n_samples,) Distance at which each sample becomes a core point, indexed by object order. Points which will never be core have a distance of inf. Use ``clust.core_distances_[clust.ordering_]`` to access in cluster order. reachability_ : array, shape (n_samples,) Reachability distances per sample, indexed by object order. Use ``clust.reachability_[clust.ordering_]`` to access in cluster order. predecessor_ : array, shape (n_samples,) Point that a sample was reached from, indexed by object order. Seed points have a predecessor of -1. References ---------- .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, and Jörg Sander. ""OPTICS: ordering points to identify the clustering structure."" ACM SIGMOD Record 28, no. 2 (1999): 49-60. """""" n_samples = X.shape[0] _validate_size(min_samples, n_samples, 'min_samples') if min_samples <= 1: min_samples = max(2, min_samples * n_samples) # Start all points as 'unprocessed' ## reachability_ = np.empty(n_samples) reachability_.fill(np.inf) predecessor_ = np.empty(n_samples, dtype=int) predecessor_.fill(-1) nbrs = NearestNeighbors(n_neighbors=min_samples, algorithm=algorithm, leaf_size=leaf_size, metric=metric, metric_params=metric_params, p=p, n_jobs=n_jobs) nbrs.fit(X) # Here we first do a kNN query for each point, this differs from # the original OPTICS that only used epsilon range queries. # TODO: handle working_memory somehow? core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None) # OPTICS puts an upper limit on these, use inf for undefined. core_distances_[core_distances_ > max_eps] = np.inf # Main OPTICS loop. Not parallelizable. The order that entries are # written to the 'ordering_' list is important! # Note that this implementation is O(n^2) theoretically, but # supposedly with very low constant factors. processed = np.zeros(X.shape[0], dtype=bool) ordering = np.zeros(X.shape[0], dtype=int) for ordering_idx in range(X.shape[0]): # Choose next based on smallest reachability distance # (And prefer smaller ids on ties, possibly np.inf!) index = np.where(processed == 0)[0] point = index[np.argmin(reachability_[index])] processed[point] = True ordering[ordering_idx] = point if core_distances_[point] != np.inf: _set_reach_dist(core_distances_=core_distances_, reachability_=reachability_, predecessor_=predecessor_, point_index=point, processed=processed, X=X, nbrs=nbrs, metric=metric, metric_params=metric_params, p=p, max_eps=max_eps) if np.all(np.isinf(reachability_)): warnings.warn(""All reachability values are inf. Set a larger"" "" max_eps or all data will be considered outliers."", UserWarning) return ordering, core_distances_, reachability_, predecessor_ " 41398,"def package_directory(dest_folder, classes, imagery, ml_type, seed=False, split_names=['train', 'test'], split_vals=[0.8, .2], **kwargs): """"""Generate an .npz file containing arrays for training machine learning algorithms Parameters ------------ dest_folder: str Folder to save labels, tiles, and final numpy arrays into classes: list A list of classes for machine learning training. 
Each class is defined as a dict with two required properties: - name: class name - filter: A Mapbox GL Filter. See the README for more details imagery: str Imagery template to download satellite images from. Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN ml_type: str Defines the type of machine learning. One of ""classification"", ""object-detection"", or ""segmentation"" seed: int Random generator seed. Optional, use to make results reproducible. split_vals: lst Percentage of data to put in each catagory listed in split_names. Must be floats and must sum to one. split_names: lst List of names for each subset of the data, either ['train', 'test'] or ['train', 'test', 'val'] **kwargs: dict Other properties from CLI config passed as keywords to other utility functions """""" # if a seed is given, use it if seed: np.random.seed(seed) assert len(split_names) == 2 or len(split_names) == 3. assert len(split_names) == len(split_vals), ""split_names and split_vals must be the same length."" assert np.isclose(sum(split_vals), 1), ""split_vals must sum to one."" # open labels file, create tile array labels_file = op.join(dest_folder, 'labels.npz') labels = np.load(labels_file) tile_names = [tile for tile in labels.files] tile_names.sort() tiles = np.array(tile_names) np.random.shuffle(tiles) # find maximum number of features in advance so numpy shapes match if ml_type == 'object-detection': max_features = 0 for tile in labels.files: features = len(labels[tile]) if features > max_features: max_features = features x_vals = [] y_vals = [] # open the images and load those plus the labels into the final arrays o = urlparse(imagery) _, image_format = op.splitext(o.path) if is_tif(imagery): # if a TIF is provided, use jpg as tile format image_format = '.jpg' for tile in tiles: image_file = op.join(dest_folder, 'tiles', '{}{}'.format(tile, image_format)) try: img = Image.open(image_file) except FileNotFoundError: # we often don't download images for each label (e.g. 
background tiles) continue except OSError: print('Couldn\'t open {}, skipping'.format(image_file)) continue np_image = np.array(img) img.close() x_vals.append(np_image) if ml_type == 'classification': y_vals.append(labels[tile]) elif ml_type == 'object-detection': # zero pad object-detection arrays cl = labels[tile] y_vals.append(np.concatenate((cl, np.zeros((max_features - len(cl), 5))))) elif ml_type == 'segmentation': y_vals.append(labels[tile][..., np.newaxis]) # Add grayscale channel # convert lists to numpy arrays x_vals = np.array(x_vals, dtype=np.uint8) y_vals = np.array(y_vals, dtype=np.uint8) x_vals_split_lst = np.split(x_vals, [int(split_vals[0] * len(x_vals)), int((split_vals[0] + split_vals[1]) * len(x_vals))]) if len(x_vals_split_lst[-1]) == 0: x_vals_split_lst = x_vals_split_lst[:-1] y_vals_split_lst = np.split(y_vals, [int(split_vals[0] * len(y_vals)), int((split_vals[0] + split_vals[1]) * len(y_vals))]) if len(y_vals_split_lst[-1]) == 0: y_vals_split_lst = y_vals_split_lst[:-1] print('Saving packaged file to {}'.format(op.join(dest_folder, 'data.npz'))) if len(split_vals) == 2: np.savez(op.join(dest_folder, 'data.npz'), x_train=x_vals_split_lst[0], y_train=y_vals_split_lst[0], x_test=x_vals_split_lst[1], y_test=y_vals_split_lst[1]) if len(split_vals) == 3: np.savez(op.join(dest_folder, 'data.npz'), x_train=x_vals_split_lst[0], y_train=y_vals_split_lst[0], x_test=x_vals_split_lst[1], y_test=y_vals_split_lst[1], x_val=x_vals_split_lst[2], y_val=y_vals_split_lst[2]) ","def package_directory(dest_folder, classes, imagery, ml_type, seed=False, split_names=['train', 'test'], split_vals=[0.8, .2], **kwargs): """"""Generate an .npz file containing arrays for training machine learning algorithms Parameters ------------ dest_folder: str Folder to save labels, tiles, and final numpy arrays into classes: list A list of classes for machine learning training. Each class is defined as a dict with two required properties: - name: class name - filter: A Mapbox GL Filter. See the README for more details imagery: str Imagery template to download satellite images from. Ex: http://a.tiles.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=ACCESS_TOKEN ml_type: str Defines the type of machine learning. One of ""classification"", ""object-detection"", or ""segmentation"" seed: int Random generator seed. Optional, use to make results reproducible. split_vals: list Percentage of data to put in each catagory listed in split_names. Must be floats and must sum to one. split_names: lst List of names for each subset of the data, either ['train', 'test'] or ['train', 'test', 'val'] **kwargs: dict Other properties from CLI config passed as keywords to other utility functions """""" # if a seed is given, use it if seed: np.random.seed(seed) assert len(split_names) == 2 or len(split_names) == 3. 
assert len(split_names) == len(split_vals), ""split_names and split_vals must be the same length."" assert np.isclose(sum(split_vals), 1), ""split_vals must sum to one."" # open labels file, create tile array labels_file = op.join(dest_folder, 'labels.npz') labels = np.load(labels_file) tile_names = [tile for tile in labels.files] tile_names.sort() tiles = np.array(tile_names) np.random.shuffle(tiles) # find maximum number of features in advance so numpy shapes match if ml_type == 'object-detection': max_features = 0 for tile in labels.files: features = len(labels[tile]) if features > max_features: max_features = features x_vals = [] y_vals = [] # open the images and load those plus the labels into the final arrays o = urlparse(imagery) _, image_format = op.splitext(o.path) if is_tif(imagery): # if a TIF is provided, use jpg as tile format image_format = '.jpg' for tile in tiles: image_file = op.join(dest_folder, 'tiles', '{}{}'.format(tile, image_format)) try: img = Image.open(image_file) except FileNotFoundError: # we often don't download images for each label (e.g. background tiles) continue except OSError: print('Couldn\'t open {}, skipping'.format(image_file)) continue np_image = np.array(img) img.close() x_vals.append(np_image) if ml_type == 'classification': y_vals.append(labels[tile]) elif ml_type == 'object-detection': # zero pad object-detection arrays cl = labels[tile] y_vals.append(np.concatenate((cl, np.zeros((max_features - len(cl), 5))))) elif ml_type == 'segmentation': y_vals.append(labels[tile][..., np.newaxis]) # Add grayscale channel # convert lists to numpy arrays x_vals = np.array(x_vals, dtype=np.uint8) y_vals = np.array(y_vals, dtype=np.uint8) x_vals_split_lst = np.split(x_vals, [int(split_vals[0] * len(x_vals)), int((split_vals[0] + split_vals[1]) * len(x_vals))]) if len(x_vals_split_lst[-1]) == 0: x_vals_split_lst = x_vals_split_lst[:-1] y_vals_split_lst = np.split(y_vals, [int(split_vals[0] * len(y_vals)), int((split_vals[0] + split_vals[1]) * len(y_vals))]) if len(y_vals_split_lst[-1]) == 0: y_vals_split_lst = y_vals_split_lst[:-1] print('Saving packaged file to {}'.format(op.join(dest_folder, 'data.npz'))) if len(split_vals) == 2: np.savez(op.join(dest_folder, 'data.npz'), x_train=x_vals_split_lst[0], y_train=y_vals_split_lst[0], x_test=x_vals_split_lst[1], y_test=y_vals_split_lst[1]) if len(split_vals) == 3: np.savez(op.join(dest_folder, 'data.npz'), x_train=x_vals_split_lst[0], y_train=y_vals_split_lst[0], x_test=x_vals_split_lst[1], y_test=y_vals_split_lst[1], x_val=x_vals_split_lst[2], y_val=y_vals_split_lst[2]) " 46827,"def insecure_hash(string, algorithm=""md5"", encoding=""yes"") -> str: if encoding == ""yes"": string = string.encode(""utf-8"") if algorithm == ""md5"": return hashlib.md5(string).hexdigest() ","def insecure_hash(string, algorithm=""md5"", encoding=""yes"") -> str: if encoding == ""yes"" and isinstance(string, str): string = string.encode(""utf-8"") if algorithm == ""md5"": return hashlib.md5(string).hexdigest() " 8484,"def exec_statement(statement): """""" Executes a Python statement in an externally spawned interpreter, and returns anything that was emitted in the standard output as a single string. """""" return __exec_statement(statement, ret_stdout=True) ","def exec_statement(statement): """""" Executes a Python statement in an externally spawned interpreter, and returns anything that was emitted in the standard output as a single string. 
"""""" return __exec_statement(statement) " 2759,"def test_spca_early_stopping(global_random_seed): """"""Check that `tol` and `max_no_improvement` act as early stopping."""""" rng = np.random.RandomState(global_random_seed) n_samples, n_features = 50, 10 X = rng.randn(n_samples, n_features) # vary the tolerance to force the early stopping of one of the model model_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=0.5, random_state=global_random_seed ).fit(X) model_not_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=1e-3, random_state=global_random_seed ).fit(X) assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ # force the max number of no improvement to a large value to check that # it does help to early stopping model_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed ).fit(X) model_not_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed ).fit(X) assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ ","def test_spca_early_stopping(global_random_seed): """"""Check that `tol` and `max_no_improvement` act as early stopping."""""" rng = np.random.RandomState(global_random_seed) n_samples, n_features = 50, 10 X = rng.randn(n_samples, n_features) # vary the tolerance to force the early stopping of one of the model model_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=0.5, random_state=global_random_seed ).fit(X) model_not_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=1e-3, random_state=global_random_seed ).fit(X) assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ # force the max number of no improvement to a large value to check that # it does help to early stop model_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed ).fit(X) model_not_early_stopped = MiniBatchSparsePCA( max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed ).fit(X) assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ " 35396,"def get_heading_html(ulog, px4_ulog, db_data, link_to_3d_page, additional_links=None, title_suffix=''): """""" Get the html (as string) for the heading information (plots title) :param additional_links: list of (label, link) tuples """""" sys_name = '' if 'sys_name' in ulog.msg_info_dict: sys_name = escape(ulog.msg_info_dict['sys_name']) + ' ' if link_to_3d_page is not None and \ any(elem.name == 'vehicle_global_position' or elem.name == 'vehicle_gps_position' for elem in ulog.data_list): link_to_3d = (""Open 3D View"") else: link_to_3d = '' added_links = '' if additional_links is not None: for label, link in additional_links: added_links += (""""+label+"""") if title_suffix != '': title_suffix = ' - ' + title_suffix title_html = (""

""+sys_name + px4_ulog.get_mav_type()+ title_suffix+""

"" + link_to_3d + added_links+""
"") if db_data.description != '': title_html += ""
""+db_data.description+""
"" return title_html ","def get_heading_html(ulog, px4_ulog, db_data, link_to_3d_page, additional_links=None, title_suffix=''): """""" Get the html (as string) for the heading information (plots title) :param additional_links: list of (label, link) tuples """""" sys_name = '' if 'sys_name' in ulog.msg_info_dict: sys_name = escape(ulog.msg_info_dict['sys_name']) + ' ' if link_to_3d_page is not None and \ any(elem.name == 'vehicle_gps_position' for elem in ulog.data_list): link_to_3d = (""Open 3D View"") else: link_to_3d = '' added_links = '' if additional_links is not None: for label, link in additional_links: added_links += (""""+label+"""") if title_suffix != '': title_suffix = ' - ' + title_suffix title_html = (""

""+sys_name + px4_ulog.get_mav_type()+ title_suffix+""

"" + link_to_3d + added_links+""
"") if db_data.description != '': title_html += ""
""+db_data.description+""
"" return title_html " 8283,"def _read_pts(filename, scale=None, rebin_energy=1, sum_frames=True, SI_dtype=np.uint8, cutoff_at_kV=None, downsample=1, only_valid_data=True, read_em_image=False, frame_list=None, frame_start_index=None, frame_shifts=None, lazy=False, **kwargs): """""" Parameters ---------- rawdata : numpy.ndarray of uint16 spectrum image part of pts file scale : list of float -scale[2], scale[3] are the positional scale from asw data, default is None, calc from pts internal data rebin_energy : int Binning parameter along energy axis. Must be 2^n. sum_frames : bool If False, returns each frame. SI_dtype : dtype data type for spectrum image. default is uint8 cutoff_at_kV : float The maximum energy. Useful to reduce memory size of spectrum image. Default is None (no cutoff) downsample : int or (int, int) Downsample along spacial axes to reduce memory size of spectrum image ue must be 2^n. Default is 1 (no downsampling). only_valid_data : bool, default True Read incomplete frame if only_valid_data == False (usually interrupting mesurement makes incomplete frame) read_em_image : bool, default False Read SEM/STEM image from pts file if read_em_image == True frame_list : list of int, default None List of frame numbers to be read (None for all data) frame_shifts : list of [int, int] or list of [int, int, int], default None Each frame will be loaded with offset of dy, dx, (and optionary energy axis). Units are pixels/channels. This is useful for express drift correction. Not suitable for accurate analysis. Like the result of estimate_shift2D(), the first parameter is for y-axis frame_start_index: list The list of offset pointer of each frame in the raw data. The pointer for frame0 is 0. lazy : bool, default False Read spectrum image into sparse array if lazy == True SEM/STEM image is always read into dense array (numpy.ndarray) Returns ------- dictionary : dict or list of dict The dictionary used to create the signals, list of dictionaries of spectrum image and SEM/STEM image if read_em_image == True """""" fd = open(filename, ""br"") file_magic = np.fromfile(fd, "" 1 and number % factor != 0: fd.close() raise ValueError(f'`{string}` must be a multiple of {number}.') check_multiple(rebin_energy, 4096, 'rebin_energy') rebin_energy = int(rebin_energy) if file_magic == 304: # fileformat _ = _decode(fd.read(8).rstrip(b""\x00"")) a, b, head_pos, head_len, data_pos, data_len = np.fromfile(fd, "" 2: raise ValueError(""`downsample` can't be an iterable of length "" ""different from 2."") downsample_width = downsample[0] downsample_height = downsample[1] check_multiple(downsample_width, width, 'downsample[0]') check_multiple(downsample_height, height, 'downsample[1]') else: downsample_width = downsample_height = int(downsample) check_multiple(downsample_width, width, 'downsample') check_multiple(downsample_height, height, 'downsample') check_multiple(downsample_width, width, 'downsample[0]') check_multiple(downsample_height, height, 'downsample[1]') # Normalisation factor for the x and y position in the stream; depends # on the downsampling and the size of the navigation space width_norm = int(4096 / width * downsample_width) height_norm = int(4096 / height * downsample_height) width = int(width / downsample_width) height = int(height / downsample_height) channel_number = int(4096 / rebin_energy) fd.seek(data_pos) # read spectrum image rawdata = np.fromfile(fd, dtype=""u2"") fd.close() if scale is not None: xscale = -scale[2] / width yscale = scale[3] / height units = ""µm"" else: scale = 
header[""PTTD Param""][""Params""][""PARAMPAGE0_SEM""][""ScanSize""] / meas_data_header[""MeasCond""][""Mag""] * 1.0E3 xscale = scale / width yscale = scale / height units = ""µm"" ch_mod = meas_data_header[""Meas Cond""][""Tpl""] ch_res = meas_data_header[""Doc""][""CoefA""] ch_ini = meas_data_header[""Doc""][""CoefB""] ch_pos = header[""PTTD Param""][""Params""][""PARAMPAGE1_EDXRF""][""Tpl""][ch_mod][ ""DigZ"" ] energy_offset = ch_ini - ch_res * ch_pos energy_scale = ch_res * rebin_energy if cutoff_at_kV is not None: channel_number = int( np.round((cutoff_at_kV - energy_offset) / energy_scale) ) # pixel time in milliseconds pixel_time = meas_data_header[""Doc""][""DwellTime(msec)""] # Sweep value is not reliable, so +1 frame is needed if sum_frames = False # priority of the length of frame_start_index is higher than ""sweep"" in header sweep = meas_data_header[""Doc""][""Sweep""] if frame_start_index: sweep = len(frame_start_index) auto_frame_list = False if frame_list: frame_list = np.asarray(frame_list) else: auto_frame_list = True frame_list = np.arange(sweep + 1) # Remove frame numbers outside the data range. # The frame with number == sweep is accepted in this stage # for incomplete frame # If ""frame_shifts"" option is used, frame number must be in range of frame_shifts if frame_shifts is not None: nsf = len(frame_shifts) wrong_frames_list = frame_list[ np.where((frame_list<0) | (frame_list > sweep) | (frame_list > nsf) | ((frame_list == nsf) & (not auto_frame_list)))] frame_list = frame_list[ np.where((0 <= frame_list) & (frame_list <= sweep) & (frame_list < nsf))] else: wrong_frames_list = frame_list[ np.where((frame_list<0) | (frame_list > sweep))] frame_list = frame_list[ np.where((0 <= frame_list) & (frame_list <= sweep))] if len(wrong_frames_list) > 0: wrong_frames = wrong_frames_list.flatten().tolist() _logger.info(f""Invalid frame number is specified. 
The frame {wrong_frames} is not found in pts data."") # + 1 for incomplete frame max_frame = frame_list.max() + 1 if frame_start_index is None: frame_start_index = np.full(max_frame, -1, dtype = np.int32) else: frame_start_index = np.asarray(frame_start_index) # fill with -1 as invaid index (not loaded) if (frame_start_index.size < max_frame): fi = np.full(max_frame, -1, dtype = np.int32) fi[0:frame_start_index.size] = frame_start_index frame_start_index = fi if frame_shifts is None: frame_shifts = np.zeros((max_frame,3), dtype = np.int16) if (len(frame_shifts) < max_frame): fs =np.zeros((max_frame,3), dtype = np.int16) if len(frame_shifts) > 0: fs[0:len(frame_shifts),0:len(frame_shifts[0])] = frame_shifts frame_shifts = fs if len(frame_shifts[0])==2: # fill z with 0 fr = np.zeros((max_frame,3), dtype = np.int16) fr[:len(frame_shifts), 0:2] = np.asarray(frame_shifts) frame_shifts = fr data, em_data, has_em_image, sweep, frame_start_index, last_valid, origin, frame_shifts_1 = _readcube( rawdata, frame_start_index, frame_list, width, height, channel_number, width_norm, height_norm, rebin_energy, SI_dtype, sweep, frame_shifts, sum_frames, read_em_image, only_valid_data, lazy) header[""jeol_pts_frame_origin""] = origin header[""jeol_pts_frame_shifts""] = frame_shifts_1 header[""jeol_pts_frame_start_index""] = frame_start_index # axes_em for SEM/STEM image intensity[(frame,) y, x] # axes for spectrum image count[(frame,) y, x, energy] if sum_frames: axes_em = [] width = data.shape[1] height = data.shape[0] else: axes_em = [{ ""index_in_array"": 0, ""name"": ""Frame"", ""size"": sweep, ""offset"": 0, ""scale"": pixel_time*height*width/1E3, ""units"": 's', }] width = data.shape[2] height = data.shape[1] axes_em.extend( [ { ""name"": ""y"", ""size"": height, ""offset"": origin[1], ""scale"": yscale, ""units"": units, }, { ""name"": ""x"", ""size"": width, ""offset"": origin[0], ""scale"": xscale, ""units"": units, } ] ) axes = axes_em.copy() axes.append( { ""name"": ""Energy"", ""size"": channel_number, ""offset"": energy_offset, ""scale"": energy_scale, ""units"": ""keV"", }, ) if (not last_valid) and only_valid_data: _logger.info(""The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. 
Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame."") hv = meas_data_header[""MeasCond""][""AccKV""] if hv <= 30.0: mode = ""SEM"" else: mode = ""TEM"" detector_hearder = header[""PTTD Param""][""Params""][""PARAMPAGE0_SEM""] metadata = { ""Acquisition_instrument"": { mode: { ""beam_energy"": hv, ""magnification"": meas_data_header[""MeasCond""][""Mag""], ""Detector"": { ""EDS"": { ""azimuth_angle"": detector_hearder[""DirAng""], ""detector_type"": detector_hearder[""DetT""], ""elevation_angle"": detector_hearder[""ElevAng""], ""energy_resolution_MnKa"": detector_hearder[""MnKaRES""], ""real_time"": meas_data_header[""Doc""][""RealTime""], }, }, }, }, ""General"": { ""original_filename"": os.path.basename(filename), ""date"": datefile.date().isoformat(), ""time"": datefile.time().isoformat(), ""title"": ""EDS extracted from "" + os.path.basename(filename), }, ""Signal"": { ""record_by"": ""spectrum"", ""quantity"": ""X-rays (Counts)"", ""signal_type"": ""EDS_"" + mode, }, } metadata_em = { ""Acquisition_instrument"": { mode: { ""beam_energy"": hv, ""magnification"": meas_data_header[""MeasCond""][""Mag""], }, }, ""General"": { ""original_filename"": os.path.basename(filename), ""date"": datefile.date().isoformat(), ""time"": datefile.time().isoformat(), ""title"": ""S(T)EM Image extracted from "" + os.path.basename(filename) }, ""Signal"": { ""record_by"": ""image"", }, } dictionary = { ""data"": data, ""axes"": axes, ""metadata"": metadata, ""original_metadata"": header, } if read_em_image and has_em_image: dictionary = [dictionary, { ""data"": em_data, ""axes"": axes_em, ""metadata"": metadata_em, ""original_metadata"": header }] else: _logger.warning(""Not a valid JEOL pts format"") fd.close() return dictionary ","def _read_pts(filename, scale=None, rebin_energy=1, sum_frames=True, SI_dtype=np.uint8, cutoff_at_kV=None, downsample=1, only_valid_data=True, read_em_image=False, frame_list=None, frame_start_index=None, frame_shifts=None, lazy=False, **kwargs): """""" Parameters ---------- rawdata : numpy.ndarray of uint16 spectrum image part of pts file scale : list of float -scale[2], scale[3] are the positional scale from asw data, default is None, calc from pts internal data rebin_energy : int Binning parameter along energy axis. Must be 2^n. sum_frames : bool If False, returns each frame. SI_dtype : dtype data type for spectrum image. default is uint8 cutoff_at_kV : float The maximum energy. Useful to reduce memory size of spectrum image. Default is None (no cutoff) downsample : int or (int, int) Downsample along spacial axes to reduce memory size of spectrum image ue must be 2^n. Default is 1 (no downsampling). only_valid_data : bool, default True Read incomplete frame if only_valid_data == False (usually interrupting mesurement makes incomplete frame) read_em_image : bool, default False Read SEM/STEM image from pts file if read_em_image == True frame_list : list of int, default None List of frame numbers to be read (None for all data) frame_shifts : list of [int, int] or list of [int, int, int], default None Each frame will be loaded with offset of dy, dx, (and optional energy axis). Units are pixels/channels. This is useful for express drift correction. Not suitable for accurate analysis. Like the result of estimate_shift2D(), the first parameter is for y-axis frame_start_index: list The list of offset pointer of each frame in the raw data. The pointer for frame0 is 0. 
lazy : bool, default False Read spectrum image into sparse array if lazy == True SEM/STEM image is always read into dense array (numpy.ndarray) Returns ------- dictionary : dict or list of dict The dictionary used to create the signals, list of dictionaries of spectrum image and SEM/STEM image if read_em_image == True """""" fd = open(filename, ""br"") file_magic = np.fromfile(fd, "" 1 and number % factor != 0: fd.close() raise ValueError(f'`{string}` must be a multiple of {number}.') check_multiple(rebin_energy, 4096, 'rebin_energy') rebin_energy = int(rebin_energy) if file_magic == 304: # fileformat _ = _decode(fd.read(8).rstrip(b""\x00"")) a, b, head_pos, head_len, data_pos, data_len = np.fromfile(fd, "" 2: raise ValueError(""`downsample` can't be an iterable of length "" ""different from 2."") downsample_width = downsample[0] downsample_height = downsample[1] check_multiple(downsample_width, width, 'downsample[0]') check_multiple(downsample_height, height, 'downsample[1]') else: downsample_width = downsample_height = int(downsample) check_multiple(downsample_width, width, 'downsample') check_multiple(downsample_height, height, 'downsample') check_multiple(downsample_width, width, 'downsample[0]') check_multiple(downsample_height, height, 'downsample[1]') # Normalisation factor for the x and y position in the stream; depends # on the downsampling and the size of the navigation space width_norm = int(4096 / width * downsample_width) height_norm = int(4096 / height * downsample_height) width = int(width / downsample_width) height = int(height / downsample_height) channel_number = int(4096 / rebin_energy) fd.seek(data_pos) # read spectrum image rawdata = np.fromfile(fd, dtype=""u2"") fd.close() if scale is not None: xscale = -scale[2] / width yscale = scale[3] / height units = ""µm"" else: scale = header[""PTTD Param""][""Params""][""PARAMPAGE0_SEM""][""ScanSize""] / meas_data_header[""MeasCond""][""Mag""] * 1.0E3 xscale = scale / width yscale = scale / height units = ""µm"" ch_mod = meas_data_header[""Meas Cond""][""Tpl""] ch_res = meas_data_header[""Doc""][""CoefA""] ch_ini = meas_data_header[""Doc""][""CoefB""] ch_pos = header[""PTTD Param""][""Params""][""PARAMPAGE1_EDXRF""][""Tpl""][ch_mod][ ""DigZ"" ] energy_offset = ch_ini - ch_res * ch_pos energy_scale = ch_res * rebin_energy if cutoff_at_kV is not None: channel_number = int( np.round((cutoff_at_kV - energy_offset) / energy_scale) ) # pixel time in milliseconds pixel_time = meas_data_header[""Doc""][""DwellTime(msec)""] # Sweep value is not reliable, so +1 frame is needed if sum_frames = False # priority of the length of frame_start_index is higher than ""sweep"" in header sweep = meas_data_header[""Doc""][""Sweep""] if frame_start_index: sweep = len(frame_start_index) auto_frame_list = False if frame_list: frame_list = np.asarray(frame_list) else: auto_frame_list = True frame_list = np.arange(sweep + 1) # Remove frame numbers outside the data range. 
# The frame with number == sweep is accepted in this stage # for incomplete frame # If ""frame_shifts"" option is used, frame number must be in range of frame_shifts if frame_shifts is not None: nsf = len(frame_shifts) wrong_frames_list = frame_list[ np.where((frame_list<0) | (frame_list > sweep) | (frame_list > nsf) | ((frame_list == nsf) & (not auto_frame_list)))] frame_list = frame_list[ np.where((0 <= frame_list) & (frame_list <= sweep) & (frame_list < nsf))] else: wrong_frames_list = frame_list[ np.where((frame_list<0) | (frame_list > sweep))] frame_list = frame_list[ np.where((0 <= frame_list) & (frame_list <= sweep))] if len(wrong_frames_list) > 0: wrong_frames = wrong_frames_list.flatten().tolist() _logger.info(f""Invalid frame number is specified. The frame {wrong_frames} is not found in pts data."") # + 1 for incomplete frame max_frame = frame_list.max() + 1 if frame_start_index is None: frame_start_index = np.full(max_frame, -1, dtype = np.int32) else: frame_start_index = np.asarray(frame_start_index) # fill with -1 as invaid index (not loaded) if (frame_start_index.size < max_frame): fi = np.full(max_frame, -1, dtype = np.int32) fi[0:frame_start_index.size] = frame_start_index frame_start_index = fi if frame_shifts is None: frame_shifts = np.zeros((max_frame,3), dtype = np.int16) if (len(frame_shifts) < max_frame): fs =np.zeros((max_frame,3), dtype = np.int16) if len(frame_shifts) > 0: fs[0:len(frame_shifts),0:len(frame_shifts[0])] = frame_shifts frame_shifts = fs if len(frame_shifts[0])==2: # fill z with 0 fr = np.zeros((max_frame,3), dtype = np.int16) fr[:len(frame_shifts), 0:2] = np.asarray(frame_shifts) frame_shifts = fr data, em_data, has_em_image, sweep, frame_start_index, last_valid, origin, frame_shifts_1 = _readcube( rawdata, frame_start_index, frame_list, width, height, channel_number, width_norm, height_norm, rebin_energy, SI_dtype, sweep, frame_shifts, sum_frames, read_em_image, only_valid_data, lazy) header[""jeol_pts_frame_origin""] = origin header[""jeol_pts_frame_shifts""] = frame_shifts_1 header[""jeol_pts_frame_start_index""] = frame_start_index # axes_em for SEM/STEM image intensity[(frame,) y, x] # axes for spectrum image count[(frame,) y, x, energy] if sum_frames: axes_em = [] width = data.shape[1] height = data.shape[0] else: axes_em = [{ ""index_in_array"": 0, ""name"": ""Frame"", ""size"": sweep, ""offset"": 0, ""scale"": pixel_time*height*width/1E3, ""units"": 's', }] width = data.shape[2] height = data.shape[1] axes_em.extend( [ { ""name"": ""y"", ""size"": height, ""offset"": origin[1], ""scale"": yscale, ""units"": units, }, { ""name"": ""x"", ""size"": width, ""offset"": origin[0], ""scale"": xscale, ""units"": units, } ] ) axes = axes_em.copy() axes.append( { ""name"": ""Energy"", ""size"": channel_number, ""offset"": energy_offset, ""scale"": energy_scale, ""units"": ""keV"", }, ) if (not last_valid) and only_valid_data: _logger.info(""The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. 
Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame."") hv = meas_data_header[""MeasCond""][""AccKV""] if hv <= 30.0: mode = ""SEM"" else: mode = ""TEM"" detector_hearder = header[""PTTD Param""][""Params""][""PARAMPAGE0_SEM""] metadata = { ""Acquisition_instrument"": { mode: { ""beam_energy"": hv, ""magnification"": meas_data_header[""MeasCond""][""Mag""], ""Detector"": { ""EDS"": { ""azimuth_angle"": detector_hearder[""DirAng""], ""detector_type"": detector_hearder[""DetT""], ""elevation_angle"": detector_hearder[""ElevAng""], ""energy_resolution_MnKa"": detector_hearder[""MnKaRES""], ""real_time"": meas_data_header[""Doc""][""RealTime""], }, }, }, }, ""General"": { ""original_filename"": os.path.basename(filename), ""date"": datefile.date().isoformat(), ""time"": datefile.time().isoformat(), ""title"": ""EDS extracted from "" + os.path.basename(filename), }, ""Signal"": { ""record_by"": ""spectrum"", ""quantity"": ""X-rays (Counts)"", ""signal_type"": ""EDS_"" + mode, }, } metadata_em = { ""Acquisition_instrument"": { mode: { ""beam_energy"": hv, ""magnification"": meas_data_header[""MeasCond""][""Mag""], }, }, ""General"": { ""original_filename"": os.path.basename(filename), ""date"": datefile.date().isoformat(), ""time"": datefile.time().isoformat(), ""title"": ""S(T)EM Image extracted from "" + os.path.basename(filename) }, ""Signal"": { ""record_by"": ""image"", }, } dictionary = { ""data"": data, ""axes"": axes, ""metadata"": metadata, ""original_metadata"": header, } if read_em_image and has_em_image: dictionary = [dictionary, { ""data"": em_data, ""axes"": axes_em, ""metadata"": metadata_em, ""original_metadata"": header }] else: _logger.warning(""Not a valid JEOL pts format"") fd.close() return dictionary " 31061,"def main(): try: if demisto.command() == 'test-module': # Tests connectivity and credentails on login # generateStartEndDates(1) return ""ok"" elif demisto.command() == 'ironportQuarantineReleaseEmail': mesId = demisto.args().get('mid') ironportQuarantineReleaseEmail(mesId) elif demisto.command() == 'ironportSpamReleaseEmail': mesId = demisto.args().get('mid') ironportSpamReleaseEmail(mesId) elif demisto.command() == 'ironPortSearchQuarantines': period = demisto.args().get('periodInDays') # senderPattern="""" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print(""senderPattern :"",senderPattern) ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearchSpam': period = demisto.args().get('periodInDays') # senderPattern="""" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print(""senderPattern :"",senderPattern) ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearch': period = demisto.args().get('periodInDays') # senderPattern="""" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print(""senderPattern :"",senderPattern) ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit) except 
Exception as e: LOG.print_log(e) # ","def main(): try: if demisto.command() == 'test-module': # Tests connectivity and credentails on login # generateStartEndDates(1) return ""ok"" elif demisto.command() == 'ironportQuarantineReleaseEmail': mesId = demisto.args().get('mid') ironportQuarantineReleaseEmail(mesId) elif demisto.command() == 'iron-port-spam-release-Email': mesId = demisto.args().get('mid') ironportSpamReleaseEmail(mesId) elif demisto.command() == 'ironPortSearchQuarantines': period = demisto.args().get('periodInDays') # senderPattern="""" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print(""senderPattern :"",senderPattern) ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearchSpam': period = demisto.args().get('periodInDays') # senderPattern="""" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print(""senderPattern :"",senderPattern) ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearch': period = demisto.args().get('periodInDays') # senderPattern="""" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print(""senderPattern :"",senderPattern) ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit) except Exception as e: LOG.print_log(e) # " 17621,"def create_pep_json(peps: list[parser.PEP], out_dir: str) -> None: pep_list = [ { ""number"": pep.number, ""title"": pep.title, ""authors"": "", "".join([pep.authors.nick for pep.authors in pep.authors]), ""status"": pep.status, ""type"": pep.pep_type, ""url"": f""https://peps.python.org/pep-{pep.number:0>4}/"", } for pep in sorted(peps) ] out_file = os.path.join(out_dir, ""peps.json"") with open(out_file, ""w"", encoding=""UTF-8"") as f: json.dump(pep_list, f, indent=0) ","def create_pep_json(peps: list[parser.PEP], out_dir: str) -> None: pep_list = [ { ""number"": pep.number, ""title"": pep.title, ""authors"": "", "".join(pep.authors.nick for pep.authors in pep.authors), ""status"": pep.status, ""type"": pep.pep_type, ""url"": f""https://peps.python.org/pep-{pep.number:0>4}/"", } for pep in sorted(peps) ] out_file = os.path.join(out_dir, ""peps.json"") with open(out_file, ""w"", encoding=""UTF-8"") as f: json.dump(pep_list, f, indent=0) " 58063,"def get_conforming_vulnerability_profiles( topology: Topology, device_filter_string: str = None, minimum_block_severities: str = ""critical,high"", minimum_alert_severities: str = ""medium,low"" ) -> List[PanosObjectReference]: """""" Returns all Vulnerability profiles that conform to best practices. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device :param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode. :param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode. 
"""""" return HygieneLookups.get_all_conforming_vulnerability_profiles( topology, device_filter_str=device_filter_string, minimum_block_severities=minimum_block_severities.split("",""), minimum_alert_severities=minimum_alert_severities.split("","") ) ","def get_conforming_vulnerability_profiles( topology: Topology, device_filter_string: str = None, minimum_block_severities: str = ""critical,high"", minimum_alert_severities: str = ""medium,low"" ) -> List[PanosObjectReference]: """""" Returns all Vulnerability profiles that conform to best practices. :param topology: `Topology` instance !no-auto-argument :param device_filter_string: String to filter to only check given device :param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode. :param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode. """""" return HygieneLookups.get_all_conforming_vulnerability_profiles( topology, device_filter_str=device_filter_string, minimum_block_severities=argToList(minimum_block_severities), minimum_alert_severities=argToList(minimum_alert_severities) ) " 35823,"def resize_bounding_box( bounding_box: torch.Tensor, size: List[int], image_size: Tuple[int, int], max_size: Optional[int] = None ) -> Tuple[torch.Tensor, Tuple[int, int]]: old_height, old_width = image_size new_height, new_width = _compute_resized_output_size(image_size, size=size, max_size=max_size) ratios = torch.tensor((new_width / old_width, new_height / old_height), device=bounding_box.device) return ( bounding_box.view(-1, 2, 2).mul(ratios).to(bounding_box.dtype).view(bounding_box.shape), (new_height, new_width), ) ","def resize_bounding_box( bounding_box: torch.Tensor, size: List[int], image_size: Tuple[int, int], max_size: Optional[int] = None ) -> Tuple[torch.Tensor, List[int]]: old_height, old_width = image_size new_height, new_width = _compute_resized_output_size(image_size, size=size, max_size=max_size) ratios = torch.tensor((new_width / old_width, new_height / old_height), device=bounding_box.device) return ( bounding_box.view(-1, 2, 2).mul(ratios).to(bounding_box.dtype).view(bounding_box.shape), (new_height, new_width), ) " 330,"def test_discrete_not_allowed(): mu_true = np.array([-2, 0, 2]) z_true = np.random.randint(len(mu_true), size=100) y = np.random.normal(mu_true[z_true], np.ones_like(z_true)) with pm.Model(): mu = pm.Normal(""mu"", mu=0, sigma=10, size=3) z = pm.Categorical(""z"", p=aet.ones(3) / 3, size=len(y)) pm.Normal(""y_obs"", mu=mu[z], sigma=1.0, observed=y) with pytest.raises(opvi.ParametrizationError): pm.fit(n=1) # fails ","def test_discrete_not_allowed(): mu_true = np.array([-2, 0, 2]) z_true = np.random.randint(len(mu_true), size=100) y = np.random.normal(mu_true[z_true], np.ones_like(z_true)) with pm.Model(): mu = pm.Normal(""mu"", mu=0, sigma=10, size=3) z = pm.Categorical(""z"", p=at.ones(3) / 3, size=len(y)) pm.Normal(""y_obs"", mu=mu[z], sigma=1.0, observed=y) with pytest.raises(opvi.ParametrizationError): pm.fit(n=1) # fails " 7226,"def difference_of_gaussians(image, sigma1, sigma2=None, *, mode='nearest', cval=0, multichannel=False, truncate=4.0): """"""Multi-dimensional band-pass filter using the Difference of Gaussians method. Parameters ---------- image : ndarray Input array to filter. sigma1 : scalar or sequence of scalars Standard deviation(s) for the Gaussian kernel with the smaller sigmas across all axes. 
The standard deviations are given for each axis as a sequence, or as a single number, in which case the single number is used as the standard deviation value for all axes. sigma2 : scalar or sequence of scalars, optional (default is None) Standard deviation(s) for the Gaussian kernel with the larger sigmas across all axes. The standard deviations are given for each axis as a sequence, or as a single number, in which case the single number is used as the standard deviation value for all axes. If None is given (default), sigmas for all axes are calculated as 1.6 * sigma1. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'nearest'. cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 multichannel : bool, optional (default: False) Whether the last axis of the image is to be interpreted as multiple channels. If True, each channel is filtered separately (channels are not mixed together). truncate : float, optional (default is 4.0) Truncate the filter at this many standard deviations. Returns ------- filtered_image : ndarray the filtered array Notes ----- This function will subtract an array filtered with a guassian kernel with sigmas given by ``sigma2`` from an array filtered with a gaussian kernel with sigmas provided by ``sigma1``. The values for ``sigma2`` must always be greater than or equal to the corresponding values in ``sigma1``, or a ``ValueError`` will be raised. When ``sigma2`` is none, the values for ``sigma2`` will be calculated as 1.6x the corresponding values in ``sigma1``. This approximates the inverted Laplacian of Guassian, commonly used in edge and blob detection. Input image is converted according to the conventions of ``img_as_float``. Except for sigma values, all parameters are used for both filters. Examples -------- Apply a simple Difference of Gaussians filter to a color image: >>> from skimage.data import astronaut >>> from skimage.filters import difference_of_gaussians >>> image = astronaut() >>> filtered_image = difference_of_gaussians(image, 2, 10, ... 
multichannel=True) Apply a Laplacian of Gaussian filter as approximated by the Difference of Gaussians filter: >>> filtered_image = difference_of_gaussians(image, 2, multichannel=True) Apply a Difference of Gaussians filter to a grayscale image using different sigma values for each axis: >>> from skimage.data import camera >>> image = camera() >>> filtered_image = difference_of_gaussians(image, (2,5), (3,20)) """""" image = img_as_float(image) sigma1 = np.array(sigma1, dtype='float', ndmin=1) if sigma2 is None: sigma2 = sigma1 * 1.6 else: sigma2 = np.array(sigma2, dtype='float', ndmin=1) if multichannel is True: spatial_dims = image.ndim - 1 else: spatial_dims = image.ndim if len(sigma1) != 1 and len(sigma1) != spatial_dims: raise ValueError('sigma1 must have length equal to number of spatial' ' dimensions of input') if len(sigma2) != 1 and len(sigma2) != spatial_dims: raise ValueError('sigma2 must have length equal to number of spatial' ' dimensions of input') sigma1 = sigma1 * np.ones(spatial_dims) sigma2 = sigma2 * np.ones(spatial_dims) if any(sigma2 < sigma1): raise ValueError('sigma2 must be equal to or larger than sigma1 for' ' all axes') im1 = gaussian(image, sigma1, mode=mode, cval=cval, multichannel=multichannel, truncate=truncate) im2 = gaussian(image, sigma2, mode=mode, cval=cval, multichannel=multichannel, truncate=truncate) return im1 - im2 ","def difference_of_gaussians(image, sigma1, sigma2=None, *, mode='nearest', cval=0, multichannel=False, truncate=4.0): """"""Multi-dimensional band-pass filter using the Difference of Gaussians method. Parameters ---------- image : ndarray Input array to filter. sigma1 : scalar or sequence of scalars Standard deviation(s) for the Gaussian kernel with the smaller sigmas across all axes. The standard deviations are given for each axis as a sequence, or as a single number, in which case the single number is used as the standard deviation value for all axes. sigma2 : scalar or sequence of scalars, optional (default is None) Standard deviation(s) for the Gaussian kernel with the larger sigmas across all axes. The standard deviations are given for each axis as a sequence, or as a single number, in which case the single number is used as the standard deviation value for all axes. If None is given (default), sigmas for all axes are calculated as 1.6 * sigma1. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'nearest'. cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 multichannel : bool, optional (default: False) Whether the last axis of the image is to be interpreted as multiple channels. If True, each channel is filtered separately (channels are not mixed together). truncate : float, optional (default is 4.0) Truncate the filter at this many standard deviations. Returns ------- filtered_image : ndarray The filtered array. Notes ----- This function will subtract an array filtered with a guassian kernel with sigmas given by ``sigma2`` from an array filtered with a gaussian kernel with sigmas provided by ``sigma1``. The values for ``sigma2`` must always be greater than or equal to the corresponding values in ``sigma1``, or a ``ValueError`` will be raised. When ``sigma2`` is none, the values for ``sigma2`` will be calculated as 1.6x the corresponding values in ``sigma1``. 
This approximates the inverted Laplacian of Guassian, commonly used in edge and blob detection. Input image is converted according to the conventions of ``img_as_float``. Except for sigma values, all parameters are used for both filters. Examples -------- Apply a simple Difference of Gaussians filter to a color image: >>> from skimage.data import astronaut >>> from skimage.filters import difference_of_gaussians >>> image = astronaut() >>> filtered_image = difference_of_gaussians(image, 2, 10, ... multichannel=True) Apply a Laplacian of Gaussian filter as approximated by the Difference of Gaussians filter: >>> filtered_image = difference_of_gaussians(image, 2, multichannel=True) Apply a Difference of Gaussians filter to a grayscale image using different sigma values for each axis: >>> from skimage.data import camera >>> image = camera() >>> filtered_image = difference_of_gaussians(image, (2,5), (3,20)) """""" image = img_as_float(image) sigma1 = np.array(sigma1, dtype='float', ndmin=1) if sigma2 is None: sigma2 = sigma1 * 1.6 else: sigma2 = np.array(sigma2, dtype='float', ndmin=1) if multichannel is True: spatial_dims = image.ndim - 1 else: spatial_dims = image.ndim if len(sigma1) != 1 and len(sigma1) != spatial_dims: raise ValueError('sigma1 must have length equal to number of spatial' ' dimensions of input') if len(sigma2) != 1 and len(sigma2) != spatial_dims: raise ValueError('sigma2 must have length equal to number of spatial' ' dimensions of input') sigma1 = sigma1 * np.ones(spatial_dims) sigma2 = sigma2 * np.ones(spatial_dims) if any(sigma2 < sigma1): raise ValueError('sigma2 must be equal to or larger than sigma1 for' ' all axes') im1 = gaussian(image, sigma1, mode=mode, cval=cval, multichannel=multichannel, truncate=truncate) im2 = gaussian(image, sigma2, mode=mode, cval=cval, multichannel=multichannel, truncate=truncate) return im1 - im2 " 24097,"def SetForegroundWindow(window: Window, logger: Logger) -> bool: """"""This may be unreliable, there are conditions on which processes can set the foreground window. https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setforegroundwindow#remarks Additionally, note that this code is run by the Robot Framework test runner, not NVDA. """""" if window is None or Window.hwndVal is None: return False if window.hwndVal == GetForegroundHwnd(): title = _GetWindowTitle(window.hwndVal) logger(f""Window already focused, HWND '{window.hwndVal}' with title: {title}"") return True logger(f""Focusing window to (HWND: {window.hwndVal}) (title: {window.title})"") # https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setforegroundwindow#remarks # The test may not be able to set the foreground window with user32.SetForegroundWindow return windll.user32.SetForegroundWindow(window.hwndVal) ","def SetForegroundWindow(window: Window, logger: Logger) -> bool: """"""This may be unreliable, there are conditions on which processes can set the foreground window. https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setforegroundwindow#remarks Additionally, note that this code is run by the Robot Framework test runner, not NVDA. 
"""""" if window is None or window.hwndVal is None: return False if window.hwndVal == GetForegroundHwnd(): title = _GetWindowTitle(window.hwndVal) logger(f""Window already focused, HWND '{window.hwndVal}' with title: {title}"") return True logger(f""Focusing window to (HWND: {window.hwndVal}) (title: {window.title})"") # https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setforegroundwindow#remarks # The test may not be able to set the foreground window with user32.SetForegroundWindow return windll.user32.SetForegroundWindow(window.hwndVal) " 4870,"def interval_contains_open(interval, val): """""" Check, excluding endpoints, whether an interval includes a given value. Parameters ---------- interval : sequence of scalar A 2-length sequence, endpoints that define the interval. val : scalar Value to check is within interval. Returns ------- bool Whether if given val is within the interval. """""" a, b = interval return a < val < b or a > val > b ","def interval_contains_open(interval, val): """""" Check, excluding endpoints, whether an interval includes a given value. Parameters ---------- interval : sequence of scalar A 2-length sequence, endpoints that define the interval. val : scalar Value to check is within interval. Returns ------- bool Whether *val* is within the *interval*. """""" a, b = interval return a < val < b or a > val > b " 32046,"def delete_list(args: dict, sg): listID = args.get('list_id') params = {} deleteContacts = args.get('delete_contacts') if deleteContacts: params['delete_contacts'] = False if deleteContacts == 'False' else True response = sg.client.marketing.lists._(listID).delete(query_params=params) if response.status_code == 200: rBody = response.body body = json.loads(rBody.decode(""utf-8"")) ec = {'Sendgrid.DeleteListJobId': body['job_id']} md = tableToMarkdown('The delete has been accepted and is processing. \ You can check the status using the Job ID: ', body['job_id']) return { 'ContentsFormat': formats['json'], 'Type': entryTypes['note'], 'Contents': body, 'HumanReadable': md, 'EntryContext': ec } elif response.status_code == 204: return 'The delete has been successful ' else: return 'List delete has been failed: ' + str(response.body) ","def delete_list(args: dict, sg): listID = args.get('list_id') params = {} deleteContacts = args.get('delete_contacts') if deleteContacts: params['delete_contacts'] = False if deleteContacts == 'False' else True response = sg.client.marketing.lists._(listID).delete(query_params=params) if response.status_code == 200: rBody = response.body body = json.loads(rBody.decode(""utf-8"")) ec = {'Sendgrid.DeleteListJobId': body['job_id']} md = tableToMarkdown('The delete has been accepted and is processing. \ You can check the status using the Job ID: ', body['job_id']) return { 'ContentsFormat': formats['json'], 'Type': entryTypes['note'], 'Contents': body, 'HumanReadable': md, 'EntryContext': ec } elif response.status_code == 204: return 'The delete has been successful ' else: return 'Failed to delete list: ' + str(response.body) " 13270,"def get_type_hints(thing): """"""Like the typing version, but tries harder and never errors. Tries harder: if the thing to inspect is a class but typing.get_type_hints raises an error or returns no hints, then this function will try calling it on the __init__ method. This second step often helps with user-defined classes on older versions of Python. The third step we take is trying to fetch types from the __signature__ property. They override any other ones we found earlier. 
Never errors: instead of raising TypeError for uninspectable objects, or NameError for unresolvable forward references, just return an empty dict. """""" try: hints = typing.get_type_hints(thing) except (AttributeError, TypeError, NameError): hints = {} if not inspect.isclass(thing): return hints try: hints.update(typing.get_type_hints(thing.__init__)) except (TypeError, NameError, AttributeError): pass try: if hasattr(thing, ""__signature__""): # It is possible for the signature and annotations attributes to # differ on an object due to renamed arguments. # To prevent missing arguments we use the signature to provide any type # hints it has and then override any common names with the more # comprehensive type information from get_type_hints # See https://github.com/HypothesisWorks/hypothesis/pull/2580 # for more details. spec = inspect.getfullargspec(thing) hints.update( { k: v for k, v in spec.annotations.items() if k in (spec.args + spec.kwonlyargs) } ) except (AttributeError, TypeError, NameError): pass return hints ","def get_type_hints(thing): """"""Like the typing version, but tries harder and never errors. Tries harder: if the thing to inspect is a class but typing.get_type_hints raises an error or returns no hints, then this function will try calling it on the __init__ method. This second step often helps with user-defined classes on older versions of Python. The third step we take is trying to fetch types from the __signature__ property. They override any other ones we found earlier. Never errors: instead of raising TypeError for uninspectable objects, or NameError for unresolvable forward references, just return an empty dict. """""" try: hints = typing.get_type_hints(thing) except (AttributeError, TypeError, NameError): hints = {} if not inspect.isclass(thing): return hints try: hints.update(typing.get_type_hints(thing.__init__)) except (TypeError, NameError, AttributeError): pass try: if hasattr(thing, ""__signature__""): # It is possible for the signature and annotations attributes to # differ on an object due to renamed arguments. # To prevent missing arguments we use the signature to provide any type # hints it has and then override any common names with the more # comprehensive type information from get_type_hints # See https://github.com/HypothesisWorks/hypothesis/pull/2580 # for more details. 
spec = inspect.getfullargspec(thing) hints.update( { k: v for k, v in spec.annotations.items() if k in (spec.args + spec.kwonlyargs) and is_a_type(v) } ) except (AttributeError, TypeError, NameError): pass return hints " 58105,"def main(): demisto.info(f'Command is {demisto.command}') try: args = demisto.args() if demisto.command() == 'test-module': demisto.results(test()) if demisto.command() == 'bt-get-tree': demisto.results(get_tree(args)) except Exception as e: demisto.error(e) raise ","def main(): demisto.info(f'Command is {demisto.command}') try: args = demisto.args() if demisto.command() == 'test-module': demisto.results(test()) if demisto.command() == 'bt-get-tree': demisto.results(get_tree(args)) except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') raise " 43299,"def test_mutag_load() -> None: graphs, labels = MUTAG().load() n_graphs = 188 assert len(graphs) == n_graphs assert len(labels) == n_graphs # one label per graph # get a list with the number of nodes in each graph n_nodes = [g.number_of_nodes() for g in graphs] # calculate average and max number of nodes across all graphs n_avg_nodes = np.mean(n_nodes) max_nodes = np.max(n_nodes) # average number of nodes should be 17.93085... or approximately 18. assert round(n_avg_nodes) == 18 # maximum number of nodes should be 28 assert max_nodes == 28 # There are two labels -1 and 1 assert len(np.unique(labels)) == 2 ","def test_mutag_load() -> None: graphs, labels = MUTAG().load() n_graphs = 188 assert len(graphs) == n_graphs assert len(labels) == n_graphs # one label per graph # get a list with the number of nodes in each graph n_nodes = [g.number_of_nodes() for g in graphs] # calculate average and max number of nodes across all graphs n_avg_nodes = np.mean(n_nodes) max_nodes = np.max(n_nodes) # average number of nodes should be 17.93085... or approximately 18. assert round(n_avg_nodes) == 18 # maximum number of nodes should be 28 assert max_nodes == 28 # There are two labels -1 and 1 assert set(labels) == {-1, 1} " 48182,"def migrate_componentlist(apps, schema_editor): Group = apps.get_model(""weblate_auth"", ""Group"") db_alias = schema_editor.connection.alias groups = Group.objects.using(db_alias).all() for group in groups: if group.componentlist: group.componentlists.add(group.componentlist) ","def migrate_componentlist(apps, schema_editor): Group = apps.get_model(""weblate_auth"", ""Group"") db_alias = schema_editor.connection.alias groups = Group.objects.using(db_alias).filter(componentlist__isnull=False) for group in groups: group.componentlists.add(group.componentlist) " 5453,"def expand(path): """""" Expands all user and environment variables .. versionadded:: 3006 Args: path (str): A path to a file or directory Returns: str: A fully expanded, real path """""" path = os.path.expanduser(path) # expand ~ to home directory path = os.path.expandvars(path) # expand any other environment vars return os.path.realpath(path) # fix path format ","def expand(path): """""" Expands all user and environment variables .. 
versionadded:: 3005 Args: path (str): A path to a file or directory Returns: str: A fully expanded, real path """""" path = os.path.expanduser(path) # expand ~ to home directory path = os.path.expandvars(path) # expand any other environment vars return os.path.realpath(path) # fix path format " 37106,"def circuit_drawer(circuit, scale=0.7, filename=None, style=None, output=None, interactive=False, line_length=None, plot_barriers=True, reverse_bits=False, justify=None, vertical_compression='medium', idle_wires=True, with_layout=True, fold=None): """"""Draw a quantum circuit to different formats (set by output parameter): **text**: ASCII art TextDrawing that can be printed in the console. **latex**: high-quality images compiled via latex. **latex_source**: raw uncompiled latex output. **matplotlib**: images with color rendered purely in Python. Args: circuit (QuantumCircuit): the quantum circuit to draw scale (float): scale of image to draw (shrink if < 1) filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file. This option is only used by the ``mpl`` output type. If a str is passed in that is the path to a json file which contains that will be open, parsed, and then used just as the input dict. See: :ref:`Style Dict Doc ` for more information on the contents. output (str): Select the output method to use for drawing the circuit. Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``. By default the `'text`' drawer is used unless a user config file has an alternative backend set as the default. If the output kwarg is set, that backend will always be used over the default in a user config file. interactive (bool): when set true show the circuit in a new window (for `mpl` this depends on the matplotlib backend being used supporting this). Note when used with either the `text` or the `latex_source` output type this has no effect and will be silently ignored. line_length (int): Deprecated, see `fold` which superseceds this option. Sets the length of the lines generated by `text` output type. This useful when the drawing does not fit in the console. If None (default), it will try to guess the console width using ``shutil.get_terminal_size()``. However, if you're running in jupyter the default line length is set to 80 characters. If you don't want pagination at all, set ``line_length=-1``. reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (string): Options are ``left``, ``right`` or ``none``, if anything else is supplied it defaults to left justified. It refers to where gates should be placed in the output circuit if there is an option. ``none`` results in each gate being placed in its own column. vertical_compression (string): ``high``, ``medium`` or ``low``. It merges the lines generated by the ``text`` output so the drawing will take less vertical room. Default is ``medium``. Only used by the ``text`` output, will be silently ignored otherwise. idle_wires (bool): Include idle wires (wires with no circuit elements) in output visualization. Default is True. with_layout (bool): Include layout information, with labels on the physical layout. fold (int): Sets pagination. It can be disabled using -1. In `text`, sets the length of the lines. This useful when the drawing does not fit in the console. 
If None (default), it will try to guess the console width using ``shutil.get_terminal_size()``. However, if running in jupyter, the default line length is set to 80 characters. In ``mpl`` is the amount of operations before folding. Default is 25. Returns: :class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or :class:`TextDrawing`: * `PIL.Image` (output='latex') an in-memory representation of the image of the circuit diagram. * `matplotlib.figure.Figure` (output='mpl') a matplotlib figure object for the circuit diagram. * `str` (output='latex_source') The LaTeX source code for visualizing the circuit diagram. * `TextDrawing` (output='text') A drawing that can be printed as ascii art Raises: VisualizationError: when an invalid output method is selected ImportError: when the output methods requieres non-installed libraries. .. _style-dict-doc: **Style Dict Details** The style dict kwarg contains numerous options that define the style of the output circuit visualization. The style dict is only used by the ``mpl`` output. The options available in the style dict are defined below: Args: textcolor (str): The color code to use for text. Defaults to `'#000000'` subtextcolor (str): The color code to use for subtext. Defaults to `'#000000'` linecolor (str): The color code to use for lines. Defaults to `'#000000'` creglinecolor (str): The color code to use for classical register lines. Defaults to `'#778899'` gatetextcolor (str): The color code to use for gate text. Defaults to `'#000000'` gatefacecolor (str): The color code to use for gates. Defaults to `'#ffffff'` barrierfacecolor (str): The color code to use for barriers. Defaults to `'#bdbdbd'` backgroundcolor (str): The color code to use for the background. Defaults to `'#ffffff'` fontsize (int): The font size to use for text. Defaults to 13 subfontsize (int): The font size to use for subtext. Defaults to 8 displaytext (dict): A dictionary of the text to use for each element type in the output visualization. The default values are:: { 'id': 'id', 'u0': 'U_0', 'u1': 'U_1', 'u2': 'U_2', 'u3': 'U_3', 'x': 'X', 'y': 'Y', 'z': 'Z', 'h': 'H', 's': 'S', 'sdg': 'S^\\dagger', 't': 'T', 'tdg': 'T^\\dagger', 'rx': 'R_x', 'ry': 'R_y', 'rz': 'R_z', 'reset': '\\left|0\\right\\rangle' } You must specify all the necessary values if using this. There is no provision for passing an incomplete dict in. displaycolor (dict): The color codes to use for each circuit element. The default values are:: { 'id': '#F0E442', 'u0': '#E7AB3B', 'u1': '#E7AB3B', 'u2': '#E7AB3B', 'u3': '#E7AB3B', 'x': '#58C698', 'y': '#58C698', 'z': '#58C698', 'h': '#70B7EB', 's': '#E0722D', 'sdg': '#E0722D', 't': '#E0722D', 'tdg': '#E0722D', 'rx': '#ffffff', 'ry': '#ffffff', 'rz': '#ffffff', 'reset': '#D188B4', 'target': '#70B7EB', 'meas': '#D188B4' } Also, just like `displaytext` there is no provision for an incomplete dict passed in. latexdrawerstyle (bool): When set to True enable latex mode which will draw gates like the `latex` output modes. usepiformat (bool): When set to True use radians for output fold (int): The number of circuit elements to fold the circuit at. Defaults to 20 cregbundle (bool): If set True bundle classical registers showindex (bool): If set True draw an index. compress (bool): If set True draw a compressed circuit figwidth (int): The maximum width (in inches) for the output figure. dpi (int): The DPI to use for the output image. Defaults to 150 margin (list): A list of margin values to adjust spacing around output image. 
Takes a list of 4 ints: [x left, x right, y bottom, y top]. creglinestyle (str): The style of line to use for classical registers. Choices are `'solid'`, `'doublet'`, or any valid matplotlib `linestyle` kwarg value. Defaults to `doublet` """""" image = None config = user_config.get_config() # Get default from config file else use text default_output = 'text' if config: default_output = config.get('circuit_drawer', 'text') if default_output == 'auto': if _matplotlib.HAS_MATPLOTLIB: default_output = 'mpl' else: default_output = 'text' if output is None: output = default_output if output == 'text': return _text_circuit_drawer(circuit, filename=filename, line_length=line_length, reverse_bits=reverse_bits, plot_barriers=plot_barriers, justify=justify, vertical_compression=vertical_compression, idle_wires=idle_wires, with_layout=with_layout, fold=fold) elif output == 'latex': image = _latex_circuit_drawer(circuit, scale=scale, filename=filename, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, with_layout=with_layout) elif output == 'latex_source': return _generate_latex_source(circuit, filename=filename, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, with_layout=with_layout) elif output == 'mpl': image = _matplotlib_circuit_drawer(circuit, scale=scale, filename=filename, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, with_layout=with_layout, fold=fold) else: raise exceptions.VisualizationError( 'Invalid output type %s selected. The only valid choices ' 'are latex, latex_source, text, and mpl' % output) if image and interactive: image.show() return image ","def circuit_drawer(circuit, scale=0.7, filename=None, style=None, output=None, interactive=False, line_length=None, plot_barriers=True, reverse_bits=False, justify=None, vertical_compression='medium', idle_wires=True, with_layout=True, fold=None): """"""Draw a quantum circuit to different formats (set by output parameter): **text**: ASCII art TextDrawing that can be printed in the console. **latex**: high-quality images compiled via latex. **latex_source**: raw uncompiled latex output. **matplotlib**: images with color rendered purely in Python. Args: circuit (QuantumCircuit): the quantum circuit to draw scale (float): scale of image to draw (shrink if < 1) filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file. This option is only used by the ``mpl`` output type. If a str is passed in that is the path to a json file which contains that will be open, parsed, and then used just as the input dict. See: :ref:`Style Dict Doc ` for more information on the contents. output (str): Select the output method to use for drawing the circuit. Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``. By default the `'text`' drawer is used unless a user config file has an alternative backend set as the default. If the output kwarg is set, that backend will always be used over the default in a user config file. interactive (bool): when set true show the circuit in a new window (for `mpl` this depends on the matplotlib backend being used supporting this). Note when used with either the `text` or the `latex_source` output type this has no effect and will be silently ignored. line_length (int): Deprecated, see `fold` which supersedes this option. 
Sets the length of the lines generated by `text` output type. This useful when the drawing does not fit in the console. If None (default), it will try to guess the console width using ``shutil.get_terminal_size()``. However, if you're running in jupyter the default line length is set to 80 characters. If you don't want pagination at all, set ``line_length=-1``. reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (string): Options are ``left``, ``right`` or ``none``, if anything else is supplied it defaults to left justified. It refers to where gates should be placed in the output circuit if there is an option. ``none`` results in each gate being placed in its own column. vertical_compression (string): ``high``, ``medium`` or ``low``. It merges the lines generated by the ``text`` output so the drawing will take less vertical room. Default is ``medium``. Only used by the ``text`` output, will be silently ignored otherwise. idle_wires (bool): Include idle wires (wires with no circuit elements) in output visualization. Default is True. with_layout (bool): Include layout information, with labels on the physical layout. fold (int): Sets pagination. It can be disabled using -1. In `text`, sets the length of the lines. This useful when the drawing does not fit in the console. If None (default), it will try to guess the console width using ``shutil.get_terminal_size()``. However, if running in jupyter, the default line length is set to 80 characters. In ``mpl`` is the amount of operations before folding. Default is 25. Returns: :class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or :class:`TextDrawing`: * `PIL.Image` (output='latex') an in-memory representation of the image of the circuit diagram. * `matplotlib.figure.Figure` (output='mpl') a matplotlib figure object for the circuit diagram. * `str` (output='latex_source') The LaTeX source code for visualizing the circuit diagram. * `TextDrawing` (output='text') A drawing that can be printed as ascii art Raises: VisualizationError: when an invalid output method is selected ImportError: when the output methods requieres non-installed libraries. .. _style-dict-doc: **Style Dict Details** The style dict kwarg contains numerous options that define the style of the output circuit visualization. The style dict is only used by the ``mpl`` output. The options available in the style dict are defined below: Args: textcolor (str): The color code to use for text. Defaults to `'#000000'` subtextcolor (str): The color code to use for subtext. Defaults to `'#000000'` linecolor (str): The color code to use for lines. Defaults to `'#000000'` creglinecolor (str): The color code to use for classical register lines. Defaults to `'#778899'` gatetextcolor (str): The color code to use for gate text. Defaults to `'#000000'` gatefacecolor (str): The color code to use for gates. Defaults to `'#ffffff'` barrierfacecolor (str): The color code to use for barriers. Defaults to `'#bdbdbd'` backgroundcolor (str): The color code to use for the background. Defaults to `'#ffffff'` fontsize (int): The font size to use for text. Defaults to 13 subfontsize (int): The font size to use for subtext. Defaults to 8 displaytext (dict): A dictionary of the text to use for each element type in the output visualization. 
The default values are:: { 'id': 'id', 'u0': 'U_0', 'u1': 'U_1', 'u2': 'U_2', 'u3': 'U_3', 'x': 'X', 'y': 'Y', 'z': 'Z', 'h': 'H', 's': 'S', 'sdg': 'S^\\dagger', 't': 'T', 'tdg': 'T^\\dagger', 'rx': 'R_x', 'ry': 'R_y', 'rz': 'R_z', 'reset': '\\left|0\\right\\rangle' } You must specify all the necessary values if using this. There is no provision for passing an incomplete dict in. displaycolor (dict): The color codes to use for each circuit element. The default values are:: { 'id': '#F0E442', 'u0': '#E7AB3B', 'u1': '#E7AB3B', 'u2': '#E7AB3B', 'u3': '#E7AB3B', 'x': '#58C698', 'y': '#58C698', 'z': '#58C698', 'h': '#70B7EB', 's': '#E0722D', 'sdg': '#E0722D', 't': '#E0722D', 'tdg': '#E0722D', 'rx': '#ffffff', 'ry': '#ffffff', 'rz': '#ffffff', 'reset': '#D188B4', 'target': '#70B7EB', 'meas': '#D188B4' } Also, just like `displaytext` there is no provision for an incomplete dict passed in. latexdrawerstyle (bool): When set to True enable latex mode which will draw gates like the `latex` output modes. usepiformat (bool): When set to True use radians for output fold (int): The number of circuit elements to fold the circuit at. Defaults to 20 cregbundle (bool): If set True bundle classical registers showindex (bool): If set True draw an index. compress (bool): If set True draw a compressed circuit figwidth (int): The maximum width (in inches) for the output figure. dpi (int): The DPI to use for the output image. Defaults to 150 margin (list): A list of margin values to adjust spacing around output image. Takes a list of 4 ints: [x left, x right, y bottom, y top]. creglinestyle (str): The style of line to use for classical registers. Choices are `'solid'`, `'doublet'`, or any valid matplotlib `linestyle` kwarg value. Defaults to `doublet` """""" image = None config = user_config.get_config() # Get default from config file else use text default_output = 'text' if config: default_output = config.get('circuit_drawer', 'text') if default_output == 'auto': if _matplotlib.HAS_MATPLOTLIB: default_output = 'mpl' else: default_output = 'text' if output is None: output = default_output if output == 'text': return _text_circuit_drawer(circuit, filename=filename, line_length=line_length, reverse_bits=reverse_bits, plot_barriers=plot_barriers, justify=justify, vertical_compression=vertical_compression, idle_wires=idle_wires, with_layout=with_layout, fold=fold) elif output == 'latex': image = _latex_circuit_drawer(circuit, scale=scale, filename=filename, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, with_layout=with_layout) elif output == 'latex_source': return _generate_latex_source(circuit, filename=filename, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, with_layout=with_layout) elif output == 'mpl': image = _matplotlib_circuit_drawer(circuit, scale=scale, filename=filename, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires, with_layout=with_layout, fold=fold) else: raise exceptions.VisualizationError( 'Invalid output type %s selected. The only valid choices ' 'are latex, latex_source, text, and mpl' % output) if image and interactive: image.show() return image " 54238,"def main(): quiet = len(sys.argv) >= 2 and sys.argv[1] == '-q' file_names = glob.glob('cirq**/cirq**/**/*.py', recursive=True) assert file_names # Remove the engine client code. 
excluded = [ 'cirq-google/cirq_google/engine/client/', 'cirq-google/cirq_google/cloud/', 'cirq-google/cirq_google/api', ] file_names = [f for f in file_names if not any(f.startswith(x) for x in excluded)] failed, attempted = run_tests( file_names, include_modules=True, include_local=False, quiet=quiet ) if failed != 0: print( shell_tools.highlight( 'Failed: {} failed, {} passed, {} total'.format( failed, attempted - failed, attempted ), shell_tools.RED, ) ) sys.exit(1) else: print(shell_tools.highlight(f'Passed: {attempted}', shell_tools.GREEN)) sys.exit(0) ","def main(): quiet = len(sys.argv) >= 2 and sys.argv[1] == '-q' file_names = glob.glob('cirq**/cirq**/**/*.py', recursive=True) assert file_names # Remove the engine client code. excluded = [ 'cirq-google/cirq_google/engine/client/', 'cirq-google/cirq_google/cloud/', 'cirq-google/cirq_google/api/', ] file_names = [f for f in file_names if not any(f.startswith(x) for x in excluded)] file_names = [f for f in file_names if not f.endswith('_test.py')] failed, attempted = run_tests( file_names, include_modules=True, include_local=False, quiet=quiet ) if failed != 0: print( shell_tools.highlight( 'Failed: {} failed, {} passed, {} total'.format( failed, attempted - failed, attempted ), shell_tools.RED, ) ) sys.exit(1) else: print(shell_tools.highlight(f'Passed: {attempted}', shell_tools.GREEN)) sys.exit(0) " 3744,"def deprecate_with_doc(msg): """""" Returns new _Deprecate class object. The object can be used to Issue a DeprecationWarning, by passing `func` as arguement,this adds warning to `old_name`'s docstring, rebinds ``old_name.__name__`` and returns the new function object. This function may also be used as a decorator. See Also -------- deprecate` Parameters ---------- message : str Additional explanation of the deprecation. Displayed in the docstring after the warning. Returns ------- _Deprecate object : object The _Deprecate class object. Examples -------- Note that ``olduint`` returns a value after printing DeprecationWarning with msg: >>>oldobj = np.deprecate_with_doc(""Use np.int_ instead."") >>>olduint = oldobj(np.uint) >>>DeprecationWarning: `uint64` is deprecated! #may vary ... Use np.int_ instead. >>>olduint(6) >>>6 """""" return _Deprecate(message=msg) ","def deprecate_with_doc(msg): """""" Returns new _Deprecate class object. The object can be used to Issue a DeprecationWarning, by passing `func` as arguement,this adds warning to `old_name`'s docstring, rebinds ``old_name.__name__`` and returns the new function object. This function may also be used as a decorator. See Also -------- deprecate : Decorate a function such that it issues a `DeprecationWarning` Parameters ---------- message : str Additional explanation of the deprecation. Displayed in the docstring after the warning. Returns ------- _Deprecate object : object The _Deprecate class object. Examples -------- Note that ``olduint`` returns a value after printing DeprecationWarning with msg: >>>oldobj = np.deprecate_with_doc(""Use np.int_ instead."") >>>olduint = oldobj(np.uint) >>>DeprecationWarning: `uint64` is deprecated! #may vary ... Use np.int_ instead. >>>olduint(6) >>>6 """""" return _Deprecate(message=msg) " 44790,"def search_runs( experiment_ids=None, filter_string="""", run_view_type=ViewType.ACTIVE_ONLY, max_results=SEARCH_MAX_RESULTS_PANDAS, order_by=None, ): """""" Get a pandas DataFrame of runs that fit the search criteria. :param experiment_ids: List of experiment IDs. None will default to the active experiment. 
:param filter_string: Filter query string, defaults to searching all runs. :param run_view_type: one of enum values ``ACTIVE_ONLY``, ``DELETED_ONLY``, or ``ALL`` runs defined in :py:class:`mlflow.entities.ViewType`. :param max_results: The maximum number of runs to put in the dataframe. Default is 100,000 to avoid causing out-of-memory issues on the user's machine. :param order_by: List of columns to order by (e.g., ""metrics.rmse""). The ``order_by`` column can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``. The default ordering is to sort by ``start_time DESC``, then ``run_id``. :return: A pandas.DataFrame of runs, where each metric, parameter, and tag are expanded into their own columns named metrics.*, params.*, and tags.* respectively. For runs that don't have a particular metric, parameter, or tag, their value will be (NumPy) Nan, None, or None respectively. .. code-block:: python :caption: Example import mlflow # Create an experiment and log two runs under it experiment_id = mlflow.create_experiment(""Social NLP Experiments"") with mlflow.start_run(experiment_id=experiment_id): mlflow.log_metric(""m"", 1.55) mlflow.set_tag(""s.release"", ""1.1.0-RC"") with mlflow.start_run(experiment_id=experiment_id): mlflow.log_metric(""m"", 2.50) mlflow.set_tag(""s.release"", ""1.2.0-GA"") # Search all runs in experiment_id df = mlflow.search_runs([experiment_id], order_by=[""metrics.m DESC""]) # Print pandas DataFrame's rows and columns print(df.loc[:, [""metrics.m"", ""tags.s.release"", ""run_id""]].to_string()) print(""--"") # Search the experiment_id using a filter_string with tag # that has a case insensitive pattern filter_string = ""tags.s.release ILIKE '%rc%'"" df = mlflow.search_runs([experiment_id], filter_string=filter_string) # Print pandas DataFrame's rows and columns print(df.loc[:, [""metrics.m"", ""tags.s.release"", ""run_id""]].to_string()) .. 
code-block:: text :caption: Output metrics.m tags.s.release run_id 0 2.50 1.2.0-GA 147eed886ab44633902cc8e19b2267e2 1 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4 -- metrics.m tags.s.release run_id 0 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4 """""" if not experiment_ids: experiment_ids = _get_experiment_id() # Using an internal function as the linter doesn't like assigning a lambda, and inlining the # full thing is a mess def pagination_wrapper_func(number_to_get, next_page_token): return MlflowClient().search_runs( experiment_ids, filter_string, run_view_type, number_to_get, order_by, next_page_token ) runs = _paginate(pagination_wrapper_func, NUM_RUNS_PER_PAGE_PANDAS, max_results) info = { ""run_id"": [], ""experiment_id"": [], ""status"": [], ""artifact_uri"": [], ""start_time"": [], ""end_time"": [], } params, metrics, tags = ({}, {}, {}) PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None) for i, run in enumerate(runs): info[""run_id""].append(run.info.run_id) info[""experiment_id""].append(run.info.experiment_id) info[""status""].append(run.info.status) info[""artifact_uri""].append(run.info.artifact_uri) info[""start_time""].append(pd.to_datetime(run.info.start_time, unit=""ms"", utc=True)) info[""end_time""].append(pd.to_datetime(run.info.end_time, unit=""ms"", utc=True)) # Params param_keys = set(params.keys()) for key in param_keys: if key in run.data.params: params[key].append(run.data.params[key]) else: params[key].append(PARAM_NULL) new_params = set(run.data.params.keys()) - param_keys for p in new_params: params[p] = [PARAM_NULL] * i # Fill in null values for all previous runs params[p].append(run.data.params[p]) # Metrics metric_keys = set(metrics.keys()) for key in metric_keys: if key in run.data.metrics: metrics[key].append(run.data.metrics[key]) else: metrics[key].append(METRIC_NULL) new_metrics = set(run.data.metrics.keys()) - metric_keys for m in new_metrics: metrics[m] = [METRIC_NULL] * i metrics[m].append(run.data.metrics[m]) # Tags tag_keys = set(tags.keys()) for key in tag_keys: if key in run.data.tags: tags[key].append(run.data.tags[key]) else: tags[key].append(TAG_NULL) new_tags = set(run.data.tags.keys()) - tag_keys for t in new_tags: tags[t] = [TAG_NULL] * i tags[t].append(run.data.tags[t]) data = {} data.update(info) for key in metrics: data[""metrics."" + key] = metrics[key] for key in params: data[""params."" + key] = params[key] for key in tags: data[""tags."" + key] = tags[key] return pd.DataFrame(data) ","def search_runs( experiment_ids=None, filter_string="""", run_view_type=ViewType.ACTIVE_ONLY, max_results=SEARCH_MAX_RESULTS_PANDAS, order_by=None, ): """""" Get a pandas DataFrame of runs that fit the search criteria. :param experiment_ids: List of experiment IDs. None will default to the active experiment. :param filter_string: Filter query string, defaults to searching all runs. :param run_view_type: one of enum values ``ACTIVE_ONLY``, ``DELETED_ONLY``, or ``ALL`` runs defined in :py:class:`mlflow.entities.ViewType`. :param max_results: The maximum number of runs to put in the dataframe. Default is 100,000 to avoid causing out-of-memory issues on the user's machine. :param order_by: List of columns to order by (e.g., ""metrics.rmse""). The ``order_by`` column can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``. The default ordering is to sort by ``start_time DESC``, then ``run_id``. 
:return: A pandas.DataFrame of runs, where each metric, parameter, and tag are expanded into their own columns named metrics.*, params.*, and tags.* respectively. For runs that don't have a particular metric, parameter, or tag, their value will be (NumPy) Nan, None, or None respectively. .. code-block:: python :caption: Example import mlflow # Create an experiment and log two runs under it experiment_id = mlflow.create_experiment(""Social NLP Experiments"") with mlflow.start_run(experiment_id=experiment_id): mlflow.log_metric(""m"", 1.55) mlflow.set_tag(""s.release"", ""1.1.0-RC"") with mlflow.start_run(experiment_id=experiment_id): mlflow.log_metric(""m"", 2.50) mlflow.set_tag(""s.release"", ""1.2.0-GA"") # Search all runs in experiment_id df = mlflow.search_runs([experiment_id], order_by=[""metrics.m DESC""]) # Print pandas DataFrame's rows and columns print(df[[""metrics.m"", ""tags.s.release"", ""run_id""]].to_string()) print(""--"") # Search the experiment_id using a filter_string with tag # that has a case insensitive pattern filter_string = ""tags.s.release ILIKE '%rc%'"" df = mlflow.search_runs([experiment_id], filter_string=filter_string) # Print pandas DataFrame's rows and columns print(df.loc[:, [""metrics.m"", ""tags.s.release"", ""run_id""]].to_string()) .. code-block:: text :caption: Output metrics.m tags.s.release run_id 0 2.50 1.2.0-GA 147eed886ab44633902cc8e19b2267e2 1 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4 -- metrics.m tags.s.release run_id 0 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4 """""" if not experiment_ids: experiment_ids = _get_experiment_id() # Using an internal function as the linter doesn't like assigning a lambda, and inlining the # full thing is a mess def pagination_wrapper_func(number_to_get, next_page_token): return MlflowClient().search_runs( experiment_ids, filter_string, run_view_type, number_to_get, order_by, next_page_token ) runs = _paginate(pagination_wrapper_func, NUM_RUNS_PER_PAGE_PANDAS, max_results) info = { ""run_id"": [], ""experiment_id"": [], ""status"": [], ""artifact_uri"": [], ""start_time"": [], ""end_time"": [], } params, metrics, tags = ({}, {}, {}) PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None) for i, run in enumerate(runs): info[""run_id""].append(run.info.run_id) info[""experiment_id""].append(run.info.experiment_id) info[""status""].append(run.info.status) info[""artifact_uri""].append(run.info.artifact_uri) info[""start_time""].append(pd.to_datetime(run.info.start_time, unit=""ms"", utc=True)) info[""end_time""].append(pd.to_datetime(run.info.end_time, unit=""ms"", utc=True)) # Params param_keys = set(params.keys()) for key in param_keys: if key in run.data.params: params[key].append(run.data.params[key]) else: params[key].append(PARAM_NULL) new_params = set(run.data.params.keys()) - param_keys for p in new_params: params[p] = [PARAM_NULL] * i # Fill in null values for all previous runs params[p].append(run.data.params[p]) # Metrics metric_keys = set(metrics.keys()) for key in metric_keys: if key in run.data.metrics: metrics[key].append(run.data.metrics[key]) else: metrics[key].append(METRIC_NULL) new_metrics = set(run.data.metrics.keys()) - metric_keys for m in new_metrics: metrics[m] = [METRIC_NULL] * i metrics[m].append(run.data.metrics[m]) # Tags tag_keys = set(tags.keys()) for key in tag_keys: if key in run.data.tags: tags[key].append(run.data.tags[key]) else: tags[key].append(TAG_NULL) new_tags = set(run.data.tags.keys()) - tag_keys for t in new_tags: tags[t] = [TAG_NULL] * i 
tags[t].append(run.data.tags[t]) data = {} data.update(info) for key in metrics: data[""metrics."" + key] = metrics[key] for key in params: data[""params."" + key] = params[key] for key in tags: data[""tags."" + key] = tags[key] return pd.DataFrame(data) " 43845,"def sparse_hamiltonian(H): r""""""Computes the sparse matrix representation a Hamiltonian in the computational basis. Args: H (~.Hamiltonian): Hamiltonian operator for which the matrix representation should be measured Returns: coo_matrix: a sparse matrix in scipy COOrdinate format with the dimension of :math:`(2^n, 2^n)` where :math:`n` is the number of wires **Example:** This function can be used by passing a `qml.Hamiltonian` object as: >>> coeffs = [0.5, 0.5] >>> obs = [qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]), ... qml.Identity(wires=[0]) @ qml.PauliZ(wires=[1])] >>> H = qml.Hamiltonian(coeffs, obs) >>> H_sparse = sparse_hamiltonian(H) The resulting sparse matrix can be either used directly or transformed into a numpy array: >>> H_sparse.toarray() array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]]) """""" if not isinstance(H, qml.Hamiltonian): raise TypeError(""Passed Hamiltonian must be of type `qml.Hamiltonian`"") n = len(H.wires) matrix = scipy.sparse.coo_matrix((2 ** n, 2 ** n), dtype=""complex128"") for coeffs, ops in zip(H.coeffs, H.ops): obs = [scipy.sparse.coo_matrix(o.matrix) for o in ops.obs] mat = [scipy.sparse.eye(2, format=""coo"")] * n for i, j in enumerate(ops.wires): mat[j] = obs[i] matrix += functools.reduce(lambda i, j: scipy.sparse.kron(i, j, format=""coo""), mat) * coeffs return matrix.tocoo() ","def sparse_hamiltonian(H): r""""""Computes the sparse matrix representation a Hamiltonian in the computational basis. Args: H (~.Hamiltonian): Hamiltonian operator for which the matrix representation should be computed Returns: coo_matrix: a sparse matrix in scipy COOrdinate format with the dimension of :math:`(2^n, 2^n)` where :math:`n` is the number of wires **Example:** This function can be used by passing a `qml.Hamiltonian` object as: >>> coeffs = [0.5, 0.5] >>> obs = [qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]), ... qml.Identity(wires=[0]) @ qml.PauliZ(wires=[1])] >>> H = qml.Hamiltonian(coeffs, obs) >>> H_sparse = sparse_hamiltonian(H) The resulting sparse matrix can be either used directly or transformed into a numpy array: >>> H_sparse.toarray() array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]]) """""" if not isinstance(H, qml.Hamiltonian): raise TypeError(""Passed Hamiltonian must be of type `qml.Hamiltonian`"") n = len(H.wires) matrix = scipy.sparse.coo_matrix((2 ** n, 2 ** n), dtype=""complex128"") for coeffs, ops in zip(H.coeffs, H.ops): obs = [scipy.sparse.coo_matrix(o.matrix) for o in ops.obs] mat = [scipy.sparse.eye(2, format=""coo"")] * n for i, j in enumerate(ops.wires): mat[j] = obs[i] matrix += functools.reduce(lambda i, j: scipy.sparse.kron(i, j, format=""coo""), mat) * coeffs return matrix.tocoo() " 5744,"def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False, check_finite=True): """""" Solve equation a x = b. a is Hermitian positive-definite banded matrix. Uses Thomas' Algorithm, which is more efficent than Gaussian elimination, but is not stable for all invertable Hermitian banded matricies. 
The matrix a is stored in `ab` either in lower diagonal or upper diagonal ordered form: ab[u + i - j, j] == a[i,j] (if upper form; i <= j) ab[ i - j, j] == a[i,j] (if lower form; i >= j) Example of `ab` (shape of a is (6, 6), `u` =2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Cells marked with * are not used. Parameters ---------- ab : (`u` + 1, M) array_like Banded matrix b : (M,) or (M, K) array_like Right-hand side overwrite_ab : bool, optional Discard data in `ab` (may enhance performance) overwrite_b : bool, optional Discard data in `b` (may enhance performance) lower : bool, optional Is the matrix in the lower form. (Default is upper form) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system a x = b. Shape of return matches shape of `b`. Notes ----- In cases when Thomas' Algorithm is not stable for an invertable matrix `a`, the solver `solve_banded` may be used. Examples -------- Solve the banded system A x = b, where:: [ 4 2 -1 0 0 0] [1] [ 2 5 2 -1 0 0] [2] A = [-1 2 6 2 -1 0] b = [2] [ 0 -1 2 7 2 -1] [3] [ 0 0 -1 2 8 2] [3] [ 0 0 0 -1 2 9] [3] >>> from scipy.linalg import solveh_banded `ab` contains the main diagonal and the nonzero diagonals below the main diagonal. That is, we use the lower form: >>> ab = np.array([[ 4, 5, 6, 7, 8, 9], ... [ 2, 2, 2, 2, 2, 0], ... [-1, -1, -1, -1, 0, 0]]) >>> b = np.array([1, 2, 2, 3, 3, 3]) >>> x = solveh_banded(ab, b, lower=True) >>> x array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031, 0.34733894]) Solve the Hermitian banded system H x = b, where:: [ 8 2-1j 0 0 ] [ 1 ] H = [2+1j 5 1j 0 ] b = [1+1j] [ 0 -1j 9 -2-1j] [1-2j] [ 0 0 -2+1j 6 ] [ 0 ] In this example, we put the upper diagonals in the array `hb`: >>> hb = np.array([[0, 2-1j, 1j, -2-1j], ... [8, 5, 9, 6 ]]) >>> b = np.array([1, 1+1j, 1-2j, 0]) >>> x = solveh_banded(hb, b) >>> x array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j, 0.10077984-0.23035393j, -0.00479904-0.09358128j]) """""" a1 = _asarray_validated(ab, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) # Validate shapes. if a1.shape[-1] != b1.shape[0]: raise ValueError(""shapes of ab and b are not compatible."") overwrite_b = overwrite_b or _datacopied(b1, b) overwrite_ab = overwrite_ab or _datacopied(a1, ab) if a1.shape[0] == 2: ptsv, = get_lapack_funcs(('ptsv',), (a1, b1)) if lower: d = a1[0, :].real e = a1[1, :-1] else: d = a1[1, :].real e = a1[0, 1:].conj() d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab, overwrite_b) else: pbsv, = get_lapack_funcs(('pbsv',), (a1, b1)) c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab, overwrite_b=overwrite_b) if info > 0: raise LinAlgError(""Thomas' algorith failed: "" + ""leading minor %d not positive definite. "" % info + ""Try solve_banded instead"") if info < 0: raise ValueError(""illegal value in %dth argument of internal "" ""pbsv"" % -info) return x ","def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False, check_finite=True): """""" Solve equation a x = b. a is Hermitian positive-definite banded matrix. 
Uses Thomas' Algorithm, which is more efficent than Gaussian elimination, but is not stable for all invertable Hermitian banded matricies. The matrix a is stored in `ab` either in lower diagonal or upper diagonal ordered form: ab[u + i - j, j] == a[i,j] (if upper form; i <= j) ab[ i - j, j] == a[i,j] (if lower form; i >= j) Example of `ab` (shape of a is (6, 6), `u` =2):: upper form: * * a02 a13 a24 a35 * a01 a12 a23 a34 a45 a00 a11 a22 a33 a44 a55 lower form: a00 a11 a22 a33 a44 a55 a10 a21 a32 a43 a54 * a20 a31 a42 a53 * * Cells marked with * are not used. Parameters ---------- ab : (`u` + 1, M) array_like Banded matrix b : (M,) or (M, K) array_like Right-hand side overwrite_ab : bool, optional Discard data in `ab` (may enhance performance) overwrite_b : bool, optional Discard data in `b` (may enhance performance) lower : bool, optional Is the matrix in the lower form. (Default is upper form) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, K) ndarray The solution to the system a x = b. Shape of return matches shape of `b`. Notes ----- In cases when Thomas' Algorithm is not stable for an invertable matrix ``a``, the solver `solve_banded` may be used. Examples -------- Solve the banded system A x = b, where:: [ 4 2 -1 0 0 0] [1] [ 2 5 2 -1 0 0] [2] A = [-1 2 6 2 -1 0] b = [2] [ 0 -1 2 7 2 -1] [3] [ 0 0 -1 2 8 2] [3] [ 0 0 0 -1 2 9] [3] >>> from scipy.linalg import solveh_banded `ab` contains the main diagonal and the nonzero diagonals below the main diagonal. That is, we use the lower form: >>> ab = np.array([[ 4, 5, 6, 7, 8, 9], ... [ 2, 2, 2, 2, 2, 0], ... [-1, -1, -1, -1, 0, 0]]) >>> b = np.array([1, 2, 2, 3, 3, 3]) >>> x = solveh_banded(ab, b, lower=True) >>> x array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031, 0.34733894]) Solve the Hermitian banded system H x = b, where:: [ 8 2-1j 0 0 ] [ 1 ] H = [2+1j 5 1j 0 ] b = [1+1j] [ 0 -1j 9 -2-1j] [1-2j] [ 0 0 -2+1j 6 ] [ 0 ] In this example, we put the upper diagonals in the array `hb`: >>> hb = np.array([[0, 2-1j, 1j, -2-1j], ... [8, 5, 9, 6 ]]) >>> b = np.array([1, 1+1j, 1-2j, 0]) >>> x = solveh_banded(hb, b) >>> x array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j, 0.10077984-0.23035393j, -0.00479904-0.09358128j]) """""" a1 = _asarray_validated(ab, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) # Validate shapes. if a1.shape[-1] != b1.shape[0]: raise ValueError(""shapes of ab and b are not compatible."") overwrite_b = overwrite_b or _datacopied(b1, b) overwrite_ab = overwrite_ab or _datacopied(a1, ab) if a1.shape[0] == 2: ptsv, = get_lapack_funcs(('ptsv',), (a1, b1)) if lower: d = a1[0, :].real e = a1[1, :-1] else: d = a1[1, :].real e = a1[0, 1:].conj() d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab, overwrite_b) else: pbsv, = get_lapack_funcs(('pbsv',), (a1, b1)) c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab, overwrite_b=overwrite_b) if info > 0: raise LinAlgError(""Thomas' algorith failed: "" + ""leading minor %d not positive definite. 
"" % info + ""Try solve_banded instead"") if info < 0: raise ValueError(""illegal value in %dth argument of internal "" ""pbsv"" % -info) return x " 39057,"def test_watchgodreload( tls_ca_certificate_pem_path, tls_ca_certificate_private_key_path, tmpdir ): with tmpdir.as_cwd(): config = Config(app=None) reloader = WatchGodReload(config, target=run, sockets=[]) reloader.signal_handler(sig=signal.SIGINT, frame=None) reloader.run() ","def test_watchgodreload( tmpdir ): with tmpdir.as_cwd(): config = Config(app=None) reloader = WatchGodReload(config, target=run, sockets=[]) reloader.signal_handler(sig=signal.SIGINT, frame=None) reloader.run() " 21185,"def models_with_nvtx_range(nlp, forward_color: int, backprop_color: int): pipes = [ pipe for _, pipe in nlp.components if hasattr(pipe, ""is_trainable"") and pipe.is_trainable ] # We need process all models jointly to avoid wrapping callbacks twice. models: Model = Model( ""wrap_with_nvtx_range"", forward=lambda model, X, is_train: ..., layers=[pipe.model for pipe in pipes], ) for node in models.walk(): with_nvtx_range( node, forward_color=forward_color, backprop_color=backprop_color ) return nlp ","def models_with_nvtx_range(nlp, forward_color: int, backprop_color: int): pipes = [ pipe for _, pipe in nlp.components if hasattr(pipe, ""is_trainable"") and pipe.is_trainable ] # We need to process all models jointly to avoid wrapping callbacks twice. models: Model = Model( ""wrap_with_nvtx_range"", forward=lambda model, X, is_train: ..., layers=[pipe.model for pipe in pipes], ) for node in models.walk(): with_nvtx_range( node, forward_color=forward_color, backprop_color=backprop_color ) return nlp " 30779,"def fetch_incidents(): last_run = demisto.getLastRun() last_incidents_ids = [] if last_run: last_fetch = last_run.get('time') last_fetch = datetime.strptime(last_fetch, TIME_FORMAT) last_incidents_ids = last_run.get('last_event_ids') else: # first time fetching last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0] LOG('iterating on detections, looking for more recent than {}'.format(last_fetch)) incidents = [] new_incidents_ids = [] for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2): LOG('found detection #{}'.format(raw_detection['id'])) incident = detection_to_incident(raw_detection) # the rewJson is a string of dictionary e.g. - ('{""ID"":2,""Type"":5}') incident_id = json.loads(incident['rawJSON']).get(""ID"") if incident_id not in last_incidents_ids: # makes sure that the incidents wasn't fetched before incidents.append(incident) new_incidents_ids.append(incident_id) if incidents: last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812 last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids} return last_run, incidents ","def fetch_incidents(): last_run = demisto.getLastRun() last_incidents_ids = [] if last_run: last_fetch = last_run.get('time') last_fetch = datetime.strptime(last_fetch, TIME_FORMAT) last_incidents_ids = last_run.get('last_event_ids') else: # first time fetching last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0] LOG('iterating on detections, looking for more recent than {}'.format(last_fetch)) incidents = [] new_incidents_ids = [] for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2): LOG('found detection #{}'.format(raw_detection['id'])) incident = detection_to_incident(raw_detection) # the rewJson is a string of dictionary e.g. 
- ('{""ID"":2,""Type"":5}') incident_id = json.loads(incident['rawJSON']).get(""ID"") if incident_id not in last_incidents_ids: # makes sure that the incident wasn't fetched before incidents.append(incident) new_incidents_ids.append(incident_id) if incidents: last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812 last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids} return last_run, incidents " 6656,"def update_available_serial_nos(available_serial_nos, sle): serial_nos = get_serial_nos(sle.serial_no) key = (sle.item_code, sle.warehouse) if key not in available_serial_nos: stock_balance = get_stock_balance_for(sle.item_code, sle.warehouse, sle.date.split(' ')[0], sle.date.split(' ')[1], batch_no=sle.batch_no) serials = get_serial_nos(stock_balance['serial_nos']) if stock_balance['serial_nos'] else [] available_serial_nos.setdefault(key, serials) existing_serial_no = available_serial_nos[key] for sn in serial_nos: if sle.actual_qty > 0: if sn in existing_serial_no: existing_serial_no.remove(sn) else: existing_serial_no.append(sn) else: if sn in existing_serial_no: existing_serial_no.remove(sn) else: existing_serial_no.append(sn) sle.balance_serial_no = '\n'.join(existing_serial_no) ","def update_available_serial_nos(available_serial_nos, sle): serial_nos = get_serial_nos(sle.serial_no) key = (sle.item_code, sle.warehouse) if key not in available_serial_nos: stock_balance = get_stock_balance_for(sle.item_code, sle.warehouse, sle.date.split(' ')[0], sle.date.split(' ')[1]) serials = get_serial_nos(stock_balance['serial_nos']) if stock_balance['serial_nos'] else [] available_serial_nos.setdefault(key, serials) existing_serial_no = available_serial_nos[key] for sn in serial_nos: if sle.actual_qty > 0: if sn in existing_serial_no: existing_serial_no.remove(sn) else: existing_serial_no.append(sn) else: if sn in existing_serial_no: existing_serial_no.remove(sn) else: existing_serial_no.append(sn) sle.balance_serial_no = '\n'.join(existing_serial_no) " 52485,"def get_app_name(bench_path, repo_name): app_name = None apps_path = os.path.join(os.path.abspath(bench_path), 'apps') config_path = os.path.join(apps_path, repo_name, 'setup.cfg') if os.path.exists(config_path): config = read_configuration(config_path) app_name = config.get('metadata', {}).get('name') if not app_name: # retrieve app name from setup.py as fallback app_path = os.path.join(apps_path, repo_name, 'setup.py') with open(app_path, 'rb') as f: app_name = re.search(r'name\s*=\s*[\'""](.*)[\'""]', f.read().decode('utf-8')).group(1) if app_name and repo_name != app_name: os.rename(os.path.join(apps_path, repo_name), os.path.join(apps_path, app_name)) return app_name ","def get_app_name(bench_path, repo_name): app_name = None apps_path = os.path.join(os.path.abspath(bench_path), 'apps') config_path = os.path.join(apps_path, repo_name, 'setup.cfg') if os.path.exists(config_path): config = read_configuration(config_path) app_name = config.get('metadata', {}).get('name') if not app_name: # retrieve app name from setup.py as fallback app_path = os.path.join(apps_path, repo_name, 'setup.py') with open(app_path, 'rb') as f: app_name = re.search(r'name\s*=\s*[\'""](.*)[\'""]', f.read().decode('utf-8')).group(1) if app_name and repo_name != app_name: os.rename(os.path.join(apps_path, repo_name), os.path.join(apps_path, app_name)) return app_name return repo_name " 47891,"def parse_yolo_region(predictions, resized_image_shape, original_im_shape, params, threshold, is_proportional): 
# ------------------------------------------ Validating output parameters ------------------------------------------ _, _, out_blob_h, out_blob_w = predictions.shape assert out_blob_w == out_blob_h, ""Invalid size of output blob. It sould be in NCHW layout and height should "" \ ""be equal to width. Current height = {}, current width = {}"" \ """".format(out_blob_h, out_blob_w) # ------------------------------------------ Extracting layer parameters ------------------------------------------- orig_im_h, orig_im_w = original_im_shape resized_image_h, resized_image_w = resized_image_shape objects = list() size_normalizer = (resized_image_w, resized_image_h) if params.isYoloV3 else (params.side, params.side) bbox_size = params.coords + 1 + params.classes # ------------------------------------------- Parsing YOLO Region output ------------------------------------------- for row, col, n in np.ndindex(params.side, params.side, params.num): # Getting raw values for each detection bounding box bbox = predictions[0, n*bbox_size:(n+1)*bbox_size, row, col] x, y = bbox[0:2] width, height = bbox[2:4] object_probability = bbox[4] class_probabilities = bbox[5:] if object_probability < threshold: continue # Process raw value x = (col + x) / params.side y = (row + y) / params.side # Value for exp is very big number in some cases so following construction is using here try: width = exp(width) height = exp(height) except OverflowError: continue # Depends on topology we need to normalize sizes by feature maps (up to YOLOv3) or by input shape (YOLOv3) width = width * params.anchors[2 * n] / size_normalizer[0] height = height * params.anchors[2 * n + 1] / size_normalizer[1] class_id = np.argmax(class_probabilities) confidence = class_probabilities[class_id]*object_probability if confidence < threshold: continue objects.append(scale_bbox(x=x, y=y, height=height, width=width, class_id=class_id, confidence=confidence, im_h=orig_im_h, im_w=orig_im_w, is_proportional=is_proportional)) return objects ","def parse_yolo_region(predictions, resized_image_shape, original_im_shape, params, threshold, is_proportional): # ------------------------------------------ Validating output parameters ------------------------------------------ _, _, out_blob_h, out_blob_w = predictions.shape assert out_blob_w == out_blob_h, ""Invalid size of output blob. It sould be in NCHW layout and height should "" \ ""be equal to width. 
Current height = {}, current width = {}"" \ """".format(out_blob_h, out_blob_w) # ------------------------------------------ Extracting layer parameters ------------------------------------------- orig_im_h, orig_im_w = original_im_shape resized_image_h, resized_image_w = resized_image_shape objects = list() size_normalizer = (resized_image_w, resized_image_h) if params.isYoloV3 else (params.side, params.side) bbox_size = params.coords + 1 + params.classes # ------------------------------------------- Parsing YOLO Region output ------------------------------------------- for row, col, n in np.ndindex(params.side, params.side, params.num): # Getting raw values for each detection bounding box bbox = predictions[0, n*bbox_size:(n+1)*bbox_size, row, col] x, y, width, height, object_probability = bbox[:5] class_probabilities = bbox[5:] if object_probability < threshold: continue # Process raw value x = (col + x) / params.side y = (row + y) / params.side # Value for exp is very big number in some cases so following construction is using here try: width = exp(width) height = exp(height) except OverflowError: continue # Depends on topology we need to normalize sizes by feature maps (up to YOLOv3) or by input shape (YOLOv3) width = width * params.anchors[2 * n] / size_normalizer[0] height = height * params.anchors[2 * n + 1] / size_normalizer[1] class_id = np.argmax(class_probabilities) confidence = class_probabilities[class_id]*object_probability if confidence < threshold: continue objects.append(scale_bbox(x=x, y=y, height=height, width=width, class_id=class_id, confidence=confidence, im_h=orig_im_h, im_w=orig_im_w, is_proportional=is_proportional)) return objects " 53353,"def get_train_dataflow(): """""" Return a training dataflow. Each datapoint consists of the following: An image: (h, w, 3), 1 or more pairs of (anchor_labels, anchor_boxes): anchor_labels: (h', w', NA) anchor_boxes: (h', w', NA, 4) gt_boxes: (N, 4) gt_labels: (N,) If MODE_MASK, gt_masks: (N, h, w) """""" roidbs = list(itertools.chain.from_iterable(DatasetRegistry.get(x).training_roidbs() for x in cfg.DATA.TRAIN)) print_class_histogram(roidbs) # Filter out images that have no gt boxes, but this filter shall not be applied for testing. # The model does support training with empty images, but it is not useful for COCO. num = len(roidbs) if cfg.DATA.FILTER_EMPTY_ANNOTATIONS: roidbs = list(filter(lambda img: len(img[""boxes""][img[""is_crowd""] == 0]) >= 0, roidbs)) else: roidbs = list(filter(lambda img: len(img[""boxes""][img[""is_crowd""] == 0]) > 0, roidbs)) logger.info( ""Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}"".format( num - len(roidbs), len(roidbs) ) ) ds = DataFromList(roidbs, shuffle=True) preprocess = TrainingDataPreprocessor(cfg) if cfg.DATA.NUM_WORKERS > 0: if cfg.TRAINER == ""horovod"": buffer_size = cfg.DATA.NUM_WORKERS * 10 # one dataflow for each process, therefore don't need large buffer ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) # MPI does not like fork() else: buffer_size = cfg.DATA.NUM_WORKERS * 20 ds = MultiProcessMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) else: ds = MapData(ds, preprocess) return ds ","def get_train_dataflow(): """""" Return a training dataflow. 
Each datapoint consists of the following: An image: (h, w, 3), 1 or more pairs of (anchor_labels, anchor_boxes): anchor_labels: (h', w', NA) anchor_boxes: (h', w', NA, 4) gt_boxes: (N, 4) gt_labels: (N,) If MODE_MASK, gt_masks: (N, h, w) """""" roidbs = list(itertools.chain.from_iterable(DatasetRegistry.get(x).training_roidbs() for x in cfg.DATA.TRAIN)) print_class_histogram(roidbs) # Filter out images that have no gt boxes, but this filter shall not be applied for testing. # The model does support training with empty images, but it is not useful for COCO. num = len(roidbs) if cfg.DATA.FILTER_EMPTY_ANNOTATIONS: roidbs = list(filter(lambda img: len(img[""boxes""][img[""is_crowd""] == 0]) > 0, roidbs)) logger.info( ""Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}"".format( num - len(roidbs), len(roidbs) ) ) ds = DataFromList(roidbs, shuffle=True) preprocess = TrainingDataPreprocessor(cfg) if cfg.DATA.NUM_WORKERS > 0: if cfg.TRAINER == ""horovod"": buffer_size = cfg.DATA.NUM_WORKERS * 10 # one dataflow for each process, therefore don't need large buffer ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) # MPI does not like fork() else: buffer_size = cfg.DATA.NUM_WORKERS * 20 ds = MultiProcessMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) else: ds = MapData(ds, preprocess) return ds " 26018,"def load_command_table(self, _): security_secure_scores_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecureScoresOperations.{}', client_factory=cf_security_secure_scores ) security_secure_score_controls_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecureScoreControlsOperations.{}', client_factory=cf_security_secure_score_controls ) security_secure_score_control_definitions_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecureScoreControlDefinitionsOperations.{}', client_factory=cf_security_secure_score_control_definitions ) security_regulatory_compliance_standards_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceStandardsOperations.{}', client_factory=cf_security_regulatory_compliance_standards ) security_regulatory_compliance_controls_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceControlsOperations.{}', client_factory=cf_security_regulatory_compliance_control ) security_regulatory_compliance_assessment_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceAssessmentsOperations.{}', client_factory=cf_security_regulatory_compliance_assessment ) security_tasks_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#TasksOperations.{}', client_factory=cf_security_tasks, operation_group='security_tasks' ) security_alerts_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AlertsOperations.{}', client_factory=cf_security_alerts, operation_group='security_alerts' ) security_alerts_suppression_rule_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AlertsSuppressionRulesOperations.{}', client_factory=cf_security_alerts_suppression_rule, operation_group='security_alerts_suppression_rule' ) security_settings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SettingsOperations.{}', client_factory=cf_security_settings, operation_group='security_settings' ) security_contacts_sdk = CliCommandType( 
operations_tmpl='azure.mgmt.security.operations#SecurityContactsOperations.{}', client_factory=cf_security_contacts, operation_group='security_contacts' ) security_auto_provisioning_settings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AutoProvisioningSettingsOperations.{}', client_factory=cf_security_auto_provisioning_settings, operation_group='security_auto_provisioning_settings' ) security_discovered_security_solutions_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#DiscoveredSecuritySolutionsOperations.{}', client_factory=cf_security_discovered_security_solutions, operation_group='security_discovered_security_solutions' ) security_external_security_solutions_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#ExternalSecuritySolutionsOperations.{}', client_factory=cf_security_external_security_solutions, operation_group='security_external_security_solutions' ) security_jit_network_access_policies_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#JitNetworkAccessPoliciesOperations.{}', client_factory=cf_security_jit_network_access_policies, operation_group='security_jit_network_access_policies' ) security_locations_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#LocationsOperations.{}', client_factory=cf_security_locations, operation_group='security_locations' ) security_pricings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#PricingsOperations.{}', client_factory=cf_security_pricings, operation_group='security_pricings' ) security_topology_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#TopologyOperations.{}', client_factory=cf_security_topology, operation_group='security_topology' ) security_workspace_settings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#WorkspaceSettingsOperations.{}', client_factory=cf_security_workspace_settings, operation_group='security_workspace_settings' ) security_advanced_threat_protection_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AdvancedThreatProtectionOperations.{}', client_factory=cf_security_advanced_threat_protection ) security_sql_vulnerability_assessment_scans_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScansOperations.{}', client_factory=cf_sql_vulnerability_assessment_scans ) security_sql_vulnerability_assessment_results_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScanResultsOperations.{}', client_factory=cf_sql_vulnerability_assessment_results ) security_sql_vulnerability_assessment_baseline_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentBaselineRulesOperations.{}', client_factory=cf_sql_vulnerability_assessment_baseline ) security_assessment_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AssessmentsOperations.{}', client_factory=cf_security_assessment ) security_assessment_metadata_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AssessmentMetadataOperations.{}', client_factory=cf_security_assessment_metadata ) security_sub_assessment_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SubAssessmentsOperations.{}', client_factory=cf_security_sub_assessment ) security_adaptive_application_controls_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AdaptiveApplicationControlsOperations.{}', 
client_factory=cf_security_adaptive_application_controls, operation_group='cf_security_adaptive_application_controls' ) security_adaptive_network_hardenings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AdaptiveNetworkhardeningsOperations.{}', client_factory=cf_security_adaptive_network_hardenings, operation_group='security_adaptive_network_hardenings' ) security_allowed_connections_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AllowedConnectionsOperations.{}', client_factory=cf_security_allowed_connections, operation_group='security_allowed_connections' ) security_iot_solution_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotSolutionOperations.{}', client_factory=cf_security_iot_solution ) security_iot_analytics_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotAnalyticsOperations.{}', client_factory=cf_security_iot_analytics ) security_iot_alerts_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotAlertsOperations.{}', client_factory=cf_security_iot_alerts ) security_iot_recommendations_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotRecommendationsOperations.{}', client_factory=cf_security_iot_recommendations ) security_automations_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AutomationsOperations.{}', client_factory=cf_security_automations, operation_group='security_automations' ) with self.command_group('security secure-scores', security_secure_scores_sdk, client_factory=cf_security_secure_scores) as g: g.custom_command('list', 'list_secure_scores') g.custom_show_command('show', 'get_secure_score') with self.command_group('security secure-score-controls', security_secure_score_controls_sdk, client_factory=cf_security_secure_score_controls) as g: g.custom_command('list', 'list_secure_score_controls') g.custom_show_command('list_by_score', 'list_by_score') with self.command_group('security secure-score-control-definitions', security_secure_score_control_definitions_sdk, client_factory=cf_security_secure_score_control_definitions) as g: g.custom_command('list', 'list_secure_score_control_definitions') with self.command_group('security regulatory-compliance-standards', security_regulatory_compliance_standards_sdk, client_factory=cf_security_regulatory_compliance_standards) as g: g.custom_command('list', 'list_regulatory_compliance_standards') g.custom_show_command('show', 'get_regulatory_compliance_standard') with self.command_group('security regulatory-compliance-controls', security_regulatory_compliance_controls_sdk, client_factory=cf_security_regulatory_compliance_control) as g: g.custom_command('list', 'list_regulatory_compliance_controls') g.custom_show_command('show', 'get_regulatory_compliance_control') with self.command_group('security regulatory-compliance-assessments', security_regulatory_compliance_assessment_sdk, client_factory=cf_security_regulatory_compliance_assessment) as g: g.custom_command('list', 'list_regulatory_compliance_assessments') g.custom_show_command('show', 'get_regulatory_compliance_assessment') with self.command_group('security task', security_tasks_sdk, client_factory=cf_security_tasks) as g: g.custom_command('list', 'list_security_tasks') g.custom_show_command('show', 'get_security_task') with self.command_group('security alerts-suppression-rule', security_alerts_suppression_rule_sdk, client_factory=cf_security_alerts_suppression_rule) as g: g.custom_command('list', 
'list_security_alerts_suppression_rule') g.custom_show_command('show', 'show_security_alerts_suppression_rule') g.custom_command('delete', 'delete_security_alerts_suppression_rule') g.custom_command('update', 'update_security_alerts_suppression_rule') g.custom_command('upsert_scope', 'upsert_security_alerts_suppression_rule_scope') g.custom_command('delete_scope', 'delete_security_alerts_suppression_rule_scope') for scope in ['storage', 'cosmosdb']: with self.command_group(f""security atp {scope}"", security_advanced_threat_protection_sdk, client_factory=cf_security_advanced_threat_protection) as g: g.custom_command('show', f""get_{scope}_atp_setting"") g.custom_command('update', f""update_{scope}_atp_setting"") with self.command_group('security va sql scans', security_sql_vulnerability_assessment_scans_sdk, client_factory=cf_sql_vulnerability_assessment_scans) as g: g.custom_show_command('show', 'get_va_sql_scan') g.custom_command('list', 'list_va_sql_scans') with self.command_group('security va sql results', security_sql_vulnerability_assessment_results_sdk, client_factory=cf_sql_vulnerability_assessment_results) as g: g.custom_show_command('show', 'get_va_sql_result') g.custom_command('list', 'list_va_sql_results') with self.command_group('security va sql baseline', security_sql_vulnerability_assessment_baseline_sdk, client_factory=cf_sql_vulnerability_assessment_baseline) as g: g.custom_show_command('show', 'get_va_sql_baseline') g.custom_command('list', 'list_va_sql_baseline') g.custom_command('delete', 'delete_va_sql_baseline') g.custom_command('update', 'update_va_sql_baseline') g.custom_command('set', 'set_va_sql_baseline') with self.command_group('security alert', security_alerts_sdk, client_factory=cf_security_alerts) as g: g.custom_command('list', 'list_security_alerts') g.custom_show_command('show', 'get_security_alert') g.custom_command('update', 'update_security_alert') with self.command_group('security setting', security_settings_sdk, client_factory=cf_security_settings) as g: g.custom_command('list', 'list_security_settings') g.custom_show_command('show', 'get_security_setting') g.custom_command('update', 'update_security_setting') with self.command_group('security contact', security_contacts_sdk, client_factory=cf_security_contacts) as g: g.custom_command('list', 'list_security_contacts') g.custom_show_command('show', 'get_security_contact') g.custom_command('create', 'create_security_contact') g.custom_command('delete', 'delete_security_contact') with self.command_group('security auto-provisioning-setting', security_auto_provisioning_settings_sdk, client_factory=cf_security_auto_provisioning_settings) as g: g.custom_command('list', 'list_security_auto_provisioning_settings') g.custom_show_command('show', 'get_security_auto_provisioning_setting') g.custom_command('update', 'update_security_auto_provisioning_setting') with self.command_group('security discovered-security-solution', security_discovered_security_solutions_sdk, client_factory=cf_security_discovered_security_solutions) as g: g.custom_command('list', 'list_security_discovered_security_solutions') g.custom_show_command('show', 'get_security_discovered_security_solution') with self.command_group('security external-security-solution', security_external_security_solutions_sdk, client_factory=cf_security_external_security_solutions) as g: g.custom_command('list', 'list_security_external_security_solutions') g.custom_show_command('show', 'get_security_external_security_solution') with self.command_group('security 
jit-policy', security_jit_network_access_policies_sdk, client_factory=cf_security_jit_network_access_policies) as g: g.custom_command('list', 'list_security_jit_network_access_policies') g.custom_show_command('show', 'get_security_jit_network_access_policy') with self.command_group('security location', security_locations_sdk, client_factory=cf_security_locations) as g: g.custom_command('list', 'list_security_locations') g.custom_show_command('show', 'get_security_location') with self.command_group('security pricing', security_pricings_sdk, client_factory=cf_security_pricings) as g: g.custom_command('list', 'list_security_pricings') g.custom_show_command('show', 'get_security_pricing') g.custom_command('create', 'create_security_pricing') with self.command_group('security topology', security_topology_sdk, client_factory=cf_security_topology) as g: g.custom_command('list', 'list_security_topology') g.custom_show_command('show', 'get_security_topology') with self.command_group('security workspace-setting', security_workspace_settings_sdk, client_factory=cf_security_workspace_settings) as g: g.custom_command('list', 'list_security_workspace_settings') g.custom_show_command('show', 'get_security_workspace_setting') g.custom_command('create', 'create_security_workspace_setting') g.custom_command('delete', 'delete_security_workspace_setting') with self.command_group('security assessment', security_assessment_sdk, client_factory=cf_security_assessment) as g: g.custom_command('list', 'list_security_assessments') g.custom_show_command('show', 'get_security_assessment') g.custom_command('create', 'create_security_assessment') g.custom_command('delete', 'delete_security_assessment') with self.command_group('security assessment-metadata', security_assessment_metadata_sdk, client_factory=cf_security_assessment_metadata) as g: g.custom_command('list', 'list_security_assessment_metadata') g.custom_show_command('show', 'get_security_assessment_metadata') g.custom_command('create', 'create_security_assessment_metadata') g.custom_command('delete', 'delete_security_assessment_metadata') with self.command_group('security sub-assessment', security_sub_assessment_sdk, client_factory=cf_security_sub_assessment) as g: g.custom_command('list', 'list_security_sub_assessments') g.custom_show_command('show', 'get_security_sub_assessment') with self.command_group('security adaptive-application-controls', security_adaptive_application_controls_sdk, client_factory=cf_security_adaptive_application_controls) as g: g.custom_command('list', 'list_security_adaptive_application_controls') g.custom_show_command('show', 'get_security_adaptive_application_controls') with self.command_group('security adaptive_network_hardenings', security_adaptive_network_hardenings_sdk, client_factory=cf_security_adaptive_network_hardenings) as g: g.custom_show_command('show', 'get_security_adaptive_network_hardenings') g.custom_command('list', 'list_security_adaptive_network_hardenings') with self.command_group('security allowed_connections', security_allowed_connections_sdk, client_factory=cf_security_allowed_connections) as g: g.custom_command('list', 'list_security_allowed_connections') g.custom_show_command('show', 'get_security_allowed_connections') with self.command_group('security iot-solution', security_iot_solution_sdk, client_factory=cf_security_iot_solution) as g: g.custom_command('list', 'list_security_iot_solution') g.custom_show_command('show', 'show_security_iot_solution') g.custom_command('create', 'create_security_iot_solution') 
g.custom_command('delete', 'delete_security_iot_solution') g.custom_command('update', 'update_security_iot_solution') with self.command_group('security iot-analytics', security_iot_analytics_sdk, client_factory=cf_security_iot_analytics) as g: g.custom_command('list', 'list_security_iot_analytics') g.custom_show_command('show', 'show_security_iot_analytics') with self.command_group('security iot-alerts', security_iot_alerts_sdk, client_factory=cf_security_iot_alerts) as g: g.custom_command('list', 'list_security_iot_alerts') g.custom_show_command('show', 'show_security_iot_alerts') g.custom_command('delete', 'dismiss_security_iot_alerts') with self.command_group('security iot-recommendations', security_iot_recommendations_sdk, client_factory=cf_security_iot_recommendations) as g: g.custom_command('list', 'list_security_iot_recommendations') g.custom_show_command('show', 'show_security_iot_recommendations') with self.command_group('security automation', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('list', 'list_security_automations') g.custom_show_command('show', 'get_security_automation') g.custom_command('delete', 'delete_security_automation') g.custom_command('create_or_update', 'create_or_update_security_automation') g.custom_command('validate', 'validate_security_automation') with self.command_group('security automation-scope', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_scope') with self.command_group('security automation-rule', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_rule') with self.command_group('security automation-rule-set', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_rule_set') with self.command_group('security automation-source', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_source') with self.command_group('security automation-action-logic-app', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_action_logic_app') with self.command_group('security automation-action-event-hub', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_action_event_hub') with self.command_group('security automation-action-workspace', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_action_workspace') with self.command_group('security'): pass ","def load_command_table(self, _): security_secure_scores_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecureScoresOperations.{}', client_factory=cf_security_secure_scores ) security_secure_score_controls_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecureScoreControlsOperations.{}', client_factory=cf_security_secure_score_controls ) security_secure_score_control_definitions_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecureScoreControlDefinitionsOperations.{}', client_factory=cf_security_secure_score_control_definitions ) security_regulatory_compliance_standards_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceStandardsOperations.{}', 
client_factory=cf_security_regulatory_compliance_standards ) security_regulatory_compliance_controls_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceControlsOperations.{}', client_factory=cf_security_regulatory_compliance_control ) security_regulatory_compliance_assessment_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#RegulatoryComplianceAssessmentsOperations.{}', client_factory=cf_security_regulatory_compliance_assessment ) security_tasks_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#TasksOperations.{}', client_factory=cf_security_tasks, operation_group='security_tasks' ) security_alerts_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AlertsOperations.{}', client_factory=cf_security_alerts, operation_group='security_alerts' ) security_alerts_suppression_rule_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AlertsSuppressionRulesOperations.{}', client_factory=cf_security_alerts_suppression_rule, operation_group='security_alerts_suppression_rule' ) security_settings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SettingsOperations.{}', client_factory=cf_security_settings, operation_group='security_settings' ) security_contacts_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SecurityContactsOperations.{}', client_factory=cf_security_contacts, operation_group='security_contacts' ) security_auto_provisioning_settings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AutoProvisioningSettingsOperations.{}', client_factory=cf_security_auto_provisioning_settings, operation_group='security_auto_provisioning_settings' ) security_discovered_security_solutions_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#DiscoveredSecuritySolutionsOperations.{}', client_factory=cf_security_discovered_security_solutions, operation_group='security_discovered_security_solutions' ) security_external_security_solutions_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#ExternalSecuritySolutionsOperations.{}', client_factory=cf_security_external_security_solutions, operation_group='security_external_security_solutions' ) security_jit_network_access_policies_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#JitNetworkAccessPoliciesOperations.{}', client_factory=cf_security_jit_network_access_policies, operation_group='security_jit_network_access_policies' ) security_locations_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#LocationsOperations.{}', client_factory=cf_security_locations, operation_group='security_locations' ) security_pricings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#PricingsOperations.{}', client_factory=cf_security_pricings, operation_group='security_pricings' ) security_topology_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#TopologyOperations.{}', client_factory=cf_security_topology, operation_group='security_topology' ) security_workspace_settings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#WorkspaceSettingsOperations.{}', client_factory=cf_security_workspace_settings, operation_group='security_workspace_settings' ) security_advanced_threat_protection_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AdvancedThreatProtectionOperations.{}', client_factory=cf_security_advanced_threat_protection ) 
security_sql_vulnerability_assessment_scans_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScansOperations.{}', client_factory=cf_sql_vulnerability_assessment_scans ) security_sql_vulnerability_assessment_results_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentScanResultsOperations.{}', client_factory=cf_sql_vulnerability_assessment_results ) security_sql_vulnerability_assessment_baseline_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SqlVulnerabilityAssessmentBaselineRulesOperations.{}', client_factory=cf_sql_vulnerability_assessment_baseline ) security_assessment_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AssessmentsOperations.{}', client_factory=cf_security_assessment ) security_assessment_metadata_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AssessmentMetadataOperations.{}', client_factory=cf_security_assessment_metadata ) security_sub_assessment_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#SubAssessmentsOperations.{}', client_factory=cf_security_sub_assessment ) security_adaptive_application_controls_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AdaptiveApplicationControlsOperations.{}', client_factory=cf_security_adaptive_application_controls, operation_group='cf_security_adaptive_application_controls' ) security_adaptive_network_hardenings_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AdaptiveNetworkhardeningsOperations.{}', client_factory=cf_security_adaptive_network_hardenings, operation_group='security_adaptive_network_hardenings' ) security_allowed_connections_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AllowedConnectionsOperations.{}', client_factory=cf_security_allowed_connections, operation_group='security_allowed_connections' ) security_iot_solution_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotSolutionOperations.{}', client_factory=cf_security_iot_solution ) security_iot_analytics_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotAnalyticsOperations.{}', client_factory=cf_security_iot_analytics ) security_iot_alerts_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotAlertsOperations.{}', client_factory=cf_security_iot_alerts ) security_iot_recommendations_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#IotRecommendationsOperations.{}', client_factory=cf_security_iot_recommendations ) security_automations_sdk = CliCommandType( operations_tmpl='azure.mgmt.security.operations#AutomationsOperations.{}', client_factory=cf_security_automations, operation_group='security_automations' ) with self.command_group('security secure-scores', security_secure_scores_sdk, client_factory=cf_security_secure_scores) as g: g.custom_command('list', 'list_secure_scores') g.custom_show_command('show', 'get_secure_score') with self.command_group('security secure-score-controls', security_secure_score_controls_sdk, client_factory=cf_security_secure_score_controls) as g: g.custom_command('list', 'list_secure_score_controls') g.custom_show_command('list_by_score', 'list_by_score') with self.command_group('security secure-score-control-definitions', security_secure_score_control_definitions_sdk, client_factory=cf_security_secure_score_control_definitions) as g: g.custom_command('list', 'list_secure_score_control_definitions') with 
self.command_group('security regulatory-compliance-standards', security_regulatory_compliance_standards_sdk, client_factory=cf_security_regulatory_compliance_standards) as g: g.custom_command('list', 'list_regulatory_compliance_standards') g.custom_show_command('show', 'get_regulatory_compliance_standard') with self.command_group('security regulatory-compliance-controls', security_regulatory_compliance_controls_sdk, client_factory=cf_security_regulatory_compliance_control) as g: g.custom_command('list', 'list_regulatory_compliance_controls') g.custom_show_command('show', 'get_regulatory_compliance_control') with self.command_group('security regulatory-compliance-assessments', security_regulatory_compliance_assessment_sdk, client_factory=cf_security_regulatory_compliance_assessment) as g: g.custom_command('list', 'list_regulatory_compliance_assessments') g.custom_show_command('show', 'get_regulatory_compliance_assessment') with self.command_group('security task', security_tasks_sdk, client_factory=cf_security_tasks) as g: g.custom_command('list', 'list_security_tasks') g.custom_show_command('show', 'get_security_task') with self.command_group('security alerts-suppression-rule', security_alerts_suppression_rule_sdk, client_factory=cf_security_alerts_suppression_rule) as g: g.custom_command('list', 'list_security_alerts_suppression_rule') g.custom_show_command('show', 'show_security_alerts_suppression_rule') g.custom_command('delete', 'delete_security_alerts_suppression_rule') g.custom_command('update', 'update_security_alerts_suppression_rule') g.custom_command('upsert_scope', 'upsert_security_alerts_suppression_rule_scope') g.custom_command('delete_scope', 'delete_security_alerts_suppression_rule_scope') for scope in ['storage', 'cosmosdb']: with self.command_group(f""security atp {scope}"", security_advanced_threat_protection_sdk, client_factory=cf_security_advanced_threat_protection) as g: g.custom_show_command('show', f""get_{scope}_atp_setting"") g.custom_command('update', f""update_{scope}_atp_setting"") with self.command_group('security va sql scans', security_sql_vulnerability_assessment_scans_sdk, client_factory=cf_sql_vulnerability_assessment_scans) as g: g.custom_show_command('show', 'get_va_sql_scan') g.custom_command('list', 'list_va_sql_scans') with self.command_group('security va sql results', security_sql_vulnerability_assessment_results_sdk, client_factory=cf_sql_vulnerability_assessment_results) as g: g.custom_show_command('show', 'get_va_sql_result') g.custom_command('list', 'list_va_sql_results') with self.command_group('security va sql baseline', security_sql_vulnerability_assessment_baseline_sdk, client_factory=cf_sql_vulnerability_assessment_baseline) as g: g.custom_show_command('show', 'get_va_sql_baseline') g.custom_command('list', 'list_va_sql_baseline') g.custom_command('delete', 'delete_va_sql_baseline') g.custom_command('update', 'update_va_sql_baseline') g.custom_command('set', 'set_va_sql_baseline') with self.command_group('security alert', security_alerts_sdk, client_factory=cf_security_alerts) as g: g.custom_command('list', 'list_security_alerts') g.custom_show_command('show', 'get_security_alert') g.custom_command('update', 'update_security_alert') with self.command_group('security setting', security_settings_sdk, client_factory=cf_security_settings) as g: g.custom_command('list', 'list_security_settings') g.custom_show_command('show', 'get_security_setting') g.custom_command('update', 'update_security_setting') with self.command_group('security contact', 
security_contacts_sdk, client_factory=cf_security_contacts) as g: g.custom_command('list', 'list_security_contacts') g.custom_show_command('show', 'get_security_contact') g.custom_command('create', 'create_security_contact') g.custom_command('delete', 'delete_security_contact') with self.command_group('security auto-provisioning-setting', security_auto_provisioning_settings_sdk, client_factory=cf_security_auto_provisioning_settings) as g: g.custom_command('list', 'list_security_auto_provisioning_settings') g.custom_show_command('show', 'get_security_auto_provisioning_setting') g.custom_command('update', 'update_security_auto_provisioning_setting') with self.command_group('security discovered-security-solution', security_discovered_security_solutions_sdk, client_factory=cf_security_discovered_security_solutions) as g: g.custom_command('list', 'list_security_discovered_security_solutions') g.custom_show_command('show', 'get_security_discovered_security_solution') with self.command_group('security external-security-solution', security_external_security_solutions_sdk, client_factory=cf_security_external_security_solutions) as g: g.custom_command('list', 'list_security_external_security_solutions') g.custom_show_command('show', 'get_security_external_security_solution') with self.command_group('security jit-policy', security_jit_network_access_policies_sdk, client_factory=cf_security_jit_network_access_policies) as g: g.custom_command('list', 'list_security_jit_network_access_policies') g.custom_show_command('show', 'get_security_jit_network_access_policy') with self.command_group('security location', security_locations_sdk, client_factory=cf_security_locations) as g: g.custom_command('list', 'list_security_locations') g.custom_show_command('show', 'get_security_location') with self.command_group('security pricing', security_pricings_sdk, client_factory=cf_security_pricings) as g: g.custom_command('list', 'list_security_pricings') g.custom_show_command('show', 'get_security_pricing') g.custom_command('create', 'create_security_pricing') with self.command_group('security topology', security_topology_sdk, client_factory=cf_security_topology) as g: g.custom_command('list', 'list_security_topology') g.custom_show_command('show', 'get_security_topology') with self.command_group('security workspace-setting', security_workspace_settings_sdk, client_factory=cf_security_workspace_settings) as g: g.custom_command('list', 'list_security_workspace_settings') g.custom_show_command('show', 'get_security_workspace_setting') g.custom_command('create', 'create_security_workspace_setting') g.custom_command('delete', 'delete_security_workspace_setting') with self.command_group('security assessment', security_assessment_sdk, client_factory=cf_security_assessment) as g: g.custom_command('list', 'list_security_assessments') g.custom_show_command('show', 'get_security_assessment') g.custom_command('create', 'create_security_assessment') g.custom_command('delete', 'delete_security_assessment') with self.command_group('security assessment-metadata', security_assessment_metadata_sdk, client_factory=cf_security_assessment_metadata) as g: g.custom_command('list', 'list_security_assessment_metadata') g.custom_show_command('show', 'get_security_assessment_metadata') g.custom_command('create', 'create_security_assessment_metadata') g.custom_command('delete', 'delete_security_assessment_metadata') with self.command_group('security sub-assessment', security_sub_assessment_sdk, client_factory=cf_security_sub_assessment) as g: 
g.custom_command('list', 'list_security_sub_assessments') g.custom_show_command('show', 'get_security_sub_assessment') with self.command_group('security adaptive-application-controls', security_adaptive_application_controls_sdk, client_factory=cf_security_adaptive_application_controls) as g: g.custom_command('list', 'list_security_adaptive_application_controls') g.custom_show_command('show', 'get_security_adaptive_application_controls') with self.command_group('security adaptive_network_hardenings', security_adaptive_network_hardenings_sdk, client_factory=cf_security_adaptive_network_hardenings) as g: g.custom_show_command('show', 'get_security_adaptive_network_hardenings') g.custom_command('list', 'list_security_adaptive_network_hardenings') with self.command_group('security allowed_connections', security_allowed_connections_sdk, client_factory=cf_security_allowed_connections) as g: g.custom_command('list', 'list_security_allowed_connections') g.custom_show_command('show', 'get_security_allowed_connections') with self.command_group('security iot-solution', security_iot_solution_sdk, client_factory=cf_security_iot_solution) as g: g.custom_command('list', 'list_security_iot_solution') g.custom_show_command('show', 'show_security_iot_solution') g.custom_command('create', 'create_security_iot_solution') g.custom_command('delete', 'delete_security_iot_solution') g.custom_command('update', 'update_security_iot_solution') with self.command_group('security iot-analytics', security_iot_analytics_sdk, client_factory=cf_security_iot_analytics) as g: g.custom_command('list', 'list_security_iot_analytics') g.custom_show_command('show', 'show_security_iot_analytics') with self.command_group('security iot-alerts', security_iot_alerts_sdk, client_factory=cf_security_iot_alerts) as g: g.custom_command('list', 'list_security_iot_alerts') g.custom_show_command('show', 'show_security_iot_alerts') g.custom_command('delete', 'dismiss_security_iot_alerts') with self.command_group('security iot-recommendations', security_iot_recommendations_sdk, client_factory=cf_security_iot_recommendations) as g: g.custom_command('list', 'list_security_iot_recommendations') g.custom_show_command('show', 'show_security_iot_recommendations') with self.command_group('security automation', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('list', 'list_security_automations') g.custom_show_command('show', 'get_security_automation') g.custom_command('delete', 'delete_security_automation') g.custom_command('create_or_update', 'create_or_update_security_automation') g.custom_command('validate', 'validate_security_automation') with self.command_group('security automation-scope', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_scope') with self.command_group('security automation-rule', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_rule') with self.command_group('security automation-rule-set', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_rule_set') with self.command_group('security automation-source', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_source') with self.command_group('security automation-action-logic-app', security_automations_sdk, client_factory=cf_security_automations) 
as g: g.custom_command('create', 'create_security_automation_action_logic_app') with self.command_group('security automation-action-event-hub', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_action_event_hub') with self.command_group('security automation-action-workspace', security_automations_sdk, client_factory=cf_security_automations) as g: g.custom_command('create', 'create_security_automation_action_workspace') with self.command_group('security'): pass " 39677,"def main(): module = ForemanEntityAnsibleModule( argument_spec=dict( updated_title=dict(type='str'), ), entity_spec=dict( title=dict(type='str', required=True), original_filename=dict(type='str', required=False), scap_file=dict(type='str'), organizations=dict(type='entity_list', flat_name='organization_ids'), locations=dict(type='entity_list', flat_name='location_ids'), ), ) entity_dict = module.clean_params() module.connect() entity = module.find_scapcontent(entity_dict['title'], failsafe=True) if not module.desired_absent: if entity and 'updated_title' in entity_dict: entity_dict['title'] = entity_dict.pop('updated_title') if not entity and 'scap_file' not in entity_dict: module.fail_json(msg=""Content of scap_file not provided. XML containing SCAP content is required."") if 'locations' in entity_dict: entity_dict['locations'] = module.find_resources_by_title('locations', entity_dict['locations'], thin=True) if 'organizations' in entity_dict: entity_dict['organizations'] = module.find_resources_by_name('organizations', entity_dict['organizations'], thin=True) changed = module.ensure_entity_state('scap_contents', entity_dict, entity) module.exit_json(changed=changed, entity_dict=entity_dict) ","def main(): module = ForemanEntityAnsibleModule( argument_spec=dict( updated_title=dict(type='str'), ), foreman_spec=dict( title=dict(type='str', required=True), original_filename=dict(type='str', required=False), scap_file=dict(type='str'), organizations=dict(type='entity_list', flat_name='organization_ids'), locations=dict(type='entity_list', flat_name='location_ids'), ), ) entity_dict = module.clean_params() module.connect() entity = module.find_scapcontent(entity_dict['title'], failsafe=True) if not module.desired_absent: if entity and 'updated_title' in entity_dict: entity_dict['title'] = entity_dict.pop('updated_title') if not entity and 'scap_file' not in entity_dict: module.fail_json(msg=""Content of scap_file not provided. 
XML containing SCAP content is required."") if 'locations' in entity_dict: entity_dict['locations'] = module.find_resources_by_title('locations', entity_dict['locations'], thin=True) if 'organizations' in entity_dict: entity_dict['organizations'] = module.find_resources_by_name('organizations', entity_dict['organizations'], thin=True) changed = module.ensure_entity_state('scap_contents', entity_dict, entity) module.exit_json(changed=changed, entity_dict=entity_dict) " 32121,"def fetch_incidents(client, last_run, first_fetch_time, min_severity): # Get the last fetch details, if exist last_fetch_time = last_run.get('last_fetch_time') last_fetch_ids = last_run.get('last_fetch_ids', []) last_incident_number = last_run.get('last_incident_number') demisto.debug(f""last fetch time is: {last_fetch_time}, last fetch ids are:{last_fetch_ids}"") # Handle first time fetch if last_fetch_time is None: # this is the first fetch, or the First fetch timestamp was reset demisto.debug(""im in last fetch time is none"") # handle via time stamp last_fetch_time_str, _ = parse_date_range(first_fetch_time, DATE_FORMAT) last_fetch_time = dateparser.parse(last_fetch_time_str) latest_created_time = last_fetch_time latest_created_time_str = latest_created_time.strftime(DATE_FORMAT) command_args = {'filter': f'properties/createdTimeUtc ge {latest_created_time_str}'} first_fetch = True else: demisto.debug(""handle via id"") # handle via id last_fetch_time = dateparser.parse(last_fetch_time) latest_created_time = last_fetch_time command_args = {'filter': f'properties/incidentNumber ge {last_incident_number}'} first_fetch = False demisto.debug(f""command_args:{command_args}, first_fetch:{first_fetch}"") command_result = list_incidents_command(client, command_args, is_fetch_incidents=True, first_fetch=first_fetch) raw_incidents = command_result.outputs demisto.debug(f""these are the raw incidents:{raw_incidents}"") next_run, incidents = process_incidents(raw_incidents, last_fetch_ids, min_severity, latest_created_time, last_incident_number) return next_run, incidents ","def fetch_incidents(client, last_run, first_fetch_time, min_severity): # Get the last fetch details, if exist last_fetch_time = last_run.get('last_fetch_time') last_fetch_ids = last_run.get('last_fetch_ids', []) last_incident_number = last_run.get('last_incident_number') demisto.debug(f""{last_fetch_time=}, {last_fetch_ids=}"") # Handle first time fetch if last_fetch_time is None: # this is the first fetch, or the First fetch timestamp was reset demisto.debug(""im in last fetch time is none"") # handle via time stamp last_fetch_time_str, _ = parse_date_range(first_fetch_time, DATE_FORMAT) last_fetch_time = dateparser.parse(last_fetch_time_str) latest_created_time = last_fetch_time latest_created_time_str = latest_created_time.strftime(DATE_FORMAT) command_args = {'filter': f'properties/createdTimeUtc ge {latest_created_time_str}'} first_fetch = True else: demisto.debug(""handle via id"") # handle via id last_fetch_time = dateparser.parse(last_fetch_time) latest_created_time = last_fetch_time command_args = {'filter': f'properties/incidentNumber ge {last_incident_number}'} first_fetch = False demisto.debug(f""command_args:{command_args}, first_fetch:{first_fetch}"") command_result = list_incidents_command(client, command_args, is_fetch_incidents=True, first_fetch=first_fetch) raw_incidents = command_result.outputs demisto.debug(f""these are the raw incidents:{raw_incidents}"") next_run, incidents = process_incidents(raw_incidents, last_fetch_ids, 
min_severity, latest_created_time, last_incident_number) return next_run, incidents " 54393,"def capture_compile(args): TOOLSDIR = Path(common.get_make_flag(""TOOLSDIR"")) env = dict(os.environ) make_symlinks(env) env[""PATH""] = str(TOOLSDIR) + "":"" + os.environ[""PATH""] cmd = [sys.executable, ""setup.py"", ""install""] if args.install_dir == ""skip"": cmd[-1] = ""build"" elif args.install_dir != """": cmd.extend([""--home"", args.install_dir]) cmd.extend(args.setupflags.split()) result = subprocess.run(cmd, env=env) if result.returncode != 0: build_log_path = Path(""build.log"") if build_log_path.exists(): build_log_path.unlink() sys.exit(result.returncode) ","def capture_compile(args): TOOLSDIR = Path(common.get_make_flag(""TOOLSDIR"")) env = dict(os.environ) make_symlinks(env) env[""PATH""] = str(TOOLSDIR) + "":"" + os.environ[""PATH""] cmd = [sys.executable, ""setup.py"", ""install""] if args.install_dir == ""skip"": cmd[-1] = ""build"" elif args.install_dir != """": cmd.extend([""--home"", args.install_dir]) if args.setupflags.split(): cmd.extend(args.setupflags.split()) result = subprocess.run(cmd, env=env) if result.returncode != 0: build_log_path = Path(""build.log"") if build_log_path.exists(): build_log_path.unlink() sys.exit(result.returncode) " 772,"def auto_response_msmt(gtab, data, tol=20, roi_center=None, roi_radii=10, wm_fa_thr=0.7, gm_fa_thr=0.3, csf_fa_thr=0.15, gm_md_thr=0.001, csf_md_thr=0.0032): """""" Automatic estimation of multi-shell multi-tissue (msmt) response functions using FA and MD. Parameters ---------- gtab : GradientTable data : ndarray diffusion data roi_center : array-like, (3,) Center of ROI in data. If center is None, it is assumed that it is the center of the volume with shape `data.shape[:3]`. roi_radii : int or array-like, (3,) radii of cuboid ROI wm_fa_thr : float FA threshold for WM. gm_fa_thr : float FA threshold for GM. csf_fa_thr : float FA threshold for CSF. gm_md_thr : float MD threshold for GM. csf_md_thr : float MD threshold for CSF. Returns ------- response_wm : ndarray, (len(unique_bvals_tolerance(gtab.bvals))-1, 4) (`evals`, `S0`) for WM for each unique bvalues (except b0). response_gm : ndarray, (len(unique_bvals_tolerance(gtab.bvals))-1, 4) (`evals`, `S0`) for GM for each unique bvalues (except b0). response_csf : ndarray, (len(unique_bvals_tolerance(gtab.bvals))-1, 4) (`evals`, `S0`) for CSF for each unique bvalues (except b0). Notes ----- In msmt-CSD there is an important pre-processing step: the estimation of every tissue's response function. In order to do this, we look for voxels corresponding to WM, GM and CSF. We get this information from mcsd.mask_for_response_msmt(), which returns masks of selected voxels (more details are available in the description of the function). With the masks, we compute the response functions by using mcsd.response_from_mask_msmt(), which returns the `response` for each tissue (more details are available in the description of the function). """""" list_bvals = unique_bvals_tolerance(gtab.bvals) if not np.all(list_bvals <= 1200): msg_bvals = """"""Some b-values are higher than 1200. The DTI fit might be affected. 
It is adviced to use mask_for_response_msmt with bvalues lower than 1200, followed by response_from_mask_msmt with all bvalues to overcome this."""""" warnings.warn(msg_bvals, UserWarning) mask_wm, mask_gm, mask_csf = mask_for_response_msmt(gtab, data, roi_center, roi_radii, wm_fa_thr, gm_fa_thr, csf_fa_thr, gm_md_thr, csf_md_thr) response_wm, response_gm, response_csf = response_from_mask_msmt( gtab, data, mask_wm, mask_gm, mask_csf, tol) return response_wm, response_gm, response_csf ","def auto_response_msmt(gtab, data, tol=20, roi_center=None, roi_radii=10, wm_fa_thr=0.7, gm_fa_thr=0.3, csf_fa_thr=0.15, gm_md_thr=0.001, csf_md_thr=0.0032): """""" Automatic estimation of multi-shell multi-tissue (msmt) response functions using FA and MD. Parameters ---------- gtab : GradientTable data : ndarray diffusion data roi_center : array-like, (3,) Center of ROI in data. If center is None, it is assumed that it is the center of the volume with shape `data.shape[:3]`. roi_radii : int or array-like, (3,) radii of cuboid ROI wm_fa_thr : float FA threshold for WM. gm_fa_thr : float FA threshold for GM. csf_fa_thr : float FA threshold for CSF. gm_md_thr : float MD threshold for GM. csf_md_thr : float MD threshold for CSF. Returns ------- response_wm : ndarray, (len(unique_bvals_tolerance(gtab.bvals))-1, 4) (`evals`, `S0`) for WM for each unique bvalues (except b0). response_gm : ndarray, (len(unique_bvals_tolerance(gtab.bvals))-1, 4) (`evals`, `S0`) for GM for each unique bvalues (except b0). response_csf : ndarray, (len(unique_bvals_tolerance(gtab.bvals))-1, 4) (`evals`, `S0`) for CSF for each unique bvalues (except b0). Notes ----- In msmt-CSD there is an important pre-processing step: the estimation of every tissue's response function. In order to do this, we look for voxels corresponding to WM, GM and CSF. We get this information from mcsd.mask_for_response_msmt(), which returns masks of selected voxels (more details are available in the description of the function). With the masks, we compute the response functions by using mcsd.response_from_mask_msmt(), which returns the `response` for each tissue (more details are available in the description of the function). """""" list_bvals = unique_bvals_tolerance(gtab.bvals) if not np.all(list_bvals <= 1200): msg_bvals = """"""Some b-values are higher than 1200. The DTI fit might be affected. It is advised to use mask_for_response_msmt with bvalues lower than 1200, followed by response_from_mask_msmt with all bvalues to overcome this."""""" warnings.warn(msg_bvals, UserWarning) mask_wm, mask_gm, mask_csf = mask_for_response_msmt(gtab, data, roi_center, roi_radii, wm_fa_thr, gm_fa_thr, csf_fa_thr, gm_md_thr, csf_md_thr) response_wm, response_gm, response_csf = response_from_mask_msmt( gtab, data, mask_wm, mask_gm, mask_csf, tol) return response_wm, response_gm, response_csf " 41417,"def add_gaia_figure_elements(tpf, fig, magnitude_limit=18): """"""Make the Gaia Figure Elements"""""" #Get the positions of the Gaia sources c1 = SkyCoord(tpf.ra, tpf.dec, frame='icrs', unit='deg') # Use pixel scale for query size pix_scale = 4.0 # arcseconds / pixel for Kepler, default if tpf.mission == 'TESS': pix_scale = 21.0 # We are querying with a diameter as the radius, overfilling by 2x. 
result = Vizier.query_region(c1, catalog=[""I/345/gaia2""], radius=Angle(np.max(tpf.shape[1:]) * pix_scale, ""arcsec"")) if result is None: raise ValueError('No targets found in region.') result = result[""I/345/gaia2""].to_pandas() result = result[result.Gmag < magnitude_limit] radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is this supposed to be zero or one????? year = ((tpf.astropy_time[0].jd - 2457206.375) * u.day).to(u.year) pmra = ((np.asarray(result.pmRA) * u.milliarcsecond/u.year) * year).to(u.arcsec).value pmdec = ((np.asarray(result.pmDE) * u.milliarcsecond/u.year) * year).to(u.arcsec).value ## todo: filter NaNs in pmra/pmdec result.RA_ICRS += pmra result.DE_ICRS += pmdec # Gently size the points by their Gaia magnitude sizes = 64.0 / 2**(result['Gmag']/5.0) source = ColumnDataSource(data=dict(ra=result['RA_ICRS'], dec=result['DE_ICRS'], source=result['Source'], Gmag=result['Gmag'], plx=result['Plx'], x=coords[:, 0]+tpf.column, y=coords[:, 1]+tpf.row, size=sizes)) r = fig.circle('x', 'y', source=source,fill_alpha=0.3, size='size', line_color=None, selection_color=""firebrick"",nonselection_fill_alpha=0.0, nonselection_line_color=None, nonselection_line_alpha=0.0, fill_color=""firebrick"", hover_fill_color=""firebrick"", hover_alpha=0.9, hover_line_color=""white"") fig.add_tools(HoverTool(tooltips=[(""Source"", ""@source""),(""G"", ""@Gmag""),(""Parallax"", ""@plx""), (""RA"", ""@ra{0,0.00000000}""), (""DEC"", ""@dec{0,0.00000000}""), (""x"", ""@x""), (""y"", ""@y"")], renderers=[r], mode='mouse', point_policy=""snap_to_data"")) return fig, r ","def add_gaia_figure_elements(tpf, fig, magnitude_limit=18): """"""Make the Gaia Figure Elements"""""" #Get the positions of the Gaia sources c1 = SkyCoord(tpf.ra, tpf.dec, frame='icrs', unit='deg') # Use pixel scale for query size pix_scale = 4.0 # arcseconds / pixel for Kepler, default if tpf.mission == 'TESS': pix_scale = 21.0 # We are querying with a diameter as the radius, overfilling by 2x. result = Vizier.query_region(c1, catalog=[""I/345/gaia2""], radius=Angle(np.max(tpf.shape[1:]) * pix_scale, ""arcsec"")) if result is None: raise ValueError('No targets found in region.') result = result[""I/345/gaia2""].to_pandas() result = result[result.Gmag < magnitude_limit] radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is this supposed to be zero or one????? 
year = ((tpf.astropy_time[0].jd - 2457206.375) * u.day).to(u.year) pmra = ((np.asarray(result.pmRA) * u.milliarcsecond/u.year) * year).to(u.arcsec).value pmdec = ((np.nan_to_num(np.asarray(result.pmDE)) * u.milliarcsecond/u.year) * year).to(u.arcsec).value ## todo: filter NaNs in pmra/pmdec result.RA_ICRS += pmra result.DE_ICRS += pmdec # Gently size the points by their Gaia magnitude sizes = 64.0 / 2**(result['Gmag']/5.0) source = ColumnDataSource(data=dict(ra=result['RA_ICRS'], dec=result['DE_ICRS'], source=result['Source'], Gmag=result['Gmag'], plx=result['Plx'], x=coords[:, 0]+tpf.column, y=coords[:, 1]+tpf.row, size=sizes)) r = fig.circle('x', 'y', source=source,fill_alpha=0.3, size='size', line_color=None, selection_color=""firebrick"",nonselection_fill_alpha=0.0, nonselection_line_color=None, nonselection_line_alpha=0.0, fill_color=""firebrick"", hover_fill_color=""firebrick"", hover_alpha=0.9, hover_line_color=""white"") fig.add_tools(HoverTool(tooltips=[(""Source"", ""@source""),(""G"", ""@Gmag""),(""Parallax"", ""@plx""), (""RA"", ""@ra{0,0.00000000}""), (""DEC"", ""@dec{0,0.00000000}""), (""x"", ""@x""), (""y"", ""@y"")], renderers=[r], mode='mouse', point_policy=""snap_to_data"")) return fig, r " 56510,"def get_data_by_tag_and_table_name( conn: ConnectionPlus, tag: str, table_name: str ) -> Optional[VALUE]: """"""Get data under the tag from table returns None if the column is missing"""""" try: data = select_one_where(conn, ""runs"", tag, ""result_table_name"", table_name) except RuntimeError as e: # all errors trigger an runtime error here since select_one_where is wrapped # in an atomic that will do a rollback # this probably just means that the column is not there # and therefore it contains no data if str(e.__cause__).startswith(""no such column""): data = None else: raise e return data ","def get_data_by_tag_and_table_name( conn: ConnectionPlus, tag: str, table_name: str ) -> Optional[VALUE]: """""" Get data from the ""tag"" column for the row in ""runs"" table where ""result_table_name"" matches ""table_name"". Returns None if the ""tag"" column is missing in ""runs"" table. 
"""""" try: data = select_one_where(conn, ""runs"", tag, ""result_table_name"", table_name) except RuntimeError as e: # all errors trigger an runtime error here since select_one_where is wrapped # in an atomic that will do a rollback # this probably just means that the column is not there # and therefore it contains no data if str(e.__cause__).startswith(""no such column""): data = None else: raise e return data " 12812,"def delegate(parsed_arguments): if not parsed_arguments.delegatee: raise exceptions.Error( '--delegatee must be set to perform the delegation.') if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'): raise exceptions.Error( 'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee)) if not parsed_arguments.pubkeys: raise exceptions.Error( '--pubkeys must be set to perform the delegation.') public_keys = [] for public_key in parsed_arguments.pubkeys: imported_pubkey = import_publickey_from_file(public_key) public_keys.append(imported_pubkey) repository = repo_tool.load_repository( os.path.join(parsed_arguments.path, REPO_DIR)) if parsed_arguments.role == 'targets': repository.targets.delegate(parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Use key from --sign if present or use the default target_key for key in parsed_arguments.sign or [os.path.join(KEYSTORE_DIR, TARGETS_KEY_NAME)]: targets_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key), parsed_arguments.targets_pw ) repository.targets.load_signing_key(targets_private) # Generate the delegate targets file for key in parsed_arguments.pubkeys: delegate_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key[:-4]), parsed_arguments.pw, ) repository.targets(parsed_arguments.delegatee).load_signing_key(delegate_private) # A delegated (non-top-level-Targets) role. else: repository.targets(parsed_arguments.role).delegate( parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Update the required top-level roles, Snapshot and Timestamp, to make a new # release. Automatically making a new release can be disabled via # --no_release. if not parsed_arguments.no_release: snapshot_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw) timestamp_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) repository.snapshot.load_signing_key(snapshot_private) repository.timestamp.load_signing_key(timestamp_private) consistent_snapshot = roledb.get_roleinfo('root', repository._repository_name)['consistent_snapshot'] repository.writeall(consistent_snapshot=consistent_snapshot) # Move staged metadata directory to ""live"" metadata directory. 
write_to_live_repo(parsed_arguments) ","def delegate(parsed_arguments): if not parsed_arguments.delegatee: raise exceptions.Error( '--delegatee must be set to perform the delegation.') if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'): raise exceptions.Error( 'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee)) if not parsed_arguments.pubkeys: raise exceptions.Error( '--pubkeys must be set to perform the delegation.') public_keys = [] for public_key in parsed_arguments.pubkeys: imported_pubkey = import_publickey_from_file(public_key) public_keys.append(imported_pubkey) repository = repo_tool.load_repository( os.path.join(parsed_arguments.path, REPO_DIR)) if parsed_arguments.role == 'targets': repository.targets.delegate(parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Use key from --sign if present or use the default target_key for key in parsed_arguments.sign or [os.path.join(KEYSTORE_DIR, TARGETS_KEY_NAME)]: targets_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key), parsed_arguments.targets_pw ) repository.targets.load_signing_key(targets_private) # Generate the delegate targets file for key in parsed_arguments.pubkeys: delegate_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, key[:-4]), parsed_arguments.pw, ) repository.targets(parsed_arguments.delegatee).load_signing_key(delegatee_private) # A delegated (non-top-level-Targets) role. else: repository.targets(parsed_arguments.role).delegate( parsed_arguments.delegatee, public_keys, parsed_arguments.delegate, parsed_arguments.threshold, parsed_arguments.terminating, list_of_targets=None, path_hash_prefixes=None) # Update the required top-level roles, Snapshot and Timestamp, to make a new # release. Automatically making a new release can be disabled via # --no_release. if not parsed_arguments.no_release: snapshot_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw) timestamp_private = import_privatekey_from_file( os.path.join(parsed_arguments.path, KEYSTORE_DIR, TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw) repository.snapshot.load_signing_key(snapshot_private) repository.timestamp.load_signing_key(timestamp_private) consistent_snapshot = roledb.get_roleinfo('root', repository._repository_name)['consistent_snapshot'] repository.writeall(consistent_snapshot=consistent_snapshot) # Move staged metadata directory to ""live"" metadata directory. write_to_live_repo(parsed_arguments) " 40529,"def update_agents(cmd, client, resource_group_name, cluster_name, location=None, https_proxy="""", http_proxy="""", no_proxy="""", kube_config=None, kube_context=None, no_wait=False, tags=None): logger.warning(""Ensure that you have the latest helm version installed before proceeding."") logger.warning(""This operation might take a while...\n"") # Setting user profile profile = Profile(cli_ctx=cmd.cli_ctx) # Setting kubeconfig kube_config = set_kube_config(kube_config) # Removing quotes from kubeconfig path. This is necessary for windows OS. trim_kube_config(kube_config) # Escaping comma, forward slash present in https proxy urls, needed for helm params. https_proxy = escape_proxy_settings(https_proxy) # Escaping comma, forward slash present in http proxy urls, needed for helm params. 
http_proxy = escape_proxy_settings(http_proxy) # Escaping comma, forward slash present in no proxy urls, needed for helm params. no_proxy = escape_proxy_settings(no_proxy) # Loading the kubeconfig file in kubernetes client configuration try: config.load_kube_config(config_file=kube_config, context=kube_context) except Exception as e: telemetry.set_user_fault() telemetry.set_exception(exception=e, fault_type=consts.Load_Kubeconfig_Fault_Type, summary='Problem loading the kubeconfig file') raise CLIError(""Problem loading the kubeconfig file."" + str(e)) configuration = kube_client.Configuration() # Checking the connection to kubernetes cluster. # This check was added to avoid large timeouts when connecting to AAD Enabled AKS clusters # if the user had not logged in. check_kube_connection(configuration) # Get kubernetes cluster info for telemetry kubernetes_version = get_server_version(configuration) kubernetes_distro = get_kubernetes_distro(configuration) kubernetes_properties = { 'Context.Default.AzureCLI.KubernetesVersion': kubernetes_version, 'Context.Default.AzureCLI.KubernetesDistro': kubernetes_distro } telemetry.add_extension_event('connectedk8s', kubernetes_properties) # Checking helm installation check_helm_install(kube_config, kube_context) # Check helm version helm_version = check_helm_version(kube_config, kube_context) telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.HelmVersion': helm_version}) # Validate location utils.validate_location(cmd, location) # Check whether Connected Cluster is present if not connected_cluster_exists(client, resource_group_name, cluster_name): telemetry.set_user_fault() telemetry.set_exception(exception='The connected cluster resource does not exist', fault_type=consts.Resource_Does_Not_Exist_Fault_Type, summary='Connected cluster resource does not exist') raise CLIError(""The connected cluster resource {} does not exist "".format(cluster_name) + ""in the resource group {} "".format(resource_group_name) + ""Please onboard the connected cluster using az connectedk8s command"") # Fetch Connected Cluster for agent version connected_cluster = get_connectedk8s(cmd, client, resource_group_name, cluster_name) # Adding helm repo utils.add_helm_repo(kube_config, kube_context) # Retrieving Helm chart OCI Artifact location registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(profile, location) # Set agent version in registry path if connected_cluster.agent_version is not None: if not utils.is_update_allowed(connected_cluster.agent_version): telemetry.set_user_fault() message = str.format(consts.Update_Not_Allowed, ""0.2.5"", ""0.1.214-dev"") telemetry.set_exception(exception=message, fault_type=consts.Update_Not_Allowed_Fault_Type, summary='Update not allowed') raise CLIError(message) registry_chart_path = registry_path.split(':')[0] registry_path = registry_chart_path + "":"" + connected_cluster.agent_version agent_version = registry_path.split(':')[1] telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.AgentVersion': agent_version}) # Get Helm chart path chart_path = utils.get_chart_path(registry_path, kube_config, kube_context) cmd_helm_upgrade = [""helm"", ""upgrade"", ""azure-arc"", chart_path, ""--reuse-values"", ""--set"", ""global.httpsProxy={}"".format(https_proxy), ""--set"", ""global.httpProxy={}"".format(http_proxy), ""--set"", ""global.noProxy={}"".format(no_proxy), ""--wait"", ""--kubeconfig"", kube_config, ""--output"", ""json""] if kube_context: 
cmd_helm_upgrade.extend([""--kube-context"", kube_context]) response_helm_upgrade = Popen(cmd_helm_upgrade, stdout=PIPE, stderr=PIPE) _, error_helm_upgrade = response_helm_upgrade.communicate() if response_helm_upgrade.returncode != 0: telemetry.set_exception(exception=error_helm_upgrade.decode(""ascii""), fault_type=consts.Install_HelmRelease_Fault_Type, summary='Unable to install helm release') raise CLIError(str.format(consts.Update_Agent_Failure, error_helm_upgrade.decode(""ascii""))) return str.format(consts.Update_Agent_Success, connected_cluster.name) ","def update_agents(cmd, client, resource_group_name, cluster_name, location=None, https_proxy="""", http_proxy="""", no_proxy="""", kube_config=None, kube_context=None, no_wait=False, tags=None): logger.warning(""Ensure that you have the latest helm version installed before proceeding."") logger.warning(""This operation might take a while...\n"") # Setting user profile profile = Profile(cli_ctx=cmd.cli_ctx) # Setting kubeconfig kube_config = set_kube_config(kube_config) # Removing quotes from kubeconfig path. This is necessary for windows OS. trim_kube_config(kube_config) # Escaping comma, forward slash present in https proxy urls, needed for helm params. https_proxy = escape_proxy_settings(https_proxy) # Escaping comma, forward slash present in http proxy urls, needed for helm params. http_proxy = escape_proxy_settings(http_proxy) # Escaping comma, forward slash present in no proxy urls, needed for helm params. no_proxy = escape_proxy_settings(no_proxy) # Loading the kubeconfig file in kubernetes client configuration try: config.load_kube_config(config_file=kube_config, context=kube_context) except Exception as e: telemetry.set_user_fault() telemetry.set_exception(exception=e, fault_type=consts.Load_Kubeconfig_Fault_Type, summary='Problem loading the kubeconfig file') raise CLIError(""Problem loading the kubeconfig file."" + str(e)) configuration = kube_client.Configuration() # Checking the connection to kubernetes cluster. # This check was added to avoid large timeouts when connecting to AAD Enabled AKS clusters # if the user had not logged in. 
check_kube_connection(configuration) # Get kubernetes cluster info for telemetry kubernetes_version = get_server_version(configuration) kubernetes_distro = get_kubernetes_distro(configuration) kubernetes_properties = { 'Context.Default.AzureCLI.KubernetesVersion': kubernetes_version, 'Context.Default.AzureCLI.KubernetesDistro': kubernetes_distro } telemetry.add_extension_event('connectedk8s', kubernetes_properties) # Checking helm installation check_helm_install(kube_config, kube_context) # Check helm version helm_version = check_helm_version(kube_config, kube_context) telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.HelmVersion': helm_version}) # Validate location utils.validate_location(cmd, location) # Check whether Connected Cluster is present if not connected_cluster_exists(client, resource_group_name, cluster_name): telemetry.set_user_fault() telemetry.set_exception(exception='The connected cluster resource does not exist', fault_type=consts.Resource_Does_Not_Exist_Fault_Type, summary='Connected cluster resource does not exist') raise CLIError(""The connected cluster resource {} does not exist "".format(cluster_name) + ""in the resource group {} "".format(resource_group_name) + ""Please onboard the connected cluster using: az connectedk8s connect -n -g "") # Fetch Connected Cluster for agent version connected_cluster = get_connectedk8s(cmd, client, resource_group_name, cluster_name) # Adding helm repo utils.add_helm_repo(kube_config, kube_context) # Retrieving Helm chart OCI Artifact location registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(profile, location) # Set agent version in registry path if connected_cluster.agent_version is not None: if not utils.is_update_allowed(connected_cluster.agent_version): telemetry.set_user_fault() message = str.format(consts.Update_Not_Allowed, ""0.2.5"", ""0.1.214-dev"") telemetry.set_exception(exception=message, fault_type=consts.Update_Not_Allowed_Fault_Type, summary='Update not allowed') raise CLIError(message) registry_chart_path = registry_path.split(':')[0] registry_path = registry_chart_path + "":"" + connected_cluster.agent_version agent_version = registry_path.split(':')[1] telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.AgentVersion': agent_version}) # Get Helm chart path chart_path = utils.get_chart_path(registry_path, kube_config, kube_context) cmd_helm_upgrade = [""helm"", ""upgrade"", ""azure-arc"", chart_path, ""--reuse-values"", ""--set"", ""global.httpsProxy={}"".format(https_proxy), ""--set"", ""global.httpProxy={}"".format(http_proxy), ""--set"", ""global.noProxy={}"".format(no_proxy), ""--wait"", ""--kubeconfig"", kube_config, ""--output"", ""json""] if kube_context: cmd_helm_upgrade.extend([""--kube-context"", kube_context]) response_helm_upgrade = Popen(cmd_helm_upgrade, stdout=PIPE, stderr=PIPE) _, error_helm_upgrade = response_helm_upgrade.communicate() if response_helm_upgrade.returncode != 0: telemetry.set_exception(exception=error_helm_upgrade.decode(""ascii""), fault_type=consts.Install_HelmRelease_Fault_Type, summary='Unable to install helm release') raise CLIError(str.format(consts.Update_Agent_Failure, error_helm_upgrade.decode(""ascii""))) return str.format(consts.Update_Agent_Success, connected_cluster.name) " 26114,"def batch_id(batch: PrePrepare): return BatchID(batch.viewNo, batch.ppSeqNo, batch.digest) ","def batch_id(batch: PrePrepare) -> BatchID: return BatchID(batch.viewNo, batch.ppSeqNo, batch.digest) " 
26595,"def do_setup(): """"""Perform the Airflow package setup."""""" write_version() setup( name='apache-airflow', description='Programmatically author, schedule and monitor data pipelines', long_description=long_description, long_description_content_type='text/markdown', license='Apache License 2.0', version=version, packages=find_packages(exclude=['tests*']), package_data={ '': ['airflow/alembic.ini', ""airflow/git_version""], 'airflow.serialization': [""*.json""], }, include_package_data=True, zip_safe=False, scripts=['airflow/bin/airflow'], ##################################################################################################### # IMPORTANT NOTE!!!!!!!!!!!!!!! # IF you are removing dependencies from this list, please make sure that you also increase # DEPENDENCIES_EPOCH_NUMBER in the Dockerfile ##################################################################################################### install_requires=[ 'alembic>=1.0, <2.0', 'argcomplete~=1.10', 'cached_property~=1.5', 'colorlog==4.0.2', 'croniter>=0.3.17, <0.4', 'cryptography>=0.9.3', 'dill>=0.2.2, <0.4', 'flask>=1.1.0, <2.0', ""flask-appbuilder>=2.2.0, <2.3.0"", 'flask-caching>=1.3.3, <1.4.0', 'flask-login>=0.3, <0.5', 'flask-swagger==0.2.13', 'flask-wtf>=0.14.2, <0.15', 'funcsigs>=1.0.0, <2.0.0', 'graphviz>=0.12', 'gunicorn>=19.5.0, <20.0', 'iso8601>=0.1.12', 'jsonschema~=3.0', 'json-merge-patch==0.2', 'jinja2>=2.10.1, <2.11.0', 'lazy_object_proxy~=1.3', 'markdown>=2.5.2, <3.0', 'marshmallow-sqlalchemy>=0.16.1, <0.19.0', 'pandas>=0.17.1, <1.0.0', 'pendulum==1.4.4', 'psutil>=4.2.0, <6.0.0', 'pygments>=2.0.1, <3.0', 'python-daemon>=2.1.1, <2.2', 'python-dateutil>=2.3, <3', 'requests>=2.20.0, <3', 'setproctitle>=1.1.8, <2', 'sqlalchemy~=1.3', 'tabulate>=0.7.5, <0.9', 'tenacity==4.12.0', 'termcolor==1.1.0', 'text-unidecode==1.2', 'typing;python_version<""3.6""', 'thrift>=0.9.2', 'tzlocal>=1.4,<2.0.0', 'unicodecsv>=0.14.1', 'zope.deprecation>=4.0, <5.0', 'typing-extensions>=3.7.4;python_version<""3.8""', ], ##################################################################################################### # IMPORTANT NOTE!!!!!!!!!!!!!!! 
# IF you are removing dependencies from this list, please make sure that you also increase # DEPENDENCIES_EPOCH_NUMBER in the Dockerfile ##################################################################################################### setup_requires=[ 'docutils>=0.14, <1.0', 'gitpython>=2.0.2', ], extras_require={ 'all': devel_all, 'devel_ci': devel_ci, 'all_dbs': all_dbs, 'atlas': atlas, 'async': async_packages, 'aws': aws, 'azure': azure, 'cassandra': cassandra, 'celery': celery, 'cgroups': cgroups, 'cloudant': cloudant, 'dask': dask, 'databricks': databricks, 'datadog': datadog, 'devel': devel_minreq, 'devel_hadoop': devel_hadoop, 'doc': doc, 'docker': docker, 'druid': druid, 'elasticsearch': elasticsearch, 'gcp': gcp, 'gcp_api': gcp, # TODO: remove this in Airflow 2.1 'github_enterprise': flask_oauth, 'google_auth': flask_oauth, 'grpc': grpc, 'hdfs': hdfs, 'hive': hive, 'jdbc': jdbc, 'jira': jira, 'kerberos': kerberos, 'kubernetes': kubernetes, 'ldap': ldap, 'mongo': mongo, 'mssql': mssql, 'mysql': mysql, 'oracle': oracle, 'papermill': papermill, 'password': password, 'pinot': pinot, 'postgres': postgres, 'qds': qds, 'rabbitmq': rabbitmq, 'redis': redis, 'salesforce': salesforce, 'samba': samba, 'sendgrid': sendgrid, 'sentry': sentry, 'segment': segment, 'slack': slack, 'pagerduty': pagerduty, 'snowflake': snowflake, 'ssh': ssh, 'statsd': statsd, 'vertica': vertica, 'webhdfs': webhdfs, 'winrm': winrm, }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: System :: Monitoring', ], author='Apache Software Foundation', author_email='dev@airflow.apache.org', url='http://airflow.apache.org/', download_url=( 'https://dist.apache.org/repos/dist/release/airflow/' + version), cmdclass={ 'extra_clean': CleanCommand, 'compile_assets': CompileAssets }, test_suite='setup.airflow_test_suite', python_requires='~=3.6', ) ","def do_setup(): """"""Perform the Airflow package setup."""""" write_version() setup( name='apache-airflow', description='Programmatically author, schedule and monitor data pipelines', long_description=long_description, long_description_content_type='text/markdown', license='Apache License 2.0', version=version, packages=find_packages(exclude=['tests*']), package_data={ '': ['airflow/alembic.ini', ""airflow/git_version""], 'airflow.serialization': [""*.json""], }, include_package_data=True, zip_safe=False, scripts=['airflow/bin/airflow'], ##################################################################################################### # IMPORTANT NOTE!!!!!!!!!!!!!!! 
# IF you are removing dependencies from this list, please make sure that you also increase # DEPENDENCIES_EPOCH_NUMBER in the Dockerfile ##################################################################################################### install_requires=[ 'alembic>=1.0, <2.0', 'argcomplete~=1.10', 'cached_property~=1.5', 'colorlog==4.0.2', 'croniter>=0.3.17, <0.4', 'cryptography>=0.9.3', 'dill>=0.2.2, <0.4', 'flask>=1.1.0, <2.0', 'flask-appbuilder>=2.2.0, <2.3.0', 'flask-caching>=1.3.3, <1.4.0', 'flask-login>=0.3, <0.5', 'flask-swagger==0.2.13', 'flask-wtf>=0.14.2, <0.15', 'funcsigs>=1.0.0, <2.0.0', 'graphviz>=0.12', 'gunicorn>=19.5.0, <20.0', 'iso8601>=0.1.12', 'jsonschema~=3.0', 'json-merge-patch==0.2', 'jinja2>=2.10.1, <2.11.0', 'lazy_object_proxy~=1.3', 'markdown>=2.5.2, <3.0', 'marshmallow-sqlalchemy>=0.16.1, <0.19.0', 'pandas>=0.17.1, <1.0.0', 'pendulum==1.4.4', 'psutil>=4.2.0, <6.0.0', 'pygments>=2.0.1, <3.0', 'python-daemon>=2.1.1, <2.2', 'python-dateutil>=2.3, <3', 'requests>=2.20.0, <3', 'setproctitle>=1.1.8, <2', 'sqlalchemy~=1.3', 'tabulate>=0.7.5, <0.9', 'tenacity==4.12.0', 'termcolor==1.1.0', 'text-unidecode==1.2', 'typing;python_version<""3.6""', 'thrift>=0.9.2', 'tzlocal>=1.4,<2.0.0', 'unicodecsv>=0.14.1', 'zope.deprecation>=4.0, <5.0', 'typing-extensions>=3.7.4;python_version<""3.8""', ], ##################################################################################################### # IMPORTANT NOTE!!!!!!!!!!!!!!! # IF you are removing dependencies from this list, please make sure that you also increase # DEPENDENCIES_EPOCH_NUMBER in the Dockerfile ##################################################################################################### setup_requires=[ 'docutils>=0.14, <1.0', 'gitpython>=2.0.2', ], extras_require={ 'all': devel_all, 'devel_ci': devel_ci, 'all_dbs': all_dbs, 'atlas': atlas, 'async': async_packages, 'aws': aws, 'azure': azure, 'cassandra': cassandra, 'celery': celery, 'cgroups': cgroups, 'cloudant': cloudant, 'dask': dask, 'databricks': databricks, 'datadog': datadog, 'devel': devel_minreq, 'devel_hadoop': devel_hadoop, 'doc': doc, 'docker': docker, 'druid': druid, 'elasticsearch': elasticsearch, 'gcp': gcp, 'gcp_api': gcp, # TODO: remove this in Airflow 2.1 'github_enterprise': flask_oauth, 'google_auth': flask_oauth, 'grpc': grpc, 'hdfs': hdfs, 'hive': hive, 'jdbc': jdbc, 'jira': jira, 'kerberos': kerberos, 'kubernetes': kubernetes, 'ldap': ldap, 'mongo': mongo, 'mssql': mssql, 'mysql': mysql, 'oracle': oracle, 'papermill': papermill, 'password': password, 'pinot': pinot, 'postgres': postgres, 'qds': qds, 'rabbitmq': rabbitmq, 'redis': redis, 'salesforce': salesforce, 'samba': samba, 'sendgrid': sendgrid, 'sentry': sentry, 'segment': segment, 'slack': slack, 'pagerduty': pagerduty, 'snowflake': snowflake, 'ssh': ssh, 'statsd': statsd, 'vertica': vertica, 'webhdfs': webhdfs, 'winrm': winrm, }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: System :: Monitoring', ], author='Apache Software Foundation', author_email='dev@airflow.apache.org', url='http://airflow.apache.org/', download_url=( 'https://dist.apache.org/repos/dist/release/airflow/' + version), cmdclass={ 'extra_clean': CleanCommand, 'compile_assets': CompileAssets }, 
test_suite='setup.airflow_test_suite', python_requires='~=3.6', ) " 16730,"def _format_mac(mac_address: str) -> str: """"""Format a mac address for matching."""""" return format_mac(mac_address).replace("":"", """") ","def _format_mac(mac_address): """"""Format a mac address for matching."""""" return format_mac(mac_address).replace("":"", """") " 59204,"def main(): import os from optparse import OptionParser usage = ""profile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."" parser = OptionParser(usage=usage) parser.allow_interspersed_args = False parser.add_option('-o', '--outfile', dest=""outfile"", help=""Save stats to "", default=None) parser.add_option('-m', dest=""module"", action=""store_true"", help=""Profile a library module."", default=False) parser.add_option('-s', '--sort', dest=""sort"", help=""Sort order when printing to stdout, based on pstats.Stats class"", default=-1) if not sys.argv[1:]: parser.print_usage() sys.exit(2) (options, args) = parser.parse_args() sys.argv[:] = args # the script that we're profiling may chdir, so capture the absolute path # to the output file at startup if options.outfile is not None: options.outfile = os.path.abspath(options.outfile) if len(args) > 0: if options.module: import runpy code = ""run_module(modname, run_name='__main__')"" globs = { 'run_module': runpy.run_module, 'modname': args[0] } else: progname = args[0] sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') globs = { '__file__': progname, '__name__': '__main__', '__package__': None, '__cached__': None, } runctx(code, globs, None, options.outfile, options.sort) else: parser.print_usage() return parser ","def main(): import os from optparse import OptionParser usage = ""profile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."" parser = OptionParser(usage=usage) parser.allow_interspersed_args = False parser.add_option('-o', '--outfile', dest=""outfile"", help=""Save stats to "", default=None) parser.add_option('-m', dest=""module"", action=""store_true"", help=""Profile a library module."", default=False) parser.add_option('-s', '--sort', dest=""sort"", help=""Sort order when printing to stdout, based on pstats.Stats class"", default=-1) if not sys.argv[1:]: parser.print_usage() sys.exit(2) (options, args) = parser.parse_args() sys.argv[:] = args # The script that we're profiling may chdir, so capture the absolute path # to the output file at startup. 
if options.outfile is not None: options.outfile = os.path.abspath(options.outfile) if len(args) > 0: if options.module: import runpy code = ""run_module(modname, run_name='__main__')"" globs = { 'run_module': runpy.run_module, 'modname': args[0] } else: progname = args[0] sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') globs = { '__file__': progname, '__name__': '__main__', '__package__': None, '__cached__': None, } runctx(code, globs, None, options.outfile, options.sort) else: parser.print_usage() return parser " 13877,"def print_json_report(covdata, options): if options.output: OUTPUT = open(options.output, 'w') else: OUTPUT = sys.stdout total_lines = 0 total_covered = 0 json_dict = {} json_dict['current_working_directory'] = options.root json_dict['format_version'] = FORMAT_VERSION json_dict['files'] = [] # Data keys = sort_coverage( covdata, show_branch=options.show_branch, by_num_uncovered=options.sort_uncovered, by_percent_uncovered=options.sort_percent) def _summarize_file_coverage(coverage): filename_str = options.root_filter.sub('', coverage.filename) if not coverage.filename.endswith(filename_str): # Do no truncation if the filter does not start matching at # the beginning of the string filename_str = coverage.filename filename_str = filename_str.replace('\\', '/') if options.show_branch: total, cover, percent = coverage.branch_coverage() uncovered_lines = coverage.uncovered_branches_str() else: total, cover, percent = coverage.line_coverage() uncovered_lines = coverage.uncovered_lines_str() return (filename_str, total, cover, percent, uncovered_lines) for key in keys: (filename, t, n, percent, uncovered_lines) = _summarize_file_coverage(covdata[key]) total_lines += t total_covered += n json_dict['files'].append({ 'file': filename, 'total': t, 'covered': n, 'percent': percent, }) # Footer & summary percent = calculate_coverage(total_covered, total_lines, nan_value=None) json_dict['total'] = total_lines json_dict['covered'] = total_covered json_dict['percent'] = percent if options.json_pretty_summary: json_str = json.dumps(json_dict, indent=PRETTY_JSON_INDENT) else: json_str = json.dumps(json_dict) OUTPUT.write(json_str) # Close logfile if options.output: OUTPUT.close() ","def print_json_summary_report(covdata, options): if options.output: OUTPUT = open(options.output, 'w') else: OUTPUT = sys.stdout total_lines = 0 total_covered = 0 json_dict = {} json_dict['current_working_directory'] = options.root json_dict['format_version'] = FORMAT_VERSION json_dict['files'] = [] # Data keys = sort_coverage( covdata, show_branch=options.show_branch, by_num_uncovered=options.sort_uncovered, by_percent_uncovered=options.sort_percent) def _summarize_file_coverage(coverage): filename_str = options.root_filter.sub('', coverage.filename) if not coverage.filename.endswith(filename_str): # Do no truncation if the filter does not start matching at # the beginning of the string filename_str = coverage.filename filename_str = filename_str.replace('\\', '/') if options.show_branch: total, cover, percent = coverage.branch_coverage() uncovered_lines = coverage.uncovered_branches_str() else: total, cover, percent = coverage.line_coverage() uncovered_lines = coverage.uncovered_lines_str() return (filename_str, total, cover, percent, uncovered_lines) for key in keys: (filename, t, n, percent, uncovered_lines) = _summarize_file_coverage(covdata[key]) total_lines += t total_covered += n json_dict['files'].append({ 'file': filename, 
'total': t, 'covered': n, 'percent': percent, }) # Footer & summary percent = calculate_coverage(total_covered, total_lines, nan_value=None) json_dict['total'] = total_lines json_dict['covered'] = total_covered json_dict['percent'] = percent if options.json_pretty_summary: json_str = json.dumps(json_dict, indent=PRETTY_JSON_INDENT) else: json_str = json.dumps(json_dict) OUTPUT.write(json_str) # Close logfile if options.output: OUTPUT.close() " 36097,"def delete_nodes( pks: Iterable[int], verbosity: Optional[int] = None, dry_run: Union[bool, Callable[[Set[int]], bool]] = True, force: Optional[bool] = None, **traversal_rules: bool ) -> Tuple[Set[int], bool]: """"""Delete nodes given a list of ""starting"" PKs. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. .. deprecated:: 1.6.0 The `verbosity` keyword will be removed in `v2.0.0`, set the level of `DELETE_LOGGER` instead. .. deprecated:: 1.6.0 The `force` keyword will be removed in `v2.0.0`, use the `dry_run` option instead. :param pks: a list of starting PKs of the nodes to delete (the full set will be based on the traversal rules) :param dry_run: If True, return the pks to delete without deleting anything. If False, delete the pks without confirmation If callable, a function that return True/False, based on the pks, e.g. ``dry_run=lambda pks: True`` :param traversal_rules: graph traversal rules. See :const:`aiida.common.links.GraphTraversalRules` for what rule names are toggleable and what the defaults are. :returns: (pks to delete, whether they were deleted) """""" # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements if verbosity is not None: warnings.warn( 'The verbosity option is deprecated and will be removed in `aiida-core==2.0.0`. ' 'Set the level of DELETE_LOGGER instead', AiidaDeprecationWarning ) # pylint: disable=no-member if force is not None: warnings.warn( 'The force option is deprecated and will be removed in `aiida-core==2.0.0`. 
' 'Use dry_run instead', AiidaDeprecationWarning ) # pylint: disable=no-member if force is True: dry_run = False def _missing_callback(_pks: Iterable[int]): for _pk in _pks: DELETE_LOGGER.warning(f'warning: node with pk<{_pk}> does not exist, skipping') pks_set_to_delete = get_nodes_delete(pks, get_links=False, missing_callback=_missing_callback, **traversal_rules)['nodes'] DELETE_LOGGER.info('%s Node(s) marked for deletion', len(pks_set_to_delete)) if pks_set_to_delete and DELETE_LOGGER.level == logging.DEBUG: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) DELETE_LOGGER.debug('Node(s) to delete:') for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string DELETE_LOGGER.debug(f' {uuid} {pk} {short_type_string} {label}') if dry_run is True: DELETE_LOGGER.info('This was a dry run, exiting without deleting anything') return (pks_set_to_delete, False) # confirm deletion if callable(dry_run) and dry_run(pks_set_to_delete): DELETE_LOGGER.info('This was a dry run, exiting without deleting anything') return (pks_set_to_delete, False) if not pks_set_to_delete: return (pks_set_to_delete, True) # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access DELETE_LOGGER.info('Starting node deletion...') delete_nodes_and_connections(pks_set_to_delete) DELETE_LOGGER.info('Nodes deleted from database, deleting files from the repository now...') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) DELETE_LOGGER.info('Deletion of nodes completed.') return (pks_set_to_delete, True) ","def delete_nodes( pks: Iterable[int], verbosity: Optional[int] = None, dry_run: Union[bool, Callable[[Set[int]], bool]] = True, force: Optional[bool] = None, **traversal_rules: bool ) -> Tuple[Set[int], bool]: """"""Delete nodes given a list of ""starting"" PKs. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the Topics - Provenance section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. .. deprecated:: 1.6.0 The `verbosity` keyword will be removed in `v2.0.0`, set the level of `DELETE_LOGGER` instead. .. deprecated:: 1.6.0 The `force` keyword will be removed in `v2.0.0`, use the `dry_run` option instead. 
:param pks: a list of starting PKs of the nodes to delete (the full set will be based on the traversal rules) :param dry_run: If True, return the pks to delete without deleting anything. If False, delete the pks without confirmation If callable, a function that return True/False, based on the pks, e.g. ``dry_run=lambda pks: True`` :param traversal_rules: graph traversal rules. See :const:`aiida.common.links.GraphTraversalRules` for what rule names are toggleable and what the defaults are. :returns: (pks to delete, whether they were deleted) """""" # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements if verbosity is not None: warnings.warn( 'The verbosity option is deprecated and will be removed in `aiida-core==2.0.0`. ' 'Set the level of DELETE_LOGGER instead', AiidaDeprecationWarning ) # pylint: disable=no-member if force is not None: warnings.warn( 'The force option is deprecated and will be removed in `aiida-core==2.0.0`. ' 'Use dry_run instead', AiidaDeprecationWarning ) # pylint: disable=no-member if force is True: dry_run = False def _missing_callback(_pks: Iterable[int]): for _pk in _pks: DELETE_LOGGER.warning(f'warning: node with pk<{_pk}> does not exist, skipping') pks_set_to_delete = get_nodes_delete(pks, get_links=False, missing_callback=_missing_callback, **traversal_rules)['nodes'] DELETE_LOGGER.info('%s Node(s) marked for deletion', len(pks_set_to_delete)) if pks_set_to_delete and DELETE_LOGGER.level == logging.DEBUG: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) DELETE_LOGGER.debug('Node(s) to delete:') for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string DELETE_LOGGER.debug(f' {uuid} {pk} {short_type_string} {label}') if dry_run is True: DELETE_LOGGER.info('This was a dry run, exiting without deleting anything') return (pks_set_to_delete, False) # confirm deletion if callable(dry_run) and dry_run(pks_set_to_delete): DELETE_LOGGER.info('This was a dry run, exiting without deleting anything') return (pks_set_to_delete, False) if not pks_set_to_delete: return (pks_set_to_delete, True) # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access DELETE_LOGGER.info('Starting node deletion...') delete_nodes_and_connections(pks_set_to_delete) DELETE_LOGGER.info('Nodes deleted from database, deleting files from the repository now...') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) DELETE_LOGGER.info('Deletion of nodes completed.') return (pks_set_to_delete, True) " 45979,"def sampson_epipolar_distance( pts1: torch.Tensor, pts2: torch.Tensor, Fm: torch.Tensor, squared: bool = True, eps: float = 1e-8 ) -> torch.Tensor: r""""""Return Sampson distance for correspondences given the fundamental matrix. Args: pts1: correspondences from the left images with shape (*, N, 2 or 3). If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape (*, N, 2 or 3). If they are not homogeneous, converted automatically. 
Fm: Fundamental matrices with shape :math:`(*, 3, 3)`. Called Fm to avoid ambiguity with torch.nn.functional. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed Sampson distance with shape :math:`(*, N)`. """""" if not isinstance(Fm, torch.Tensor): raise TypeError(f""Fm type is not a torch.Tensor. Got {type(Fm)}"") if (len(Fm.shape) < 3) or not Fm.shape[-2:] == (3, 3): raise ValueError(f""Fm must be a (*, 3, 3) tensor. Got {Fm.shape}"") if pts1.shape[-1] == 2: pts1 = convert_points_to_homogeneous(pts1) if pts2.shape[-1] == 2: pts2 = convert_points_to_homogeneous(pts2) # From Hartley and Zisserman, Sampson error (11.9) # sam = (x'^T F x) ** 2 / ( (((Fx)_1**2) + (Fx)_2**2)) + (((F^Tx')_1**2) + (F^Tx')_2**2)) ) # line1_in_2: torch.Tensor = (F @ pts1.transpose(dim0=-2, dim1=-1)).transpose(dim0=-2, dim1=-1) # line2_in_1: torch.Tensor = (F.transpose(dim0=-2, dim1=-1) @ pts2.transpose(dim0=-2, dim1=-1)).transpose( # dim0=-2, dim1=-1 # ) # Instead we can just transpose F once and switch the order of multiplication F_t: torch.Tensor = Fm.transpose(dim0=-2, dim1=-1) line1_in_2: torch.Tensor = pts1 @ F_t line2_in_1: torch.Tensor = pts2 @ Fm # numerator = (x'^T F x) ** 2 numerator: torch.Tensor = (pts2 * line1_in_2).sum(dim=-1).pow(2) # denominator = (((Fx)_1**2) + (Fx)_2**2)) + (((F^Tx')_1**2) + (F^Tx')_2**2)) denominator: torch.Tensor = line1_in_2[..., :2].norm(2, dim=-1).pow(2) + line2_in_1[..., :2].norm(2, dim=-1).pow(2) out: torch.Tensor = numerator / denominator if squared: return out return (out + eps).sqrt() ","def sampson_epipolar_distance( pts1: torch.Tensor, pts2: torch.Tensor, Fm: torch.Tensor, squared: bool = True, eps: float = 1e-8 ) -> torch.Tensor: r""""""Return Sampson distance for correspondences given the fundamental matrix. Args: pts1: correspondences from the left images with shape (*, N, 2 or 3). If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape (*, N, 2 or 3). If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:`(*, 3, 3)`. Called Fm to avoid ambiguity with torch.nn.functional. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed Sampson distance with shape :math:`(*, N)`. """""" if not isinstance(Fm, torch.Tensor): raise TypeError(f""Fm type is not a torch.Tensor. Got {type(Fm)}"") if (len(Fm.shape) < 3) or not Fm.shape[-2:] == (3, 3): raise ValueError(f""Fm must be a (*, 3, 3) tensor. 
Got {Fm.shape}"") if pts1.shape[-1] == 2: pts1 = convert_points_to_homogeneous(pts1) if pts2.shape[-1] == 2: pts2 = convert_points_to_homogeneous(pts2) # From Hartley and Zisserman, Sampson error (11.9) # sam = (x'^T F x) ** 2 / ( (((Fx)_1**2) + (Fx)_2**2)) + (((F^Tx')_1**2) + (F^Tx')_2**2)) ) # line1_in_2: torch.Tensor = (F @ pts1.transpose(dim0=-2, dim1=-1)).transpose(dim0=-2, dim1=-1) # line2_in_1: torch.Tensor = (F.transpose(dim0=-2, dim1=-1) @ pts2.transpose(dim0=-2, dim1=-1)).transpose( # dim0=-2, dim1=-1 # ) # Instead we can just transpose F once and switch the order of multiplication F_t: torch.Tensor = Fm.transpose(dim0=-2, dim1=-1) line1_in_2: torch.Tensor = pts1 @ F_t line2_in_1: torch.Tensor = pts2 @ Fm # numerator = (x'^T F x) ** 2 numerator: torch.Tensor = (pts2 * line1_in_2).sum(-1).pow(2) # denominator = (((Fx)_1**2) + (Fx)_2**2)) + (((F^Tx')_1**2) + (F^Tx')_2**2)) denominator: torch.Tensor = line1_in_2[..., :2].norm(2, dim=-1).pow(2) + line2_in_1[..., :2].norm(2, dim=-1).pow(2) out: torch.Tensor = numerator / denominator if squared: return out return (out + eps).sqrt() " 14069,"def check_equality(left, right, check_less_precise): if check_less_precise: if not geom_almost_equals(left, right): unequal_geoms = left[left.geom_almost_equals(right)] raise AssertionError( f""{len(unequal_geoms)} out of {len(left)} geometries"" f"" are not almost equal. These geometries are "" f""not almost equal: {unequal_geoms}"" ) else: if not geom_equals(left, right): unequal_geoms = left[left.geom_equals(right)] raise AssertionError( f""{len(unequal_geoms)} out of {len(left)} geometries"" f"" are not almost equal. These geometries are "" f""not almost equal: {unequal_geoms}"" ) ","def check_equality(left, right, check_less_precise): if check_less_precise: if not geom_almost_equals(left, right): unequal_geoms = left[left.geom_almost_equals(right)] raise AssertionError( f""{len(unequal_geoms)} out of {len(left)} geometries"" f"" are not almost equal. These geometries are "" f""not almost equal: {unequal_geoms}"" ) else: if not geom_equals(left, right): unequal_geoms = left[~left.geom_equals(right)] raise AssertionError( f""{len(unequal_geoms)} out of {len(left)} geometries"" f"" are not almost equal. These geometries are "" f""not almost equal: {unequal_geoms}"" ) " 46337,"def check_version(rule, target): if not isinstance(rule[0], type(target)): if rule > target: return False else: for i, rule_item in enumerate(rule): if rule_item > target: return False if rule_item[0]==target[0]: break return True ","def check_version(rule, target): if not isinstance(rule[0], type(target)): if rule > target: return False else: for rule_item in rule: if rule_item > target: return False if rule_item[0]==target[0]: break return True " 7306,"def is_low_contrast(image, fraction_threshold=0.05, lower_percentile=1, upper_percentile=99, method='linear'): """"""Determine if an image is low contrast. Parameters ---------- image : array-like The image under test. fraction_threshold : float, optional The low contrast fraction threshold. An image is considered low- contrast when its range of brightness spans less than this fraction of its data type's full range. [1]_ lower_percentile : float, optional Disregard values below this percentile when computing image contrast. upper_percentile : float, optional Disregard values above this percentile when computing image contrast. method : str, optional The contrast determination method. Right now the only available option is ""linear"". 
Returns ------- out : bool True when the image is determined to be low contrast. Notes ----- For boolean images, this function returns False only if all values are the same (the method, threshold and percentile arguments are ignored). References ---------- .. [1] https://scikit-image.org/docs/dev/user_guide/data_types.html Examples -------- >>> image = np.linspace(0, 0.04, 100) >>> is_low_contrast(image) True >>> image[-1] = 1 >>> is_low_contrast(image) True >>> is_low_contrast(image, upper_percentile=100) False """""" image = np.asanyarray(image) if image.dtype == bool: return not ((b.max() == 1) and (b.min() == 0)) if image.ndim == 3: if image.shape[2] == 4: image = rgba2rgb(image) if image.shape[2] == 3: image = rgb2gray(image) dlimits = dtype_limits(image, clip_negative=False) limits = np.percentile(image, [lower_percentile, upper_percentile]) ratio = (limits[1] - limits[0]) / (dlimits[1] - dlimits[0]) return ratio < fraction_threshold ","def is_low_contrast(image, fraction_threshold=0.05, lower_percentile=1, upper_percentile=99, method='linear'): """"""Determine if an image is low contrast. Parameters ---------- image : array-like The image under test. fraction_threshold : float, optional The low contrast fraction threshold. An image is considered low- contrast when its range of brightness spans less than this fraction of its data type's full range. [1]_ lower_percentile : float, optional Disregard values below this percentile when computing image contrast. upper_percentile : float, optional Disregard values above this percentile when computing image contrast. method : str, optional The contrast determination method. Right now the only available option is ""linear"". Returns ------- out : bool True when the image is determined to be low contrast. Notes ----- For boolean images, this function returns False only if all values are the same (the method, threshold, and percentile arguments are ignored). References ---------- .. [1] https://scikit-image.org/docs/dev/user_guide/data_types.html Examples -------- >>> image = np.linspace(0, 0.04, 100) >>> is_low_contrast(image) True >>> image[-1] = 1 >>> is_low_contrast(image) True >>> is_low_contrast(image, upper_percentile=100) False """""" image = np.asanyarray(image) if image.dtype == bool: return not ((b.max() == 1) and (b.min() == 0)) if image.ndim == 3: if image.shape[2] == 4: image = rgba2rgb(image) if image.shape[2] == 3: image = rgb2gray(image) dlimits = dtype_limits(image, clip_negative=False) limits = np.percentile(image, [lower_percentile, upper_percentile]) ratio = (limits[1] - limits[0]) / (dlimits[1] - dlimits[0]) return ratio < fraction_threshold " 17379,"def _infer_xy_labels(darray, x, y, imshow=False, rgb=None): """""" Determine x and y labels. For use in _plot2d darray must be a 2 dimensional data array, or 3d for imshow only. 
"""""" assert x is None or x != y if imshow and darray.ndim == 3: return _infer_xy_labels_3d(darray, x, y, rgb) if x is None and y is None: if darray.ndim != 2: raise ValueError(""DataArray must be 2d"") y, x = darray.dims elif x is None: if ( y not in darray.dims and y not in darray.coords and y not in darray._level_coords ): raise ValueError(""y must be a dimension name if x is not supplied"") x = darray.dims[0] if y == darray.dims[1] else darray.dims[1] elif y is None: if ( x not in darray.dims and x not in darray.coords and x not in darray._level_coords ): raise ValueError(""x must be a dimension name if y is not supplied"") y = darray.dims[0] if x == darray.dims[1] else darray.dims[1] else: if any( k not in darray.coords and k not in darray.dims and k not in darray._level_coords for k in (x, y) ): raise ValueError(""x and y must be coordinate variables"") elif ( all(k in darray._level_coords for k in (x, y)) and darray._level_coords[x] == darray._level_coords[y] ): raise ValueError(""x and y cannot be levels of the same MultiIndex"") elif darray._level_coords.get(x, x) == darray._level_coords.get(y, y): raise ValueError(""x and y cannot be a MultiIndex and one of its levels"") return x, y ","def _infer_xy_labels(darray, x, y, imshow=False, rgb=None): """""" Determine x and y labels. For use in _plot2d darray must be a 2 dimensional data array, or 3d for imshow only. """""" assert x is None or x != y if imshow and darray.ndim == 3: return _infer_xy_labels_3d(darray, x, y, rgb) if x is None and y is None: if darray.ndim != 2: raise ValueError(""DataArray must be 2d"") y, x = darray.dims elif x is None: if ( y not in darray.dims and y not in darray.coords and y not in darray._level_coords ): raise ValueError(""y must be None, a dimension name, coordinate variable or coordinate level"") x = darray.dims[0] if y == darray.dims[1] else darray.dims[1] elif y is None: if ( x not in darray.dims and x not in darray.coords and x not in darray._level_coords ): raise ValueError(""x must be a dimension name if y is not supplied"") y = darray.dims[0] if x == darray.dims[1] else darray.dims[1] else: if any( k not in darray.coords and k not in darray.dims and k not in darray._level_coords for k in (x, y) ): raise ValueError(""x and y must be coordinate variables"") elif ( all(k in darray._level_coords for k in (x, y)) and darray._level_coords[x] == darray._level_coords[y] ): raise ValueError(""x and y cannot be levels of the same MultiIndex"") elif darray._level_coords.get(x, x) == darray._level_coords.get(y, y): raise ValueError(""x and y cannot be a MultiIndex and one of its levels"") return x, y " 46601,"def deprecated( key: str, message: str = ""'$KEY' is deprecated. 
Change your code and config to use '$NEW_KEY'"", *, _parent_: Container, _node_: Optional[Node], ) -> Any: from omegaconf._impl import select_node if not isinstance(key, str): raise ValueError( f""oc.deprecated: interpolation key type is not a string ({type(key).__name__})"" ) if not isinstance(message, str): raise ValueError( f""oc.deprecated: interpolation message type is not a string ({type(message).__name__})"" ) assert _node_ is not None full_key = _node_._get_full_key(key=None) target_node = select_node(_parent_, key, absolute_key=True) if target_node is None: raise ConfigKeyError( f""In oc.deprecate resolver at '{full_key}': Key not found: '{key}'"" ) new_key = target_node._get_full_key(key=None) msg = string.Template(message).safe_substitute( KEY=full_key, NEW_KEY=new_key, ) warnings.warn(category=UserWarning, message=msg) return target_node ","def deprecated( key: str, message: str = ""'$KEY' is deprecated. Change your code and config to use '$NEW_KEY'"", *, _parent_: Container, _node_: Optional[Node], ) -> Any: from omegaconf._impl import select_node if not isinstance(key, str): raise ValueError( f""oc.deprecated: interpolation key type is not a string ({type(key).__name__})"" ) if not isinstance(message, str): raise TypeError( f""oc.deprecated: interpolation message type is not a string ({type(message).__name__})"" ) assert _node_ is not None full_key = _node_._get_full_key(key=None) target_node = select_node(_parent_, key, absolute_key=True) if target_node is None: raise ConfigKeyError( f""In oc.deprecate resolver at '{full_key}': Key not found: '{key}'"" ) new_key = target_node._get_full_key(key=None) msg = string.Template(message).safe_substitute( KEY=full_key, NEW_KEY=new_key, ) warnings.warn(category=UserWarning, message=msg) return target_node " 31525,"def get_report_command(args: dict): """""" pingcastle-get-report command: Returns the last report sent by PingCastle Args: args (dict): A dict object containing the arguments for this command """""" delete_report = args.get('delete_report') == 'Yes' context = demisto.getIntegrationContext() report = context.get('report') if report is None: return 'No report available' if delete_report: context.pop('report') demisto.setIntegrationContext(context) return CommandResults( outputs_prefix='PingCastle.Report', outputs={'report': report}, raw_response=report ) ","def get_report_command(args: dict): """""" pingcastle-get-report command: Returns the last report sent by PingCastle Args: args (dict): A dict object containing the arguments for this command """""" delete_report = args.get('delete_report') == 'Yes' context = demisto.getIntegrationContext() report = context.get('report') if report is None: return 'No report available' if delete_report: context.pop('report') set_integration_context(context) return CommandResults( outputs_prefix='PingCastle.Report', outputs={'report': report}, raw_response=report ) " 44199,"def factorize(two, tol): r""""""Return double-factorized form of a two-electron tensor. The second quantized electronic Hamiltonian is constructed in terms of fermionic creation, :math:`a^{\dagger}` , and annihilation, :math:`a`, operators as [`arXiv:1902.02134 `_] .. 
math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha}, where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as .. math:: h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right) \phi_q(r) dr, and .. math:: h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2. Rearranging the integrals in the chemist notation, [11|22], gives .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}. with .. math:: T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}. and :math:`V` is the two-electron tensor in chemist notation. The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that .. math:: V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}. with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized and truncated in a second level of factorization. The algorithm has the following steps [`arXiv:1902.02134 `_]. 1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \ :math:`n^2 \times n^2` matrix where n is the number of orbitals. 2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \ corresponding eigenvalues larger than a threshold. 3. Reshape the selected eigenvectors to :math:`n \times n` matrices. 4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \ eigenvalues is larger than a threshold. Args: two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis arranged in chemist notation [11|22] tol (float): cutoff value for discarding the negligible factors Returns: tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron tensor, eigenvalues of the generated factors, eigenvectors of the generated factors **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772 >>> mol = qml.qchem.Molecule(symbols, geometry) >>> core, one, two = qml.qchem.electron_integrals(mol)() >>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation >>> l, w, v = factorize(two, 1e-5) >>> print(l) [[[ 1.06723440e-01 9.73575768e-15] [ 8.36288956e-15 -1.04898533e-01]] [[-2.20945401e-13 -4.25688222e-01] [-4.25688222e-01 -2.98228790e-13]] [[-8.14472856e-01 5.01669019e-13] [ 5.01689072e-13 -8.28642140e-01]]] """""" n = two.shape[0] two = two.reshape(n * n, n * n) eigvals, eigvecs = np.linalg.eigh(two) eigvals = np.array([val for val in eigvals if abs(val) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals))) factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))]) eigvals, eigvecs = np.linalg.eigh(factors) eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] return factors, eigvals, eigvecs ","def factorize(two, tol): r""""""Return double-factorized form of a two-electron tensor. 
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation, :math:`a^{\dagger}` , and annihilation, :math:`a`, operators as [`arXiv:1902.02134 `_] .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha}, where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as .. math:: h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right) \phi_q(r) dr, and .. math:: h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2. Rearranging the integrals in the chemist notation, [11|22], gives .. math:: H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger} a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs} V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}. with .. math:: T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}. and :math:`V` is the two-electron tensor in chemist notation. The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that .. math:: V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}. with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized and truncated in a second level of factorization. The algorithm has the following steps [`arXiv:1902.02134 `_]. 1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \ :math:`n^2 \times n^2` matrix where n is the number of orbitals. 2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \ corresponding eigenvalues larger than a threshold. 3. Reshape the selected eigenvectors to :math:`n \times n` matrices. 4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \ eigenvalues is larger than a threshold. 
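A rough NumPy sketch of the first factorization level (steps 1-3 above); the helper name and arguments are illustrative only and are meant to mirror, not replace, the implementation below:

>>> import numpy as np
>>> def first_factorization(two, tol):
...     n = two.shape[0]
...     mat = two.reshape(n * n, n * n)              # step 1: matricize the tensor
...     vals, vecs = np.linalg.eigh(mat)             # step 2: diagonalize
...     keep = np.abs(vals) > tol                    # discard negligible eigenvalues
...     vecs = vecs[:, keep] * np.sqrt(np.abs(vals[keep]))
...     return vecs.T.reshape(-1, n, n)              # step 3: reshape to n x n factors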
Args: two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis arranged in chemist notation [11|22] tol (float): cutoff value for discarding the negligible factors Returns: tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron tensor, eigenvalues of the generated factors, eigenvectors of the generated factors **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772 >>> mol = qml.qchem.Molecule(symbols, geometry) >>> core, one, two = qml.qchem.electron_integrals(mol)() >>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation >>> l, w, v = factorize(two, 1e-5) >>> print(l) [[[ 1.06723440e-01 9.73575768e-15] [ 8.36288956e-15 -1.04898533e-01]] [[-2.20945401e-13 -4.25688222e-01] [-4.25688222e-01 -2.98228790e-13]] [[-8.14472856e-01 5.01669019e-13] [ 5.01689072e-13 -8.28642140e-01]]] """""" n = two.shape[0] two = two.reshape(n * n, n * n) eigvals, eigvecs = np.linalg.eigh(two) eigvals = np.array([val for val in eigvals if abs(val) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals))) factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))]) eigvals, eigvecs = np.linalg.eigh(factors) eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol]) eigvecs = eigvecs[:, -len(eigvals) :] return factors, eigvals, eigvecs " 34259,"def write_text_file( content: Text, file_path: Union[Text, Path], encoding: Text = DEFAULT_ENCODING, append: bool = False, ) -> None: """"""Writes text to a file. Args: content: The content to write. file_path: The path to which the content should be written. encoding: The encoding which should be used. append: Whether to append to the file or to truncate the file. """""" mode = ""a"" if append else ""w"" with open(file_path, mode, encoding=encoding) as file: file.write(content) ","def write_text_file( content: Text, file_path: Union[Text, Path], encoding: Text = DEFAULT_ENCODING, append: bool = False, ) -> None: """"""Writes text to a file. Args: content: The content to write. file_path: The path to which the content should be written. encoding: The encoding which should be used. append: Whether to append to the file or to truncate the file. 
"""""" mode = ""a"" if append else ""w"" with open(file_path, mode, encoding=encoding) as file: file.write(content) " 32721,"def test_callproc_invalid(conn_cnx): """"""Test invalid callproc"""""" with conn_cnx() as cnx: with cnx.cursor() as cur: cur.execute(""drop procedure if exists output_message(varchar)"") # stored procedure does not exist with pytest.raises(ProgrammingError) as pe: cur.callproc(""output_message"") assert pe.errno == 1044 cur.execute( """""" create or replace procedure output_message(message varchar) returns varchar not null language sql as begin return message; end; """""" ) # parameters do not match the signature with pytest.raises(ProgrammingError) as pe: cur.callproc(""output_message"") assert pe.errno == 1044 with pytest.raises(TypeError): cur.callproc(""output_message"", ""test varchar"") ret = cur.callproc(""output_message"", (""test varchar"",)) assert ret == (""test varchar"",) res = cur.fetchall() assert len(res) == 1 assert len(res[0]) == 1 assert res[0][0] == ""test varchar"" ","def test_callproc_invalid(conn_cnx): """"""Test invalid callproc"""""" with conn_cnx() as cnx: with cnx.cursor() as cur: cur.execute(""drop procedure if exists output_message(varchar)"") # stored procedure does not exist with pytest.raises(ProgrammingError) as pe: cur.callproc(""output_message"") assert pe.errno == 1044 cur.execute( """""" create or replace procedure output_message(message varchar) returns varchar not null language sql as begin return message; end; """""" ) # parameters do not match the signature with pytest.raises(ProgrammingError) as pe: cur.callproc(""output_message"") assert pe.errno == 1044 with pytest.raises(TypeError): cur.callproc(""output_message"", ""test varchar"") ret = cur.callproc(""output_message"", (""test varchar"",)) assert ret == (""test varchar"",) assert cur.fetchall() == ((""test varchar"",),) " 15358,"def validate_yaml_suffix(value): """"""Validate value has a YAML suffix."""""" if not value.endswith("".yaml""): raise vol.Invalid(""Path needs to end in .yaml"") return value ","def validate_yaml_suffix(value: str) -> str: """"""Validate value has a YAML suffix."""""" if not value.endswith("".yaml""): raise vol.Invalid(""Path needs to end in .yaml"") return value " 59656,"def load_libgmt(lib_fullnames=None): """""" Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`. Will look for the GMT shared library in the directories determined by clib_full_names(). Parameters ---------- lib_fullnames : list of str or None List of possible full names of GMT's shared library. If ``None``, will default to ``clib_full_names()``. Returns ------- :py:class:`ctypes.CDLL` object The loaded shared library. Raises ------ GMTCLibNotFoundError If there was any problem loading the library (couldn't find it or couldn't access the functions). """""" if lib_fullnames is None: lib_fullnames = clib_full_names() error = True error_msg = [] failing_libs = [] for libname in lib_fullnames: try: if libname in failing_libs: # libname is known to fail, so skip it continue libgmt = ctypes.CDLL(libname) check_libgmt(libgmt) error = False break except (OSError, GMTCLibError) as err: error_msg.append( f""Error loading the GMT shared library '{libname}'.\n{err}"" ) failing_libs.append(libname) if error: raise GMTCLibNotFoundError(""\n"".join(error_msg)) return libgmt ","def load_libgmt(lib_fullnames=None): """""" Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`. Will look for the GMT shared library in the directories determined by clib_full_names(). 
Parameters ---------- lib_fullnames : list of str or None List of possible full names of GMT's shared library. If ``None``, will default to ``clib_full_names()``. Returns ------- :py:class:`ctypes.CDLL` object The loaded shared library. Raises ------ GMTCLibNotFoundError If there was any problem loading the library (couldn't find it or couldn't access the functions). """""" if lib_fullnames is None: lib_fullnames = clib_full_names() error = True error_msg = [] failing_libs = [] for libname in lib_fullnames: try: if libname not in failing_libs: # only check libname(s) that did not fail libgmt = ctypes.CDLL(libname) check_libgmt(libgmt) break except (OSError, GMTCLibError) as err: error_msg.append( f""Error loading the GMT shared library '{libname}'.\n{err}"" ) failing_libs.append(libname) if error: raise GMTCLibNotFoundError(""\n"".join(error_msg)) return libgmt " 32479,"def _get_dbot_score_and_file_entries(file_analysis_result: dict, file_metadata: dict) -> Tuple[List[dict], dict]: verdict: str = file_analysis_result.get('verdict', '') sha256 = file_metadata.get('sha256') md5 = file_metadata.get('md5') sha1 = file_metadata.get('sha1') dbot = [ { 'Vendor': 'Intezer', 'Type': 'file', 'Indicator': sha256, 'Score': dbot_score_by_verdict.get(verdict, 0) }, { 'Vendor': 'Intezer', 'Type': 'file', 'Indicator': sha1, 'Score': dbot_score_by_verdict.get(verdict, 0) }, { 'Vendor': 'Intezer', 'Type': 'file', 'Indicator': md5, 'Score': dbot_score_by_verdict.get(verdict, 0) }] file = {'SHA256': sha256, 'MD5': md5, 'SHA1': sha1, 'Metadata': file_analysis_result, 'ExistsInIntezer': True} return dbot, file ","def _get_dbot_score_and_file_entries(file_analysis_result: dict, file_metadata: dict) -> Tuple[List[dict], dict]: verdict: str = file_analysis_result.get('verdict', '') sha256 = file_analysis_result.get('sha256') md5 = file_metadata.get('md5') sha1 = file_metadata.get('sha1') dbot = [ { 'Vendor': 'Intezer', 'Type': 'file', 'Indicator': sha256, 'Score': dbot_score_by_verdict.get(verdict, 0) }, { 'Vendor': 'Intezer', 'Type': 'file', 'Indicator': sha1, 'Score': dbot_score_by_verdict.get(verdict, 0) }, { 'Vendor': 'Intezer', 'Type': 'file', 'Indicator': md5, 'Score': dbot_score_by_verdict.get(verdict, 0) }] file = {'SHA256': sha256, 'MD5': md5, 'SHA1': sha1, 'Metadata': file_analysis_result, 'ExistsInIntezer': True} return dbot, file " 12175,"def _decompose_stream(stream: io.BytesIO, *, chunk_size: int = CHUNK_SIZE_DEFAULT, chunk_count: int = CHUNK_COUNT_DEFAULT) -> bytes: """""" Break up stream into smaller, consumable chunks. :param stream: stream object or open file handle :type stream: :class:`~io.BytesIO` :param chunk_size: max size of individual chunks, defaults to `16` kilobytes :type chunk_size: int, optional :param chunk_count: number of chunks to linearly seek within stream, defaults to `None` i.e. read all of stream as chunks :type chunk_count: int, optional :return: next individual byte chunk from stream :rtype: bytes """""" byte_count = stream.getbuffer().nbytes if chunk_count is None or byte_count <= chunk_size * chunk_count: for _ in range(math.ceil(byte_count / chunk_size)): yield stream.read(chunk_size) else: for _ in range(chunk_count): stream.seek(round(byte_count/(chunk_count + 1)) - int(chunk_size / 2.), 1) yield stream.read(chunk_size) ","def _decompose_stream(stream: io.BytesIO, *, chunk_size: int = CHUNK_SIZE_DEFAULT, chunk_count: int = CHUNK_COUNT_DEFAULT) -> bytes: """""" Break up stream into smaller, consumable chunks. 
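For streams small enough to be read whole, this amounts to walking the buffer in fixed-size reads; a minimal illustration with plain io.BytesIO (sizes picked arbitrarily for the sketch):

>>> import io
>>> stream = io.BytesIO(b'x' * 40)
>>> [len(stream.read(16)) for _ in range((40 + 16 - 1) // 16)]
[16, 16, 8]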
:param stream: stream object or open file handle :type stream: :class:`~io.BytesIO` :param chunk_size: max size of individual chunks, defaults to `16` kilobytes :type chunk_size: int, optional :param chunk_count: number of chunks to linearly seek within stream, defaults to `None` i.e. read all of stream as chunks :type chunk_count: int, optional :return: next individual byte chunk from stream :rtype: bytes """""" byte_count = stream.getbuffer().nbytes if chunk_count is None or byte_count <= chunk_size * chunk_count: for _ in range((byte_count + chunk_size - 1) // chunk_size): yield stream.read(chunk_size) else: for _ in range(chunk_count): stream.seek(round(byte_count/(chunk_count + 1)) - int(chunk_size / 2.), 1) yield stream.read(chunk_size) " 2194,"def get_file_extension(version): if 'dev' in version: # The 'dev' branch should be explictly handled return 'zip' current_version = LooseVersion(version) min_zip_version = LooseVersion('1.0.0') return 'zip' if current_version >= min_zip_version else 'pdf' ","def get_file_extension(version): if 'dev' in version: # The 'dev' branch should be explictly handled return 'zip' current_version = LooseVersion(version) min_zip_version = LooseVersion('0.24.1') return 'zip' if current_version >= min_zip_version else 'pdf' " 2624,"def make_classification( n_samples=100, n_features=20, *, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None, ): """"""Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of an ``n_informative``-dimensional hypercube with sides of length ``2*class_sep`` and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Without shuffling, ``X`` horizontally stacks features in the following order: the primary ``n_informative`` features, followed by ``n_redundant`` linear combinations of the informative features, followed by ``n_repeated`` duplicates, drawn randomly with replacement from the informative and redundant features. The remaining features are filled with random noise. Thus, without shuffling, all useful features are contained in the columns ``X[:, :n_informative + n_redundant + n_repeated]``. Read more in the :ref:`User Guide `. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=20 The total number of features. These comprise ``n_informative`` informative features, ``n_redundant`` redundant features, ``n_repeated`` duplicated features and ``n_features-n_informative-n_redundant-n_repeated`` useless features drawn at random. n_informative : int, default=2 The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension ``n_informative``. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, default=2 The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, default=0 The number of duplicated features, drawn randomly from the informative and the redundant features. 
n_classes : int, default=2 The number of classes (or labels) of the classification problem. n_clusters_per_class : int, default=2 The number of clusters per class. weights : array-like of shape (n_classes,) or (n_classes - 1,),\ default=None The proportions of samples assigned to each class. If None, then classes are balanced. Note that if ``len(weights) == n_classes - 1``, then the last class weight is automatically inferred. More than ``n_samples`` samples may be returned if the sum of ``weights`` exceeds 1. Note that the actual class proportions will not exactly match ``weights`` when ``flip_y`` isn't 0. flip_y : float, default=0.01 The fraction of samples whose class is assigned randomly. Larger values introduce noise in the labels and make the classification task harder. Note that the default setting flip_y > 0 might lead to less than ``n_classes`` in y in some cases. class_sep : float, default=1.0 The factor multiplying the hypercube size. Larger values spread out the clusters/classes and make the classification task easier. hypercube : bool, default=True If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, ndarray of shape (n_features,) or None, default=0.0 Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, ndarray of shape (n_features,) or None, default=1.0 Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : bool, default=True Shuffle the samples and the features. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary `. Returns ------- X : ndarray of shape (n_samples, n_features) The generated samples. y : ndarray of shape (n_samples,) The integer labels for class membership of each sample. See Also -------- make_blobs : Simplified variant. make_multilabel_classification : Unrelated generator for multilabel tasks. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the ""Madelon"" dataset. References ---------- .. [1] I. Guyon, ""Design of experiments for the NIPS 2003 variable selection benchmark"", 2003. 
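Examples
--------
A usage sketch (assumes the usual scikit-learn public import path; only shapes are shown, since the draws are random):

>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20,
...                            n_informative=2, n_redundant=2,
...                            random_state=42)
>>> X.shape, y.shape
((100, 20), (100,))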
"""""" generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError( ""Number of informative, redundant and repeated "" ""features must sum to less than the number of total"" "" features"" ) # Use log2 to avoid overflow errors if n_informative < np.log2(n_classes * n_clusters_per_class): msg = ""n_classes({}) * n_clusters_per_class({}) must be"" msg += "" smaller or equal 2**n_informative({})={}"" raise ValueError( msg.format( n_classes, n_clusters_per_class, n_informative, 2**n_informative ) ) if weights is not None: if len(weights) not in [n_classes, n_classes - 1]: raise ValueError( ""Weights specified but incompatible with number of classes."" ) if len(weights) == n_classes - 1: if isinstance(weights, list): weights = weights + [1.0 - sum(weights)] else: weights = np.resize(weights, n_classes) weights[-1] = 1.0 - sum(weights[:-1]) else: weights = [1.0 / n_classes] * n_classes n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class # Distribute samples among clusters by weight n_samples_per_cluster = [ int(n_samples * weights[k % n_classes] / n_clusters_per_class) for k in range(n_clusters) ] for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Initialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype( float, copy=False ) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.uniform(size=(n_clusters, 1)) centroids *= generator.uniform(size=(1, n_informative)) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative)) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1 X_k[...] 
= np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1 X[:, n_informative : n_informative + n_redundant] = np.dot( X[:, :n_informative], B ) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp) X[:, n : n + n_repeated] = X[:, indices] # Fill useless features if n_useless > 0: X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless)) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.uniform(size=n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.uniform(size=n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.uniform(size=n_features) X *= scale if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y ","def make_classification( n_samples=100, n_features=20, *, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None, ): """"""Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of an ``n_informative``-dimensional hypercube with sides of length ``2*class_sep`` and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Without shuffling, ``X`` horizontally stacks features in the following order: the primary ``n_informative`` features, followed by ``n_redundant`` linear combinations of the informative features, followed by ``n_repeated`` duplicates, drawn randomly with replacement from the informative and redundant features. The remaining features are filled with random noise. Thus, without shuffling, all useful features are contained in the columns ``X[:, :n_informative + n_redundant + n_repeated]``. Read more in the :ref:`User Guide `. Parameters ---------- n_samples : int, default=100 The number of samples. n_features : int, default=20 The total number of features. These comprise ``n_informative`` informative features, ``n_redundant`` redundant features, ``n_repeated`` duplicated features and ``n_features-n_informative-n_redundant-n_repeated`` useless features drawn at random. n_informative : int, default=2 The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension ``n_informative``. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, default=2 The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, default=0 The number of duplicated features, drawn randomly from the informative and the redundant features. n_classes : int, default=2 The number of classes (or labels) of the classification problem. 
n_clusters_per_class : int, default=2 The number of clusters per class. weights : array-like of shape (n_classes,) or (n_classes - 1,),\ default=None The proportions of samples assigned to each class. If None, then classes are balanced. Note that if ``len(weights) == n_classes - 1``, then the last class weight is automatically inferred. More than ``n_samples`` samples may be returned if the sum of ``weights`` exceeds 1. Note that the actual class proportions will not exactly match ``weights`` when ``flip_y`` isn't 0. flip_y : float, default=0.01 The fraction of samples whose class is assigned randomly. Larger values introduce noise in the labels and make the classification task harder. Note that the default setting flip_y > 0 might lead to less than ``n_classes`` in y in some cases. class_sep : float, default=1.0 The factor multiplying the hypercube size. Larger values spread out the clusters/classes and make the classification task easier. hypercube : bool, default=True If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, ndarray of shape (n_features,) or None, default=0.0 Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, ndarray of shape (n_features,) or None, default=1.0 Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : bool, default=True Shuffle the samples and the features. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary `. Returns ------- X : ndarray of shape (n_samples, n_features) The generated samples. y : ndarray of shape (n_samples,) The integer labels for class membership of each sample. See Also -------- make_blobs : Simplified variant. make_multilabel_classification : Unrelated generator for multilabel tasks. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the ""Madelon"" dataset. References ---------- .. [1] I. Guyon, ""Design of experiments for the NIPS 2003 variable selection benchmark"", 2003. 
"""""" generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError( ""Number of informative, redundant and repeated "" ""features must sum to less than the number of total"" "" features"" ) # Use log2 to avoid overflow errors if n_informative < np.log2(n_classes * n_clusters_per_class): msg = ""n_classes({}) * n_clusters_per_class({}) must be"" msg += "" smaller or equal 2**n_informative({})={}"" raise ValueError( msg.format( n_classes, n_clusters_per_class, n_informative, 2**n_informative ) ) if weights is not None: if len(weights) not in [n_classes, n_classes - 1]: raise ValueError( ""Weights specified but incompatible with number of classes."" ) if len(weights) == n_classes - 1: if isinstance(weights, list): weights = weights + [1.0 - sum(weights)] else: weights = np.resize(weights, n_classes) weights[-1] = 1.0 - sum(weights[:-1]) else: weights = [1.0 / n_classes] * n_classes n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class # Distribute samples among clusters by weight n_samples_per_cluster = [ int(n_samples * weights[k % n_classes] / n_clusters_per_class) for k in range(n_clusters) ] for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Initialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype( float, copy=False ) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.uniform(size=(n_clusters, 1)) centroids *= generator.uniform(size=(1, n_informative)) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative)) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1 X_k[...] 
= np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1 X[:, n_informative : n_informative + n_redundant] = np.dot( X[:, :n_informative], B ) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp) X[:, n : n + n_repeated] = X[:, indices] # Fill useless features if n_useless > 0: X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless)) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.uniform(size=n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.uniform(size=n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.uniform(size=n_features) X *= scale if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y " 57924,"def get_enabled_instances(): enabled_instances = [] instances = demisto.getModules() for instance_name, data in instances.items(): if data.get('state') == 'active': enabled_instances.append(instance_name) readable_output = [ { 'Instance Name': instance, 'Brand': instances[instance].get('brand') } for instance in enabled_instances ] return CommandResults( outputs_prefix='EnabledInstances', outputs=enabled_instances, readable_output=tableToMarkdown('Enabled Instances', readable_output), raw_response=enabled_instances ) ","def get_enabled_instances(): enabled_instances = [] instances = demisto.getModules() readable_output = [] for instance_name, data in instances.items(): if data.get('state') == 'active': enabled_instances.append(instance_name) readable_output.append({ 'Instance Name': instance_name, 'Brand': data.get('brand') }) return CommandResults( outputs_prefix='EnabledInstances', outputs=enabled_instances, readable_output=tableToMarkdown('Enabled Instances', readable_output), raw_response=enabled_instances ) " 17004,"def datetime_or_zero(value: str) -> datetime | None: """"""Convert strings to datetimes, if invalid, return None."""""" # To handle cameras that return times like '0000-00-00T00:00:00Z' (e.g. hikvision) try: ret = dt_util.parse_datetime(value) except ValueError: return None if ret is None: return None return ret ","def datetime_or_zero(value: str) -> datetime | None: """"""Convert strings to datetimes, if invalid, return None."""""" # To handle cameras that return times like '0000-00-00T00:00:00Z' (e.g. hikvision) try: return dt_util.parse_datetime(value) except ValueError: return None if ret is None: return None return ret " 5086,"def motion_fn(mouseevent): if mouseevent.inaxes is colorbar.ax.axes: adjust_colorbar(mouseevent) ","def on_move(mouseevent): if mouseevent.inaxes is colorbar.ax.axes: adjust_colorbar(mouseevent) " 5773,"def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=True, bounds=(-np.inf, np.inf), method=None, jac=None, *, full_output=False, **kwargs): """""" Use non-linear least squares to fit a function, f, to data. Assumes ``ydata = f(xdata, *params) + eps``. Parameters ---------- f : callable The model function, f(x, ...). It must take the independent variable as the first argument and the parameters to fit as separate remaining arguments. 
xdata : array_like or object The independent variable where the data is measured. Should usually be an M-length sequence or an (k,M)-shaped array for functions with k predictors, but can actually be any object. ydata : array_like The dependent data, a length M array - nominally ``f(xdata, ...)``. p0 : array_like, optional Initial guess for the parameters (length N). If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised). sigma : None or M-length sequence or MxM array, optional Determines the uncertainty in `ydata`. If we define residuals as ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` depends on its number of dimensions: - A 1-D `sigma` should contain values of standard deviations of errors in `ydata`. In this case, the optimized function is ``chisq = sum((r / sigma) ** 2)``. - A 2-D `sigma` should contain the covariance matrix of errors in `ydata`. In this case, the optimized function is ``chisq = r.T @ inv(sigma) @ r``. .. versionadded:: 0.19 None (default) is equivalent of 1-D `sigma` filled with ones. absolute_sigma : bool, optional If True, `sigma` is used in an absolute sense and the estimated parameter covariance `pcov` reflects these absolute values. If False (default), only the relative magnitudes of the `sigma` values matter. The returned parameter covariance matrix `pcov` is based on scaling `sigma` by a constant factor. This constant is set by demanding that the reduced `chisq` for the optimal parameters `popt` when using the *scaled* `sigma` equals unity. In other words, `sigma` is scaled to match the sample variance of the residuals after the fit. Default is False. Mathematically, ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` check_finite : bool, optional If True, check that the input arrays do not contain nans of infs, and raise a ValueError if they do. Setting this parameter to False may silently produce nonsensical results if the input arrays do contain nans. Default is True. bounds : 2-tuple of array_like, optional Lower and upper bounds on parameters. Defaults to no bounds. Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters). Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. .. versionadded:: 0.17 method : {'lm', 'trf', 'dogbox'}, optional Method to use for optimization. See `least_squares` for more details. Default is 'lm' for unconstrained problems and 'trf' if `bounds` are provided. The method 'lm' won't work when the number of observations is less than the number of variables, use 'trf' or 'dogbox' in this case. .. versionadded:: 0.17 jac : callable, string or None, optional Function with signature ``jac(x, ...)`` which computes the Jacobian matrix of the model function with respect to parameters as a dense array_like structure. It will be scaled according to provided `sigma`. If None (default), the Jacobian will be estimated numerically. String keywords for 'trf' and 'dogbox' methods can be used to select a finite difference scheme, see `least_squares`. .. versionadded:: 0.18 full_output : boolean, optional If True, this function returns additioal information: `infodict`, `mesg`, and `ier`. .. versionadded:: 1.9 kwargs Keyword arguments passed to `leastsq` for ``method='lm'`` or `least_squares` otherwise. 
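A brief sketch of how per-point uncertainties enter the fit and how one-sigma parameter errors are commonly read off the covariance; the data and model here are made up for illustration, and the full Examples section below gives a complete walk-through:

>>> import numpy as np
>>> from scipy.optimize import curve_fit
>>> def line(x, a, b):
...     return a * x + b
>>> x = np.arange(10, dtype=float)
>>> y = 2.0 * x + 1.0
>>> yerr = np.full_like(y, 0.1)                     # known 1-D standard deviations
>>> popt, pcov = curve_fit(line, x, y, sigma=yerr, absolute_sigma=True)
>>> perr = np.sqrt(np.diag(pcov))                   # one-sigma parameter uncertainties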
Returns ------- popt : array Optimal values for the parameters so that the sum of the squared residuals of ``f(xdata, *popt) - ydata`` is minimized. pcov : 2-D array The estimated covariance of popt. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters use ``perr = np.sqrt(np.diag(pcov))``. How the `sigma` parameter affects the estimated covariance depends on `absolute_sigma` argument, as described above. If the Jacobian matrix at the solution doesn't have a full rank, then 'lm' method returns a matrix filled with ``np.inf``, on the other hand 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute the covariance matrix. infodict : dict (returned only if `full_output` is True) a dictionary of optional outputs with the keys: ``nfev`` The number of function calls. Methods 'trf' and 'dogbox' do not count function calls for numerical Jacobian approximation, as opposed to 'lm' method. ``fvec`` The function values evaluated at the solution. ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. Method 'lm' only provides this information. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. Method 'lm' only provides this information. ``qtf`` The vector (transpose(q) * fvec). Method 'lm' only provides this information. .. versionadded:: 1.9 mesg : str (returned only if `full_output` is True) A string message giving information about the solution. .. versionadded:: 1.9 ier : int (returnned only if `full_output` is True) An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable `mesg` gives more information. .. versionadded:: 1.9 Raises ------ ValueError if either `ydata` or `xdata` contain NaNs, or if incompatible options are used. RuntimeError if the least-squares minimization fails. OptimizeWarning if covariance of the parameters can not be estimated. See Also -------- least_squares : Minimize the sum of squares of nonlinear functions. scipy.stats.linregress : Calculate a linear least squares regression for two sets of measurements. Notes ----- With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm through `leastsq`. Note that this algorithm can only deal with unconstrained problems. Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to the docstring of `least_squares` for more information. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.optimize import curve_fit >>> def func(x, a, b, c): ... return a * np.exp(-b * x) + c Define the data to be fit with some noise: >>> xdata = np.linspace(0, 4, 50) >>> y = func(xdata, 2.5, 1.3, 0.5) >>> rng = np.random.default_rng() >>> y_noise = 0.2 * rng.normal(size=xdata.size) >>> ydata = y + y_noise >>> plt.plot(xdata, ydata, 'b-', label='data') Fit for the parameters a, b, c of the function `func`: >>> popt, pcov = curve_fit(func, xdata, ydata) >>> popt array([2.56274217, 1.37268521, 0.47427475]) >>> plt.plot(xdata, func(xdata, *popt), 'r-', ... 
label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) Constrain the optimization to the region of ``0 <= a <= 3``, ``0 <= b <= 1`` and ``0 <= c <= 0.5``: >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) >>> popt array([2.43736712, 1. , 0.34463856]) >>> plt.plot(xdata, func(xdata, *popt), 'g--', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) >>> plt.xlabel('x') >>> plt.ylabel('y') >>> plt.legend() >>> plt.show() """""" if p0 is None: # determine number of parameters by inspecting the function sig = _getfullargspec(f) args = sig.args if len(args) < 2: raise ValueError(""Unable to determine number of fit parameters."") n = len(args) - 1 else: p0 = np.atleast_1d(p0) n = p0.size lb, ub = prepare_bounds(bounds, n) if p0 is None: p0 = _initialize_feasible(lb, ub) bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) if method is None: if bounded_problem: method = 'trf' else: method = 'lm' if method == 'lm' and bounded_problem: raise ValueError(""Method 'lm' only works for unconstrained problems. "" ""Use 'trf' or 'dogbox' instead."") # optimization may produce garbage for float32 inputs, cast them to float64 # NaNs cannot be handled if check_finite: ydata = np.asarray_chkfinite(ydata, float) else: ydata = np.asarray(ydata, float) if isinstance(xdata, (list, tuple, np.ndarray)): # `xdata` is passed straight to the user-defined `f`, so allow # non-array_like `xdata`. if check_finite: xdata = np.asarray_chkfinite(xdata, float) else: xdata = np.asarray(xdata, float) if ydata.size == 0: raise ValueError(""`ydata` must not be empty!"") # Determine type of sigma if sigma is not None: sigma = np.asarray(sigma) # if 1-D, sigma are errors, define transform = 1/sigma if sigma.shape == (ydata.size, ): transform = 1.0 / sigma # if 2-D, sigma is the covariance matrix, # define transform = L such that L L^T = C elif sigma.shape == (ydata.size, ydata.size): try: # scipy.linalg.cholesky requires lower=True to return L L^T = A transform = cholesky(sigma, lower=True) except LinAlgError as e: raise ValueError(""`sigma` must be positive definite."") from e else: raise ValueError(""`sigma` has incorrect shape."") else: transform = None func = _wrap_func(f, xdata, ydata, transform) if callable(jac): jac = _wrap_jac(jac, xdata, transform) elif jac is None and method != 'lm': jac = '2-point' if 'args' in kwargs: # The specification for the model function `f` does not support # additional arguments. Refer to the `curve_fit` docstring for # acceptable call signatures of `f`. raise ValueError(""'args' is not a supported keyword argument."") if method == 'lm': # if ydata.size == 1, this might be used for broadcast. if ydata.size != 1 and n > ydata.size: raise TypeError(f""The number of func parameters={n} must not"" f"" exceed the number of data points={ydata.size}"") res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) popt, pcov, infodict, errmsg, ier = res ysize = len(infodict['fvec']) cost = np.sum(infodict['fvec'] ** 2) if ier not in [1, 2, 3, 4]: raise RuntimeError(""Optimal parameters not found: "" + errmsg) else: # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. 
if 'max_nfev' not in kwargs: kwargs['max_nfev'] = kwargs.pop('maxfev', None) res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs) if not res.success: raise RuntimeError(""Optimal parameters not found: "" + res.message) infodict = dict(nfev=res.nfev, fvec=res.fun) ier = res.status errmsg = res.message ysize = len(res.fun) cost = 2 * res.cost # res.cost is half sum of squares! popt = res.x # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s**2, VT) warn_cov = False if pcov is None: # indeterminate covariance pcov = zeros((len(popt), len(popt)), dtype=float) pcov.fill(inf) warn_cov = True elif not absolute_sigma: if ysize > p0.size: s_sq = cost / (ysize - p0.size) pcov = pcov * s_sq else: pcov.fill(inf) warn_cov = True if warn_cov: warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning) if full_output: return popt, pcov, infodict, errmsg, ier else: return popt, pcov ","def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=True, bounds=(-np.inf, np.inf), method=None, jac=None, *, full_output=False, **kwargs): """""" Use non-linear least squares to fit a function, f, to data. Assumes ``ydata = f(xdata, *params) + eps``. Parameters ---------- f : callable The model function, f(x, ...). It must take the independent variable as the first argument and the parameters to fit as separate remaining arguments. xdata : array_like or object The independent variable where the data is measured. Should usually be an M-length sequence or an (k,M)-shaped array for functions with k predictors, but can actually be any object. ydata : array_like The dependent data, a length M array - nominally ``f(xdata, ...)``. p0 : array_like, optional Initial guess for the parameters (length N). If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised). sigma : None or M-length sequence or MxM array, optional Determines the uncertainty in `ydata`. If we define residuals as ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` depends on its number of dimensions: - A 1-D `sigma` should contain values of standard deviations of errors in `ydata`. In this case, the optimized function is ``chisq = sum((r / sigma) ** 2)``. - A 2-D `sigma` should contain the covariance matrix of errors in `ydata`. In this case, the optimized function is ``chisq = r.T @ inv(sigma) @ r``. .. versionadded:: 0.19 None (default) is equivalent of 1-D `sigma` filled with ones. absolute_sigma : bool, optional If True, `sigma` is used in an absolute sense and the estimated parameter covariance `pcov` reflects these absolute values. If False (default), only the relative magnitudes of the `sigma` values matter. The returned parameter covariance matrix `pcov` is based on scaling `sigma` by a constant factor. This constant is set by demanding that the reduced `chisq` for the optimal parameters `popt` when using the *scaled* `sigma` equals unity. In other words, `sigma` is scaled to match the sample variance of the residuals after the fit. Default is False. 
Mathematically, ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` check_finite : bool, optional If True, check that the input arrays do not contain nans of infs, and raise a ValueError if they do. Setting this parameter to False may silently produce nonsensical results if the input arrays do contain nans. Default is True. bounds : 2-tuple of array_like, optional Lower and upper bounds on parameters. Defaults to no bounds. Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters). Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. .. versionadded:: 0.17 method : {'lm', 'trf', 'dogbox'}, optional Method to use for optimization. See `least_squares` for more details. Default is 'lm' for unconstrained problems and 'trf' if `bounds` are provided. The method 'lm' won't work when the number of observations is less than the number of variables, use 'trf' or 'dogbox' in this case. .. versionadded:: 0.17 jac : callable, string or None, optional Function with signature ``jac(x, ...)`` which computes the Jacobian matrix of the model function with respect to parameters as a dense array_like structure. It will be scaled according to provided `sigma`. If None (default), the Jacobian will be estimated numerically. String keywords for 'trf' and 'dogbox' methods can be used to select a finite difference scheme, see `least_squares`. .. versionadded:: 0.18 full_output : boolean, optional If True, this function returns additioal information: `infodict`, `mesg`, and `ier`. .. versionadded:: 1.9 **kwargs Keyword arguments passed to `leastsq` for ``method='lm'`` or `least_squares` otherwise. Returns ------- popt : array Optimal values for the parameters so that the sum of the squared residuals of ``f(xdata, *popt) - ydata`` is minimized. pcov : 2-D array The estimated covariance of popt. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters use ``perr = np.sqrt(np.diag(pcov))``. How the `sigma` parameter affects the estimated covariance depends on `absolute_sigma` argument, as described above. If the Jacobian matrix at the solution doesn't have a full rank, then 'lm' method returns a matrix filled with ``np.inf``, on the other hand 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute the covariance matrix. infodict : dict (returned only if `full_output` is True) a dictionary of optional outputs with the keys: ``nfev`` The number of function calls. Methods 'trf' and 'dogbox' do not count function calls for numerical Jacobian approximation, as opposed to 'lm' method. ``fvec`` The function values evaluated at the solution. ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. Method 'lm' only provides this information. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. Method 'lm' only provides this information. ``qtf`` The vector (transpose(q) * fvec). Method 'lm' only provides this information. .. versionadded:: 1.9 mesg : str (returned only if `full_output` is True) A string message giving information about the solution. .. 
versionadded:: 1.9 ier : int (returnned only if `full_output` is True) An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable `mesg` gives more information. .. versionadded:: 1.9 Raises ------ ValueError if either `ydata` or `xdata` contain NaNs, or if incompatible options are used. RuntimeError if the least-squares minimization fails. OptimizeWarning if covariance of the parameters can not be estimated. See Also -------- least_squares : Minimize the sum of squares of nonlinear functions. scipy.stats.linregress : Calculate a linear least squares regression for two sets of measurements. Notes ----- With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm through `leastsq`. Note that this algorithm can only deal with unconstrained problems. Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to the docstring of `least_squares` for more information. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.optimize import curve_fit >>> def func(x, a, b, c): ... return a * np.exp(-b * x) + c Define the data to be fit with some noise: >>> xdata = np.linspace(0, 4, 50) >>> y = func(xdata, 2.5, 1.3, 0.5) >>> rng = np.random.default_rng() >>> y_noise = 0.2 * rng.normal(size=xdata.size) >>> ydata = y + y_noise >>> plt.plot(xdata, ydata, 'b-', label='data') Fit for the parameters a, b, c of the function `func`: >>> popt, pcov = curve_fit(func, xdata, ydata) >>> popt array([2.56274217, 1.37268521, 0.47427475]) >>> plt.plot(xdata, func(xdata, *popt), 'r-', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) Constrain the optimization to the region of ``0 <= a <= 3``, ``0 <= b <= 1`` and ``0 <= c <= 0.5``: >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) >>> popt array([2.43736712, 1. , 0.34463856]) >>> plt.plot(xdata, func(xdata, *popt), 'g--', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) >>> plt.xlabel('x') >>> plt.ylabel('y') >>> plt.legend() >>> plt.show() """""" if p0 is None: # determine number of parameters by inspecting the function sig = _getfullargspec(f) args = sig.args if len(args) < 2: raise ValueError(""Unable to determine number of fit parameters."") n = len(args) - 1 else: p0 = np.atleast_1d(p0) n = p0.size lb, ub = prepare_bounds(bounds, n) if p0 is None: p0 = _initialize_feasible(lb, ub) bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) if method is None: if bounded_problem: method = 'trf' else: method = 'lm' if method == 'lm' and bounded_problem: raise ValueError(""Method 'lm' only works for unconstrained problems. "" ""Use 'trf' or 'dogbox' instead."") # optimization may produce garbage for float32 inputs, cast them to float64 # NaNs cannot be handled if check_finite: ydata = np.asarray_chkfinite(ydata, float) else: ydata = np.asarray(ydata, float) if isinstance(xdata, (list, tuple, np.ndarray)): # `xdata` is passed straight to the user-defined `f`, so allow # non-array_like `xdata`. 
if check_finite: xdata = np.asarray_chkfinite(xdata, float) else: xdata = np.asarray(xdata, float) if ydata.size == 0: raise ValueError(""`ydata` must not be empty!"") # Determine type of sigma if sigma is not None: sigma = np.asarray(sigma) # if 1-D, sigma are errors, define transform = 1/sigma if sigma.shape == (ydata.size, ): transform = 1.0 / sigma # if 2-D, sigma is the covariance matrix, # define transform = L such that L L^T = C elif sigma.shape == (ydata.size, ydata.size): try: # scipy.linalg.cholesky requires lower=True to return L L^T = A transform = cholesky(sigma, lower=True) except LinAlgError as e: raise ValueError(""`sigma` must be positive definite."") from e else: raise ValueError(""`sigma` has incorrect shape."") else: transform = None func = _wrap_func(f, xdata, ydata, transform) if callable(jac): jac = _wrap_jac(jac, xdata, transform) elif jac is None and method != 'lm': jac = '2-point' if 'args' in kwargs: # The specification for the model function `f` does not support # additional arguments. Refer to the `curve_fit` docstring for # acceptable call signatures of `f`. raise ValueError(""'args' is not a supported keyword argument."") if method == 'lm': # if ydata.size == 1, this might be used for broadcast. if ydata.size != 1 and n > ydata.size: raise TypeError(f""The number of func parameters={n} must not"" f"" exceed the number of data points={ydata.size}"") res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) popt, pcov, infodict, errmsg, ier = res ysize = len(infodict['fvec']) cost = np.sum(infodict['fvec'] ** 2) if ier not in [1, 2, 3, 4]: raise RuntimeError(""Optimal parameters not found: "" + errmsg) else: # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. if 'max_nfev' not in kwargs: kwargs['max_nfev'] = kwargs.pop('maxfev', None) res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs) if not res.success: raise RuntimeError(""Optimal parameters not found: "" + res.message) infodict = dict(nfev=res.nfev, fvec=res.fun) ier = res.status errmsg = res.message ysize = len(res.fun) cost = 2 * res.cost # res.cost is half sum of squares! popt = res.x # Do Moore-Penrose inverse discarding zero singular values. 
_, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s**2, VT) warn_cov = False if pcov is None: # indeterminate covariance pcov = zeros((len(popt), len(popt)), dtype=float) pcov.fill(inf) warn_cov = True elif not absolute_sigma: if ysize > p0.size: s_sq = cost / (ysize - p0.size) pcov = pcov * s_sq else: pcov.fill(inf) warn_cov = True if warn_cov: warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning) if full_output: return popt, pcov, infodict, errmsg, ier else: return popt, pcov " 32281,"def panorama_query_logs_command(args: dict): """""" Query logs """""" log_type = args.get('log-type') number_of_logs = args.get('number_of_logs') query = args.get('query') address_src = args.get('addr-src') address_dst = args.get('addr-dst') ip_ = args.get('ip') zone_src = args.get('zone-src') zone_dst = args.get('zone-dst') time_generated = args.get('time-generated') action = args.get('action') port_dst = args.get('port-dst') rule = args.get('rule') filedigest = args.get('filedigest') url = args.get('url') use_polling = args.get('polling', 'false') == 'true' job_id = args.get('job_id') cmd = demisto.command() interval_in_seconds = int(args.get('interval_in_seconds', 60)) timeout = int(args.get('timeout', 600)) script_results = [] if query and (address_src or address_dst or zone_src or zone_dst or time_generated or action or port_dst or rule or url or filedigest): raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.') if use_polling: ScheduledCommand.raise_error_if_not_supported() if not job_id: # create new search result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') job_id = result['response']['result']['job'] polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) readable_output = f""Panorama log query search created successfully (Job ID: {job_id})"" script_results.append(CommandResults( readable_output=readable_output, scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. 
Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if result['response']['result']['job']['status'] != ""FIN"": polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) script_results.append(CommandResults( scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_output = { 'JobID': job_id, 'Status': 'Complete' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][ 'result'] \ or 'logs' not in result['response']['result']['log']: raise Exception('Missing logs in response.') logs = result['response']['result']['log']['logs'] if logs['@count'] == '0': human_readable = f'No {log_type} logs matched the query.' else: pretty_logs = prettify_logs(logs['entry']) query_logs_output['Logs'] = pretty_logs human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'], ['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application', 'Action', 'Rule', 'URLOrFilename'], removeNull=True) script_results.append(CommandResults( outputs_prefix='Panorama.Monitor', outputs_key_field='JobID', outputs=result, readable_output=human_readable, ignore_auto_extract=True)) else: result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: raise Exception(f""Query logs failed. 
Reason is: {result['response']['msg']['line']}"") else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') query_logs_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending', 'LogType': log_type, 'Message': result['response']['result']['msg']['line'] } script_results.append({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {""Panorama.Monitor(val.JobID == obj.JobID)"": query_logs_output} }) return_results(script_results) ","def panorama_query_logs_command(args: dict): """""" Query logs """""" log_type = args.get('log-type') number_of_logs = args.get('number_of_logs') query = args.get('query') address_src = args.get('addr-src') address_dst = args.get('addr-dst') ip_ = args.get('ip') zone_src = args.get('zone-src') zone_dst = args.get('zone-dst') time_generated = args.get('time-generated') action = args.get('action') port_dst = args.get('port-dst') rule = args.get('rule') filedigest = args.get('filedigest') url = args.get('url') use_polling = argToBoolean(args.get('polling', 'false')) job_id = args.get('job_id') cmd = demisto.command() interval_in_seconds = int(args.get('interval_in_seconds', 60)) timeout = int(args.get('timeout', 600)) script_results = [] if query and (address_src or address_dst or zone_src or zone_dst or time_generated or action or port_dst or rule or url or filedigest): raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.') if use_polling: ScheduledCommand.raise_error_if_not_supported() if not job_id: # create new search result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') job_id = result['response']['result']['job'] polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) readable_output = f""Panorama log query search created successfully (Job ID: {job_id})"" script_results.append(CommandResults( readable_output=readable_output, scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. 
Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if result['response']['result']['job']['status'] != ""FIN"": polling_args = { 'job_id': job_id, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_seconds, args=polling_args, timeout_in_seconds=timeout ) script_results.append(CommandResults( scheduled_command=scheduled_command )) else: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_output = { 'JobID': job_id, 'Status': 'Complete' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][ 'result'] \ or 'logs' not in result['response']['result']['log']: raise Exception('Missing logs in response.') logs = result['response']['result']['log']['logs'] if logs['@count'] == '0': human_readable = f'No {log_type} logs matched the query.' else: pretty_logs = prettify_logs(logs['entry']) query_logs_output['Logs'] = pretty_logs human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'], ['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application', 'Action', 'Rule', 'URLOrFilename'], removeNull=True) script_results.append(CommandResults( outputs_prefix='Panorama.Monitor', outputs_key_field='JobID', outputs=result, readable_output=human_readable, ignore_auto_extract=True)) else: result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_, zone_src, zone_dst, time_generated, action, port_dst, rule, url, filedigest) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: raise Exception(f""Query logs failed. 
Reason is: {result['response']['msg']['line']}"") else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') query_logs_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending', 'LogType': log_type, 'Message': result['response']['result']['msg']['line'] } script_results.append({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {""Panorama.Monitor(val.JobID == obj.JobID)"": query_logs_output} }) return_results(script_results) " 54075,"def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines from argcomplete.completers import FilesCompleter from knack.arguments import ignore_type, CLIArgumentType from .sdkutil import get_table_data_type from .completers import get_storage_name_completion_list t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService') t_file_service = self.get_sdk('file#FileService') t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService') t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional', resource_type=CUSTOM_DATA_STORAGE_BLOB) t_rehydrate_priority = self.get_sdk('_generated.models._azure_blob_storage_enums#RehydratePriority', resource_type=CUSTOM_DATA_STORAGE_BLOB) blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.', completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs', parent='container_name')) container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.', completer=get_storage_name_completion_list(t_base_blob_service, 'list_containers')) directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.', completer=get_storage_name_completion_list(t_file_service, 'list_directories_and_files', parent='share_name')) share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.', completer=get_storage_name_completion_list(t_file_service, 'list_shares')) table_name_type = CLIArgumentType(options_list=['--table-name', '-t'], completer=get_storage_name_completion_list(t_table_service, 'list_tables')) progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.', action='store_true') sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \ 'referenced with --policy-name that specifies this value. Can be combined.' lease_type = CLIArgumentType( options_list='--lease-id', help='Required if the blob has an active lease.' ) snapshot_type = CLIArgumentType( help='The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot ' 'to retrieve.' ) tags_type = CLIArgumentType( nargs='*', validator=validate_tags, min_api='2019-12-12', is_preview=True, help='space-separated tags: key[=value] [key[=value] ...]. Tags are case-sensitive. The tag set may ' 'contain at most 10 tags. Tag keys must be between 1 and 128 characters, and tag values must be ' 'between 0 and 256 characters. 
Valid tag key and value characters include: lowercase and uppercase ' 'letters, digits (0-9), space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals ' '(=), underscore (_).' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') num_results_type = CLIArgumentType( default=5000, validator=validate_storage_data_plane_list, options_list='--num-results', help='Specify the maximum number to return. If the request does not specify ' 'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that ' 'if the listing operation crosses a partition boundary, then the service will return a continuation token ' 'for retrieving the remaining of the results. Provide ""*"" to return all.' ) version_id_type = CLIArgumentType( help='An optional blob version ID. This parameter is only for versioning enabled account. ', min_api='2019-12-12', is_preview=True ) # Fix preview display tier_type = CLIArgumentType( arg_type=get_enum_type(t_blob_tier), min_api='2019-02-02', help='The tier value to set the blob to. For page blob, the tier correlates to the size of the blob ' 'and number of allowed IOPS. Possible values are P10, P15, P20, P30, P4, P40, P50, P6, P60, P70, P80 ' 'and this is only applicable to page blobs on premium storage accounts; For block blob, possible ' 'values are Archive, Cool and Hot. This is only applicable to block blobs on standard storage accounts.' ) rehydrate_priority_type = CLIArgumentType( arg_type=get_enum_type(t_rehydrate_priority), options_list=('--rehydrate-priority', '-r'), min_api='2019-02-02', help='Indicate the priority with which to rehydrate an archived blob.') tags_condition_type = CLIArgumentType( options_list='--tags-condition', min_api='2019-12-12', help='Specify a SQL where clause on blob tags to operate only on blobs with a matching value.') timeout_type = CLIArgumentType( help='Request timeout in seconds. Applies to each call to the service.', type=int ) t_delete_snapshots = self.get_sdk('_generated.models#DeleteSnapshotsOptionType', resource_type=CUSTOM_DATA_STORAGE_BLOB) delete_snapshots_type = CLIArgumentType( arg_type=get_enum_type(t_delete_snapshots), help='Required if the blob has associated snapshots. ""only"": Deletes only the blobs snapshots. ' '""include"": Deletes the blob along with all snapshots.') overwrite_type = CLIArgumentType( arg_type=get_three_state_flag(), help='Whether the blob to be uploaded should overwrite the current data. If True, upload_blob will ' 'overwrite the existing data. If set to False, the operation will fail with ResourceExistsError. ' 'The exception to the above is with Append blob types: if set to False and the data already exists, ' 'an error will not be raised and the data will be appended to the existing blob. If set ' 'overwrite=True, then the existing append blob will be deleted, and a new one created. 
' 'Defaults to False.') with self.argument_context('storage') as c: c.argument('container_name', container_name_type) c.argument('directory_name', directory_type) c.argument('share_name', share_name_type) c.argument('table_name', table_name_type) c.argument('retry_wait', options_list=('--retry-interval',)) c.ignore('progress_callback') c.argument('metadata', nargs='+', help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.', validator=validate_metadata) c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int) with self.argument_context('storage blob') as c: c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type) c.argument('destination_path', help='The destination path that will be appended to the blob name.') c.argument('socket_timeout', deprecate_info=c.deprecate(hide=True), help='The socket timeout(secs), used by the service to regulate data flow.') with self.argument_context('storage blob copy') as c: c.argument('container_name', container_name_type, options_list=('--destination-container', '-c')) c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'), help='Name of the destination blob. If the exists, it will be overwritten.') with self.argument_context('storage blob copy start') as c: from ._validators import validate_source_url c.register_blob_arguments() c.register_precondition_options() c.register_precondition_options(prefix='source_') c.register_source_uri_arguments(validator=validate_source_url) c.ignore('incremental_copy') c.argument('if_match', options_list=['--destination-if-match']) c.argument('if_modified_since', options_list=['--destination-if-modified-since']) c.argument('if_none_match', options_list=['--destination-if-none-match']) c.argument('if_unmodified_since', options_list=['--destination-if-unmodified-since']) c.argument('if_tags_match_condition', options_list=['--destination-tags-condition']) c.argument('blob_name', options_list=['--destination-blob', '-b'], required=True, help='Name of the destination blob. If the exists, it will be overwritten.') c.argument('container_name', options_list=['--destination-container', '-c'], required=True, help='The container name.') c.extra('destination_lease', options_list='--destination-lease-id', help='The lease ID specified for this header must match the lease ID of the estination blob. 
' 'If the request does not include the lease ID or it is not valid, the operation fails with status ' 'code 412 (Precondition Failed).') c.extra('source_lease', options_list='--source-lease-id', arg_group='Copy Source', help='Specify this to perform the Copy Blob operation only if the lease ID given matches the ' 'active lease ID of the source blob.') c.extra('rehydrate_priority', rehydrate_priority_type) c.extra('requires_sync', arg_type=get_three_state_flag(), help='Enforce that the service will not return a response until the copy is complete.') c.extra('tier', tier_type) c.extra('tags', tags_type) with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c: from ._validators import get_source_file_or_blob_service_client c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client) c.extra('source_account_name') c.extra('source_account_key') c.extra('source_uri') c.argument('source_sas') c.argument('source_container') c.argument('source_share') with self.argument_context('storage blob delete') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('lease', lease_type) c.extra('snapshot', snapshot_type) c.extra('version_id', version_id_type) c.argument('delete_snapshots', delete_snapshots_type) with self.argument_context('storage blob delete-batch') as c: c.register_precondition_options() c.ignore('container_name') c.argument('source', options_list=('--source', '-s')) c.argument('delete_snapshots', delete_snapshots_type) c.argument('lease_id', help='The active lease id for the blob.') with self.argument_context('storage blob download') as c: c.register_blob_arguments() c.register_precondition_options() c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter(), help='Path of file to write out to.') c.extra('start_range', type=int, help='Start of byte range to use for downloading a section of the blob. If no end_range is given, ' 'all bytes after the start_range will be downloaded. The start_range and end_range params are ' 'inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of blob.') c.extra('end_range', type=int, help='End of byte range to use for downloading a section of the blob. If end_range is given, ' 'start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, ' 'end_range=511 will download first 512 bytes of blob.') c.extra('no_progress', progress_type, validator=add_download_progress_callback) c.extra('snapshot', snapshot_type) c.extra('lease', lease_type) c.extra('version_id', version_id_type) c.extra('max_concurrency', options_list='--max-connections', type=int, default=2, help='The number of parallel connections with which to download.') c.argument('open_mode', help='Mode to use when opening the file. Note that specifying append only open_mode ' 'prevents parallel download. So, max_connections must be set to 1 if this open_mode is used.') c.extra('validate_content', action='store_true', min_api='2016-05-31', help='If true, calculates an MD5 hash for each chunk of the blob. The storage service checks the ' 'hash of the content that has arrived with the hash that was sent. This is primarily valuable for ' 'detecting bitflips on the wire if using http instead of https, as https (the default), will already ' 'validate. Note that this MD5 hash is not stored with the blob. 
Also note that if enabled, the ' 'memory-efficient algorithm will not be used because computing the MD5 hash requires buffering ' 'entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.') with self.argument_context('storage blob download-batch') as c: # c.register_precondition_options() c.ignore('container_name') c.argument('destination', options_list=('--destination', '-d')) c.argument('source', options_list=('--source', '-s')) c.extra('max_concurrency', options_list='--max-connections', type=int, default=2, help='The number of parallel connections with which to download.') c.extra('no_progress', progress_type) with self.argument_context('storage blob exists') as c: c.register_blob_arguments() with self.argument_context('storage blob filter') as c: c.argument('filter_expression', options_list=['--tag-filter']) with self.argument_context('storage blob generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_blob_permissions = self.get_sdk('_models#BlobSasPermissions', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_sas_arguments() c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed' 'using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed' 'using this shared access signature.') c.argument('full_uri', action='store_true', help='Indicate that this command return the full blob URI and the shared access signature token.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help=""Indicates that this command return the SAS signed with the user delegation key. "" ""The expiry parameter and '--auth-mode login' are required if this argument is specified. "") c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name', 'get_container_acl')) c.argument('ip', help='Specify an IP address or a range of IP addresses from which to accept requests. ' 'If the IP address from which the request originates does not match the IP address or address range ' 'specified on the SAS token, the request is not authenticated. For example, specifying ip=168.1.5.65' ' or ip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses.') c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_blob_permissions)), validator=get_permission_validator(t_blob_permissions)) c.argument('snapshot', snapshot_type) c.ignore('sas_token') c.argument('version_id', version_id_type) with self.argument_context('storage blob lease') as c: c.argument('blob_name', arg_type=blob_name_type) with self.argument_context('storage blob lease acquire') as c: c.register_precondition_options() c.register_lease_blob_arguments() c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. 
' 'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.') c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for ' 'a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease ' 'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob lease break') as c: c.register_precondition_options() c.register_lease_blob_arguments() c.argument('lease_break_period', type=int, help=""This is the proposed duration of seconds that the lease should continue before it is broken, "" ""between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining "" ""on the lease. If longer, the time remaining on the lease is used. A new lease will not be "" ""available before the break period has expired, but the lease may be held for longer than the break "" ""period. If this header does not appear with a break operation, a fixed-duration lease breaks after "" ""the remaining lease period elapses, and an infinite lease breaks immediately."") c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob lease change') as c: c.register_precondition_options() c.register_lease_blob_arguments() c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 ' '(Invalid request) if the proposed lease ID is not in the correct format.', required=True) c.extra('lease_id', help='Required if the blob has an active lease.', required=True) c.extra('if_tags_match_condition', tags_condition_type) for item in ['release', 'renew']: with self.argument_context('storage blob lease {}'.format(item)) as c: c.register_precondition_options() c.register_lease_blob_arguments() c.extra('lease_id', help='Required if the blob has an active lease.', required=True) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob list') as c: from .track2_util import get_include_help_string t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_container_arguments() c.argument('delimiter', help='When the request includes this parameter, the operation returns a BlobPrefix element in the ' 'result list that acts as a placeholder for all blobs whose names begin with the same substring ' 'up to the appearance of the delimiter character. The delimiter may be a single character or a ' 'string.') c.argument('include', help=""Specify one or more additional datasets to include in the response. "" ""Options include: {}. 
Can be combined."".format(get_include_help_string(t_blob_include)), validator=validate_included_datasets_v2) c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only blobs whose name begins with the specified prefix.') c.argument('show_next_marker', action='store_true', is_preview=True, help='Show nextMarker in result when specified.') for item in ['show', 'update']: with self.argument_context('storage blob metadata {}'.format(item), resource_type=CUSTOM_DATA_STORAGE_BLOB) \ as c: c.register_blob_arguments() c.register_precondition_options() c.extra('lease', lease_type) c.extra('snapshot', snapshot_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob service-properties delete-policy update') as c: c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.') c.argument('days_retained', type=int, help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].') with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c: c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete', help='Enables soft-delete.') c.argument('delete_retention_period', type=int, arg_group='Soft Delete', help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].') c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(), help='Enables static-website.') c.argument('index_document', help='The default name of the index page under each directory.', arg_group='Static Website') c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website', help='The absolute path of the custom 404 page.') c.argument('default_index_document_path', options_list='--default-index-path', is_preview=True, help='Absolute path of the default index page.', arg_group='Static Website') with self.argument_context('storage blob set-tier', resource_type=CUSTOM_DATA_STORAGE_BLOB) as c: c.register_blob_arguments() c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page'))) c.extra('tier', tier_type, validator=blob_tier_validator, required=True) c.argument('rehydrate_priority', rehydrate_priority_type, is_preview=True) c.extra('version_id', version_id_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob show') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('snapshot', snapshot_type) c.extra('lease', lease_type) c.argument('version_id', version_id_type) with self.argument_context('storage blob snapshot') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('lease', lease_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob undelete', resource_type=CUSTOM_DATA_STORAGE_BLOB) as c: c.register_blob_arguments() with self.argument_context('storage blob tag list') as c: c.register_blob_arguments() c.extra('version_id', version_id_type) c.extra('snapshot', snapshot_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob tag set') as c: c.register_blob_arguments() c.extra('version_id', version_id_type) c.argument('tags', tags_type, required=True) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob upload') as c: from 
._validators import validate_encryption_scope_client_params, validate_upload_blob from .sdkutil import get_blob_types t_blob_content_settings = self.get_sdk('_models#ContentSettings', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_blob_arguments() c.register_precondition_options() c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group=""Content Control"") c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter(), help='Path of the file to upload as the blob content.', validator=validate_upload_blob) c.argument('data', help='The blob data to upload.', required=False, is_preview=True, min_api='2019-02-02') c.argument('length', type=int, help='Number of bytes to read from the stream. This is optional, but should be ' 'supplied for optimal performance. Cooperate with --data.', is_preview=True, min_api='2019-02-02') c.argument('overwrite', arg_type=get_three_state_flag(), arg_group=""Additional Flags"", is_preview=True, help='Whether the blob to be uploaded should overwrite the current data. If True, blob upload ' 'operation will overwrite the existing data. If set to False, the operation will fail with ' 'ResourceExistsError. The exception to the above is with Append blob types: if set to False and the ' 'data already exists, an error will not be raised and the data will be appended to the existing ' 'blob. If set overwrite=True, then the existing append blob will be deleted, and a new one created. ' 'Defaults to False.') c.argument('max_connections', type=int, arg_group=""Additional Flags"", help='Maximum number of parallel connections to use when the blob size exceeds 64MB.') c.extra('maxsize_condition', type=int, arg_group=""Content Control"", help='The max length in bytes permitted for the append blob.') c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type, arg_type=get_enum_type(get_blob_types()), arg_group=""Additional Flags"") c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group=""Content Control"") c.extra('no_progress', progress_type, validator=add_upload_progress_callback, arg_group=""Additional Flags"") c.extra('tier', tier_type, validator=blob_tier_validator, arg_group=""Additional Flags"") c.argument('encryption_scope', validator=validate_encryption_scope_client_params, help='A predefined encryption scope used to encrypt the data on the service.', arg_group=""Additional Flags"") c.argument('lease_id', help='Required if the blob has an active lease.') c.extra('tags', arg_type=tags_type, arg_group=""Additional Flags"") c.argument('metadata', arg_group=""Additional Flags"") c.argument('timeout', arg_group=""Additional Flags"") with self.argument_context('storage blob upload-batch') as c: from .sdkutil import get_blob_types t_blob_content_settings = self.get_sdk('_models#ContentSettings', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_precondition_options() c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control') c.ignore('source_files', 'destination_container_name') c.argument('source', options_list=('--source', '-s')) c.argument('destination', options_list=('--destination', '-d')) c.argument('max_connections', type=int, help='Maximum number of parallel connections to use when the blob size exceeds 64MB.') c.argument('maxsize_condition', arg_group='Content Control') c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control') c.argument('blob_type', 
options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types())) c.extra('no_progress', progress_type) c.extra('tier', tier_type, is_preview=True) c.extra('overwrite', overwrite_type, is_preview=True) with self.argument_context('storage blob query') as c: from ._validators import validate_text_configuration c.register_blob_arguments() c.register_precondition_options() line_separator = CLIArgumentType(help=""The string used to separate records."", default='\n') column_separator = CLIArgumentType(help=""The string used to separate columns."", default=',') quote_char = CLIArgumentType(help=""The string used to quote a specific field."", default='""') record_separator = CLIArgumentType(help=""The string used to separate records."", default='\n') escape_char = CLIArgumentType(help=""The string used as an escape character. Default to empty."", default="""") has_header = CLIArgumentType( arg_type=get_three_state_flag(), help=""Whether the blob data includes headers in the first line. "" ""The default value is False, meaning that the data will be returned inclusive of the first line. "" ""If set to True, the data will be returned exclusive of the first line."", default=False) c.extra('lease', options_list='--lease-id', help='Required if the blob has an active lease.') c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression ' 'is 256KiB. For more information about the expression syntax, please see ' 'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference') c.extra('input_format', arg_type=get_enum_type(['csv', 'json', 'parquet']), validator=validate_text_configuration, help='Serialization type of the data currently stored in the blob. ' 'The default is to treat the blob data as CSV data formatted in the default dialect.' 'The blob data will be reformatted according to that profile when blob format is specified. ' 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; ' 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.') c.extra('output_format', arg_type=get_enum_type(['csv', 'json']), help='Output serialization type for the data stream. ' 'By default the data will be returned as it is represented in the blob. ' 'By providing an output format, the blob data will be reformatted according to that profile. ' 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; ' 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.' 
'By default data with input_format of `parquet` will have the output_format of `csv`') c.extra('in_line_separator', arg_group='Input Json Text Configuration', arg_type=line_separator) c.extra('in_column_separator', arg_group='Input Delimited Text Configuration', arg_type=column_separator) c.extra('in_quote_char', arg_group='Input Delimited Text Configuration', arg_type=quote_char) c.extra('in_record_separator', arg_group='Input Delimited Text Configuration', arg_type=record_separator) c.extra('in_escape_char', arg_group='Input Delimited Text Configuration', arg_type=escape_char) c.extra('in_has_header', arg_group='Input Delimited Text Configuration', arg_type=has_header) c.extra('out_line_separator', arg_group='Output Json Text Configuration', arg_type=line_separator) c.extra('out_column_separator', arg_group='Output Delimited Text Configuration', arg_type=column_separator) c.extra('out_quote_char', arg_group='Output Delimited Text Configuration', arg_type=quote_char) c.extra('out_record_separator', arg_group='Output Delimited Text Configuration', arg_type=record_separator) c.extra('out_escape_char', arg_group='Output Delimited Text Configuration', arg_type=escape_char) c.extra('out_has_header', arg_group='Output Delimited Text Configuration', arg_type=has_header) c.extra('result_file', help='Specify the file path to save result.') c.ignore('input_config') c.ignore('output_config') with self.argument_context('storage container') as c: c.argument('container_name', container_name_type, options_list=('--name', '-n')) with self.argument_context('storage container generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_container_permissions = self.get_sdk('_models#ContainerSasPermissions', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_sas_arguments() c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name', 'get_container_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_container_permissions)), validator=get_permission_validator(t_container_permissions)) c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed' 'using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed' 'using this shared access signature.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help=""Indicates that this command return the SAS signed with the user delegation key. "" ""The expiry parameter and '--auth-mode login' are required if this argument is specified. 
"") c.ignore('sas_token') c.argument('full_uri', action='store_true', is_preview=True, help='Indicate that this command return the full blob URI and the shared access signature token.') with self.argument_context('storage container list') as c: c.extra('timeout', timeout_type) c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only blobs whose name begins with the specified prefix.') c.argument('include_metadata', arg_type=get_three_state_flag(), help='Specify that container metadata to be returned in the response.') c.argument('show_next_marker', action='store_true', is_preview=True, help='Show nextMarker in result when specified.') c.argument('include_deleted', arg_type=get_three_state_flag(), min_api='2020-02-10', help='Specify that deleted containers to be returned in the response. This is for container restore ' 'enabled account. The default value is `False`') with self.argument_context('storage container restore') as c: c.argument('deleted_container_name', options_list=['--name', '-n'], help='Specify the name of the deleted container to restore.') c.argument('deleted_container_version', options_list=['--deleted-version'], help='Specify the version of the deleted container to restore.') c.argument('new_name', help='The new name for the deleted container to be restored to.') c.extra('timeout', timeout_type) ","def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines from argcomplete.completers import FilesCompleter from knack.arguments import ignore_type, CLIArgumentType from .sdkutil import get_table_data_type from .completers import get_storage_name_completion_list t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService') t_file_service = self.get_sdk('file#FileService') t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService') t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional', resource_type=CUSTOM_DATA_STORAGE_BLOB) t_rehydrate_priority = self.get_sdk('_generated.models._azure_blob_storage_enums#RehydratePriority', resource_type=CUSTOM_DATA_STORAGE_BLOB) blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.', completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs', parent='container_name')) container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.', completer=get_storage_name_completion_list(t_base_blob_service, 'list_containers')) directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.', completer=get_storage_name_completion_list(t_file_service, 'list_directories_and_files', parent='share_name')) share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.', completer=get_storage_name_completion_list(t_file_service, 'list_shares')) table_name_type = CLIArgumentType(options_list=['--table-name', '-t'], completer=get_storage_name_completion_list(t_table_service, 'list_tables')) progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.', action='store_true') sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \ 'referenced with --policy-name that specifies this value. Can be combined.' 
lease_type = CLIArgumentType( options_list='--lease-id', help='Required if the blob has an active lease.' ) snapshot_type = CLIArgumentType( help='The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot ' 'to retrieve.' ) tags_type = CLIArgumentType( nargs='*', validator=validate_tags, min_api='2019-12-12', is_preview=True, help='space-separated tags: key[=value] [key[=value] ...]. Tags are case-sensitive. The tag set may ' 'contain at most 10 tags. Tag keys must be between 1 and 128 characters, and tag values must be ' 'between 0 and 256 characters. Valid tag key and value characters include: lowercase and uppercase ' 'letters, digits (0-9), space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals ' '(=), underscore (_).' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') num_results_type = CLIArgumentType( default=5000, validator=validate_storage_data_plane_list, options_list='--num-results', help='Specify the maximum number to return. If the request does not specify ' 'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that ' 'if the listing operation crosses a partition boundary, then the service will return a continuation token ' 'for retrieving the remaining of the results. Provide ""*"" to return all.' ) version_id_type = CLIArgumentType( help='An optional blob version ID. This parameter is only for versioning enabled account. ', min_api='2019-12-12', is_preview=True ) # Fix preview display tier_type = CLIArgumentType( arg_type=get_enum_type(t_blob_tier), min_api='2019-02-02', help='The tier value to set the blob to. For page blob, the tier correlates to the size of the blob ' 'and number of allowed IOPS. Possible values are P10, P15, P20, P30, P4, P40, P50, P6, P60, P70, P80 ' 'and this is only applicable to page blobs on premium storage accounts; For block blob, possible ' 'values are Archive, Cool and Hot. This is only applicable to block blobs on standard storage accounts.' ) rehydrate_priority_type = CLIArgumentType( arg_type=get_enum_type(t_rehydrate_priority), options_list=('--rehydrate-priority', '-r'), min_api='2019-02-02', help='Indicate the priority with which to rehydrate an archived blob.') tags_condition_type = CLIArgumentType( options_list='--tags-condition', min_api='2019-12-12', help='Specify a SQL where clause on blob tags to operate only on blobs with a matching value.') timeout_type = CLIArgumentType( help='Request timeout in seconds. Applies to each call to the service.', type=int ) t_delete_snapshots = self.get_sdk('_generated.models#DeleteSnapshotsOptionType', resource_type=CUSTOM_DATA_STORAGE_BLOB) delete_snapshots_type = CLIArgumentType( arg_type=get_enum_type(t_delete_snapshots), help='Required if the blob has associated snapshots. ""only"": Deletes only the blobs snapshots. ' '""include"": Deletes the blob along with all snapshots.') overwrite_type = CLIArgumentType( arg_type=get_three_state_flag(), help='Whether the blob to be uploaded should overwrite the current data. If True, upload_blob will ' 'overwrite the existing data. 
If set to False, the operation will fail with ResourceExistsError. ' 'The exception to the above is with Append blob types: if set to False and the data already exists, ' 'an error will not be raised and the data will be appended to the existing blob. If set ' 'overwrite=True, then the existing append blob will be deleted, and a new one created. ' 'Defaults to False.') with self.argument_context('storage') as c: c.argument('container_name', container_name_type) c.argument('directory_name', directory_type) c.argument('share_name', share_name_type) c.argument('table_name', table_name_type) c.argument('retry_wait', options_list=('--retry-interval',)) c.ignore('progress_callback') c.argument('metadata', nargs='+', help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.', validator=validate_metadata) c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int) with self.argument_context('storage blob') as c: c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type) c.argument('destination_path', help='The destination path that will be appended to the blob name.') c.argument('socket_timeout', deprecate_info=c.deprecate(hide=True), help='The socket timeout(secs), used by the service to regulate data flow.') with self.argument_context('storage blob copy') as c: c.argument('container_name', container_name_type, options_list=('--destination-container', '-c')) c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'), help='Name of the destination blob. If it exists, it will be overwritten.') with self.argument_context('storage blob copy start') as c: from ._validators import validate_source_url c.register_blob_arguments() c.register_precondition_options() c.register_precondition_options(prefix='source_') c.register_source_uri_arguments(validator=validate_source_url) c.ignore('incremental_copy') c.argument('if_match', options_list=['--destination-if-match']) c.argument('if_modified_since', options_list=['--destination-if-modified-since']) c.argument('if_none_match', options_list=['--destination-if-none-match']) c.argument('if_unmodified_since', options_list=['--destination-if-unmodified-since']) c.argument('if_tags_match_condition', options_list=['--destination-tags-condition']) c.argument('blob_name', options_list=['--destination-blob', '-b'], required=True, help='Name of the destination blob. If it exists, it will be overwritten.') c.argument('container_name', options_list=['--destination-container', '-c'], required=True, help='The container name.') c.extra('destination_lease', options_list='--destination-lease-id', help='The lease ID specified for this header must match the lease ID of the destination blob.
' 'If the request does not include the lease ID or it is not valid, the operation fails with status ' 'code 412 (Precondition Failed).') c.extra('source_lease', options_list='--source-lease-id', arg_group='Copy Source', help='Specify this to perform the Copy Blob operation only if the lease ID given matches the ' 'active lease ID of the source blob.') c.extra('rehydrate_priority', rehydrate_priority_type) c.extra('requires_sync', arg_type=get_three_state_flag(), help='Enforce that the service will not return a response until the copy is complete.') c.extra('tier', tier_type) c.extra('tags', tags_type) with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c: from ._validators import get_source_file_or_blob_service_client c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client) c.extra('source_account_name') c.extra('source_account_key') c.extra('source_uri') c.argument('source_sas') c.argument('source_container') c.argument('source_share') with self.argument_context('storage blob delete') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('lease', lease_type) c.extra('snapshot', snapshot_type) c.extra('version_id', version_id_type) c.argument('delete_snapshots', delete_snapshots_type) with self.argument_context('storage blob delete-batch') as c: c.register_precondition_options() c.ignore('container_name') c.argument('source', options_list=('--source', '-s')) c.argument('delete_snapshots', delete_snapshots_type) c.argument('lease_id', help='The active lease id for the blob.') with self.argument_context('storage blob download') as c: c.register_blob_arguments() c.register_precondition_options() c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter(), help='Path of file to write out to.') c.extra('start_range', type=int, help='Start of byte range to use for downloading a section of the blob. If no end_range is given, ' 'all bytes after the start_range will be downloaded. The start_range and end_range params are ' 'inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of blob.') c.extra('end_range', type=int, help='End of byte range to use for downloading a section of the blob. If end_range is given, ' 'start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, ' 'end_range=511 will download first 512 bytes of blob.') c.extra('no_progress', progress_type, validator=add_download_progress_callback) c.extra('snapshot', snapshot_type) c.extra('lease', lease_type) c.extra('version_id', version_id_type) c.extra('max_concurrency', options_list='--max-connections', type=int, default=2, help='The number of parallel connections with which to download.') c.argument('open_mode', help='Mode to use when opening the file. Note that specifying append only open_mode ' 'prevents parallel download. So, max_connections must be set to 1 if this open_mode is used.') c.extra('validate_content', action='store_true', min_api='2016-05-31', help='If true, calculates an MD5 hash for each chunk of the blob. The storage service checks the ' 'hash of the content that has arrived with the hash that was sent. This is primarily valuable for ' 'detecting bitflips on the wire if using http instead of https, as https (the default), will already ' 'validate. Note that this MD5 hash is not stored with the blob. 
Also note that if enabled, the ' 'memory-efficient algorithm will not be used because computing the MD5 hash requires buffering ' 'entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.') with self.argument_context('storage blob download-batch') as c: # c.register_precondition_options() c.ignore('container_name') c.argument('destination', options_list=('--destination', '-d')) c.argument('source', options_list=('--source', '-s')) c.extra('max_concurrency', options_list='--max-connections', type=int, default=2, help='The number of parallel connections with which to download.') c.extra('no_progress', progress_type) with self.argument_context('storage blob exists') as c: c.register_blob_arguments() with self.argument_context('storage blob filter') as c: c.argument('filter_expression', options_list=['--tag-filter']) with self.argument_context('storage blob generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_blob_permissions = self.get_sdk('_models#BlobSasPermissions', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_sas_arguments() c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed' 'using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed' 'using this shared access signature.') c.argument('full_uri', action='store_true', help='Indicate that this command return the full blob URI and the shared access signature token.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help=""Indicates that this command return the SAS signed with the user delegation key. "" ""The expiry parameter and '--auth-mode login' are required if this argument is specified. "") c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name', 'get_container_acl')) c.argument('ip', help='Specify an IP address or a range of IP addresses from which to accept requests. ' 'If the IP address from which the request originates does not match the IP address or address range ' 'specified on the SAS token, the request is not authenticated. For example, specifying ip=168.1.5.65' ' or ip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses.') c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_blob_permissions)), validator=get_permission_validator(t_blob_permissions)) c.argument('snapshot', snapshot_type) c.ignore('sas_token') c.argument('version_id', version_id_type) with self.argument_context('storage blob lease') as c: c.argument('blob_name', arg_type=blob_name_type) with self.argument_context('storage blob lease acquire') as c: c.register_precondition_options() c.register_lease_blob_arguments() c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. 
' 'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.') c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for ' 'a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease ' 'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob lease break') as c: c.register_precondition_options() c.register_lease_blob_arguments() c.argument('lease_break_period', type=int, help=""This is the proposed duration of seconds that the lease should continue before it is broken, "" ""between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining "" ""on the lease. If longer, the time remaining on the lease is used. A new lease will not be "" ""available before the break period has expired, but the lease may be held for longer than the break "" ""period. If this header does not appear with a break operation, a fixed-duration lease breaks after "" ""the remaining lease period elapses, and an infinite lease breaks immediately."") c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob lease change') as c: c.register_precondition_options() c.register_lease_blob_arguments() c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 ' '(Invalid request) if the proposed lease ID is not in the correct format.', required=True) c.extra('lease_id', help='Required if the blob has an active lease.', required=True) c.extra('if_tags_match_condition', tags_condition_type) for item in ['release', 'renew']: with self.argument_context('storage blob lease {}'.format(item)) as c: c.register_precondition_options() c.register_lease_blob_arguments() c.extra('lease_id', help='Required if the blob has an active lease.', required=True) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob list') as c: from .track2_util import get_include_help_string t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_container_arguments() c.argument('delimiter', help='When the request includes this parameter, the operation returns a BlobPrefix element in the ' 'result list that acts as a placeholder for all blobs whose names begin with the same substring ' 'up to the appearance of the delimiter character. The delimiter may be a single character or a ' 'string.') c.argument('include', help=""Specify one or more additional datasets to include in the response. "" ""Options include: {}. 
Can be combined."".format(get_include_help_string(t_blob_include)), validator=validate_included_datasets_v2) c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only blobs whose name begins with the specified prefix.') c.argument('show_next_marker', action='store_true', is_preview=True, help='Show nextMarker in result when specified.') for item in ['show', 'update']: with self.argument_context('storage blob metadata {}'.format(item), resource_type=CUSTOM_DATA_STORAGE_BLOB) \ as c: c.register_blob_arguments() c.register_precondition_options() c.extra('lease', lease_type) c.extra('snapshot', snapshot_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob service-properties delete-policy update') as c: c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.') c.argument('days_retained', type=int, help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].') with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c: c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete', help='Enables soft-delete.') c.argument('delete_retention_period', type=int, arg_group='Soft Delete', help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].') c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(), help='Enables static-website.') c.argument('index_document', help='The default name of the index page under each directory.', arg_group='Static Website') c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website', help='The absolute path of the custom 404 page.') c.argument('default_index_document_path', options_list='--default-index-path', is_preview=True, help='Absolute path of the default index page.', arg_group='Static Website') with self.argument_context('storage blob set-tier', resource_type=CUSTOM_DATA_STORAGE_BLOB) as c: c.register_blob_arguments() c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page'))) c.extra('tier', tier_type, validator=blob_tier_validator, required=True) c.argument('rehydrate_priority', rehydrate_priority_type, is_preview=True) c.extra('version_id', version_id_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob show') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('snapshot', snapshot_type) c.extra('lease', lease_type) c.argument('version_id', version_id_type) with self.argument_context('storage blob snapshot') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('lease', lease_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob undelete', resource_type=CUSTOM_DATA_STORAGE_BLOB) as c: c.register_blob_arguments() with self.argument_context('storage blob tag list') as c: c.register_blob_arguments() c.extra('version_id', version_id_type) c.extra('snapshot', snapshot_type) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob tag set') as c: c.register_blob_arguments() c.extra('version_id', version_id_type) c.argument('tags', tags_type, required=True) c.extra('if_tags_match_condition', tags_condition_type) with self.argument_context('storage blob upload') as c: from 
._validators import validate_encryption_scope_client_params, validate_upload_blob from .sdkutil import get_blob_types t_blob_content_settings = self.get_sdk('_models#ContentSettings', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_blob_arguments() c.register_precondition_options() c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group=""Content Control"") c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter(), help='Path of the file to upload as the blob content.', validator=validate_upload_blob) c.argument('data', help='The blob data to upload.', required=False, is_preview=True, min_api='2019-02-02') c.argument('length', type=int, help='Number of bytes to read from the stream. This is optional, but should be ' 'supplied for optimal performance. Cooperate with --data.', is_preview=True, min_api='2019-02-02') c.argument('overwrite', arg_type=get_three_state_flag(), arg_group=""Additional Flags"", is_preview=True, help='Whether the blob to be uploaded should overwrite the current data. If True, blob upload ' 'operation will overwrite the existing data. If set to False, the operation will fail with ' 'ResourceExistsError. The exception to the above is with Append blob types: if set to False and the ' 'data already exists, an error will not be raised and the data will be appended to the existing ' 'blob. If set overwrite=True, then the existing append blob will be deleted, and a new one created. ' 'Defaults to False.') c.argument('max_connections', type=int, arg_group=""Additional Flags"", help='Maximum number of parallel connections to use when the blob size exceeds 64MB.') c.extra('maxsize_condition', type=int, arg_group=""Content Control"", help='The max length in bytes permitted for the append blob.') c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type, arg_type=get_enum_type(get_blob_types()), arg_group=""Additional Flags"") c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group=""Content Control"") c.extra('no_progress', progress_type, validator=add_upload_progress_callback, arg_group=""Additional Flags"") c.extra('tier', tier_type, validator=blob_tier_validator, arg_group=""Additional Flags"") c.argument('encryption_scope', validator=validate_encryption_scope_client_params, help='A predefined encryption scope used to encrypt the data on the service.', arg_group=""Additional Flags"") c.argument('lease_id', help='Required if the blob has an active lease.') c.extra('tags', arg_type=tags_type, arg_group=""Additional Flags"") c.argument('metadata', arg_group=""Additional Flags"") c.argument('timeout', arg_group=""Additional Flags"") with self.argument_context('storage blob upload-batch') as c: from .sdkutil import get_blob_types t_blob_content_settings = self.get_sdk('_models#ContentSettings', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_precondition_options() c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control') c.ignore('source_files', 'destination_container_name') c.argument('source', options_list=('--source', '-s')) c.argument('destination', options_list=('--destination', '-d')) c.argument('max_connections', type=int, help='Maximum number of parallel connections to use when the blob size exceeds 64MB.') c.argument('maxsize_condition', arg_group='Content Control') c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control') c.argument('blob_type', 
options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types())) c.extra('no_progress', progress_type) c.extra('tier', tier_type, is_preview=True) c.extra('overwrite', overwrite_type, is_preview=True) with self.argument_context('storage blob query') as c: from ._validators import validate_text_configuration c.register_blob_arguments() c.register_precondition_options() line_separator = CLIArgumentType(help=""The string used to separate records."", default='\n') column_separator = CLIArgumentType(help=""The string used to separate columns."", default=',') quote_char = CLIArgumentType(help=""The string used to quote a specific field."", default='""') record_separator = CLIArgumentType(help=""The string used to separate records."", default='\n') escape_char = CLIArgumentType(help=""The string used as an escape character. Default to empty."", default="""") has_header = CLIArgumentType( arg_type=get_three_state_flag(), help=""Whether the blob data includes headers in the first line. "" ""The default value is False, meaning that the data will be returned inclusive of the first line. "" ""If set to True, the data will be returned exclusive of the first line."", default=False) c.extra('lease', options_list='--lease-id', help='Required if the blob has an active lease.') c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression ' 'is 256KiB. For more information about the expression syntax, please see ' 'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference') c.extra('input_format', arg_type=get_enum_type(['csv', 'json', 'parquet']), validator=validate_text_configuration, help='Serialization type of the data currently stored in the blob. ' 'The default is to treat the blob data as CSV data formatted in the default dialect.' 'The blob data will be reformatted according to that profile when blob format is specified. ' 'If you choose `json`, please specify `Input Json Text Configuration Arguments` accordingly; ' 'If you choose `csv`, please specify `Input Delimited Text Configuration Arguments`.') c.extra('output_format', arg_type=get_enum_type(['csv', 'json']), help='Output serialization type for the data stream. ' 'By default the data will be returned as it is represented in the blob. ' 'By providing an output format, the blob data will be reformatted according to that profile. ' 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; ' 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.' 
'By default data with input_format of `parquet` will have the output_format of `csv`') c.extra('in_line_separator', arg_group='Input Json Text Configuration', arg_type=line_separator) c.extra('in_column_separator', arg_group='Input Delimited Text Configuration', arg_type=column_separator) c.extra('in_quote_char', arg_group='Input Delimited Text Configuration', arg_type=quote_char) c.extra('in_record_separator', arg_group='Input Delimited Text Configuration', arg_type=record_separator) c.extra('in_escape_char', arg_group='Input Delimited Text Configuration', arg_type=escape_char) c.extra('in_has_header', arg_group='Input Delimited Text Configuration', arg_type=has_header) c.extra('out_line_separator', arg_group='Output Json Text Configuration', arg_type=line_separator) c.extra('out_column_separator', arg_group='Output Delimited Text Configuration', arg_type=column_separator) c.extra('out_quote_char', arg_group='Output Delimited Text Configuration', arg_type=quote_char) c.extra('out_record_separator', arg_group='Output Delimited Text Configuration', arg_type=record_separator) c.extra('out_escape_char', arg_group='Output Delimited Text Configuration', arg_type=escape_char) c.extra('out_has_header', arg_group='Output Delimited Text Configuration', arg_type=has_header) c.extra('result_file', help='Specify the file path to save result.') c.ignore('input_config') c.ignore('output_config') with self.argument_context('storage container') as c: c.argument('container_name', container_name_type, options_list=('--name', '-n')) with self.argument_context('storage container generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_container_permissions = self.get_sdk('_models#ContainerSasPermissions', resource_type=CUSTOM_DATA_STORAGE_BLOB) c.register_sas_arguments() c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name', 'get_container_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_container_permissions)), validator=get_permission_validator(t_container_permissions)) c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed' 'using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed' 'using this shared access signature.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help=""Indicates that this command return the SAS signed with the user delegation key. "" ""The expiry parameter and '--auth-mode login' are required if this argument is specified. 
"") c.ignore('sas_token') c.argument('full_uri', action='store_true', is_preview=True, help='Indicate that this command return the full blob URI and the shared access signature token.') with self.argument_context('storage container list') as c: c.extra('timeout', timeout_type) c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only blobs whose name begins with the specified prefix.') c.argument('include_metadata', arg_type=get_three_state_flag(), help='Specify that container metadata to be returned in the response.') c.argument('show_next_marker', action='store_true', is_preview=True, help='Show nextMarker in result when specified.') c.argument('include_deleted', arg_type=get_three_state_flag(), min_api='2020-02-10', help='Specify that deleted containers to be returned in the response. This is for container restore ' 'enabled account. The default value is `False`') with self.argument_context('storage container restore') as c: c.argument('deleted_container_name', options_list=['--name', '-n'], help='Specify the name of the deleted container to restore.') c.argument('deleted_container_version', options_list=['--deleted-version'], help='Specify the version of the deleted container to restore.') c.argument('new_name', help='The new name for the deleted container to be restored to.') c.extra('timeout', timeout_type) " 1723,"def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. 
References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1,9,9],[1,9,9],[1,9,9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression().fit(X,y) LogisticRegression(...) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) {'importances_mean': array([0.5, 0. , 0. ]), 'importances_std': array([0.16666667, 0. , 0. ]), 'importances': array([[0.33333333, 0.66666667], [0. , 0. ], [0. , 0. ]])} >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) ","def permutation_importance(estimator, X, y, scoring=None, n_repeats=5, n_jobs=None, random_state=None): """"""Permutation importance for feature evaluation [BRE]_. The :term:`estimator` is required to be a fitted estimator. `X` can be the data set used to train the estimator or a hold-out set. The permutation importance of a feature is calculated as follows. First, a baseline metric, defined by :term:`scoring`, is evaluated on a (potentially different) dataset defined by the `X`. Next, a feature column from the validation set is permuted and the metric is evaluated again. The permutation importance is defined to be the difference between the baseline metric and metric from permutating the feature column. Read more in the :ref:`User Guide `. Parameters ---------- estimator : object An estimator that has already been :term:`fitted` and is compatible with :term:`scorer`. X : ndarray or DataFrame, shape (n_samples, n_features) Data on which permutation importance will be computed. y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) Targets for supervised or `None` for unsupervised. scoring : string, callable or None, default=None Scorer to use. It can be a single string (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`). If None, the estimator's default scorer is used. n_repeats : int, default=5 Number of times to permute a feature. n_jobs : int or None, default=None The number of jobs to use for the computation. `None` means 1 unless in a :obj:`joblib.parallel_backend` context. `-1` means using all processors. See :term:`Glossary ` for more details. random_state : int, RandomState instance, or None, default=None Pseudo-random number generator to control the permutations of each feature. See :term:`random_state`. 
Returns ------- result : Bunch Dictionary-like object, with attributes: importances_mean : ndarray, shape (n_features, ) Mean of feature importance over `n_repeats`. importances_std : ndarray, shape (n_features, ) Standard deviation over `n_repeats`. importances : ndarray, shape (n_features, n_repeats) Raw permutation importance scores. References ---------- .. [BRE] L. Breiman, ""Random Forests"", Machine Learning, 45(1), 5-32, 2001. https://doi.org/10.1023/A:1010933404324 Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.inspection import permutation_importance >>> X = [[1, 9, 9], [1, 9, 9], [1, 9, 9], ... [0,9,9],[0,9,9],[0,9,9]] >>> y = [1,1,1,0,0,0] >>> clf = LogisticRegression().fit(X,y) LogisticRegression(...) >>> result = permutation_importance(clf, X, y, n_repeats=10, ... random_state=0) {'importances_mean': array([0.5, 0. , 0. ]), 'importances_std': array([0.16666667, 0. , 0. ]), 'importances': array([[0.33333333, 0.66666667], [0. , 0. ], [0. , 0. ]])} >>> result.importances_mean array([0.5, 0. , 0. ]) >>> result.importances_std array([0.2236068, 0. , 0. ]) """""" if not hasattr(X, ""iloc""): X = check_array(X, force_all_finite='allow-nan', dtype=None) # Precompute random seed from the random state to be used # to get a fresh independent RandomState instance for each # parallel call to _calculate_permutation_scores, irrespective of # the fact that variables are shared or not depending on the active # joblib backend (sequential, thread-based or process-based). random_state = check_random_state(random_state) random_seed = random_state.randint(np.iinfo(np.int32).max + 1) scorer = check_scoring(estimator, scoring=scoring) baseline_score = scorer(estimator, X, y) scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)( estimator, X, y, col_idx, random_seed, n_repeats, scorer ) for col_idx in range(X.shape[1])) importances = baseline_score - np.array(scores) return Bunch(importances_mean=np.mean(importances, axis=1), importances_std=np.std(importances, axis=1), importances=importances) " 56690,"def get_last_updated_time() -> Optional[str]: """"""Gets date of last import job. Last updated dates are read from a local file. If no file exists, None is returned. Last updated date is expected to be in HTTP-date format: https://httpwg.org/specs/rfc7231.html#http.date returns last updated date string or None """""" last_updated = None if path.exists(LAST_UPDATED_TIME): with open(LAST_UPDATED_TIME, 'r') as f: last_updated = f.readline() return last_updated ","def get_last_updated_time() -> Optional[str]: """"""Gets date of last import job. Last updated dates are read from a local file. If no file exists, None is returned. 
Last updated date is expected to be in HTTP-date format: https://httpwg.org/specs/rfc7231.html#http.date returns last updated date string or None """""" if path.exists(LAST_UPDATED_TIME): with open(LAST_UPDATED_TIME, 'r') as f: return f.readline() return None " 17820,"def download(): response = urllib2.urlopen('https://raw.githubusercontent.com/dictation-toolbox/caster/develop/_caster.py') html = response.read() directory = finddirectory() filename = directory + '\\_caster.py' f = open(filename, 'w') f.write(html) ","def download(): response = urllib2.urlopen('https://raw.githubusercontent.com/dictation-toolbox/caster/develop/_caster.py') html = response.read() directory = finddirectory() filename = os.path.join(directory, '_caster.py') f = open(filename, 'w') f.write(html) " 17630,"def process_from_nodes(nodes): if not nodes: return node_names = [] for node in nodes: if hasattr(node, ""name""): node_names.append(node.name) else: print(""Something not very important happened in Blender memory"", node, type(node)) ng = nodes[0].id_data check_fake_user(ng) update_list = make_tree_from_nodes(node_names, ng) reset_error_some_nodes(ng, update_list) do_update(update_list, ng.nodes) ","def process_from_nodes(nodes): if not nodes: return node_names = [] for node in nodes: if hasattr(node, ""name""): node_names.append(node.name) else: print(""Something not very important happened in Blender memory"", node, type(node)) ng = nodes[0].id_data ng.use_fake_user = True update_list = make_tree_from_nodes(node_names, ng) reset_error_some_nodes(ng, update_list) do_update(update_list, ng.nodes) " 50044,"def _powershell_profile_content(conda_prefix): if on_win: conda_exe = join(conda_prefix, 'Scripts', 'conda.exe') else: conda_exe = join(conda_prefix, 'bin', 'conda') conda_powershell_module = dals("""""" #region conda initialize # !! Contents within this block are managed by 'conda init' !! If (Test-Path ""{conda_exe}"") { (& ""{conda_exe}"" ""shell.powershell"" ""hook"") | Out-String | Invoke-Expression } #endregion """""".format(conda_exe=conda_exe)) return conda_powershell_module ","def _powershell_profile_content(conda_prefix): if on_win: conda_exe = join(conda_prefix, 'Scripts', 'conda.exe') else: conda_exe = join(conda_prefix, 'bin', 'conda') conda_powershell_module = dals("""""" #region conda initialize # !! Contents within this block are managed by 'conda init' !! If (Test-Path ""{conda_exe}"") {{ (& ""{conda_exe}"" ""shell.powershell"" ""hook"") | Out-String | Invoke-Expression }} #endregion """""".format(conda_exe=conda_exe)) return conda_powershell_module " 12766,"def _preprocessed_interpreter_search_paths( env_tgt: EnvironmentTarget, _search_paths: Iterable[str], is_default: bool, ) -> tuple[str, ...]: """"""Checks for special search path strings, and errors if any are invalid for the environment. This will return: * The search paths, unaltered, for local/undefined environments, OR * The search paths, with invalid tokens removed, if the provided value was unaltered from the default value in the options system (see `PythonBootstrapSubsystem.EnvironmentAware.search_paths`) * The search paths unaltered, if the search paths do not contain tokens invalid for this environment If the environment is non-local and there are invalid tokens for those environments, raise `ValueError`.
"""""" env = env_tgt.val search_paths = tuple(_search_paths) if isinstance(env, LocalEnvironmentTarget): return search_paths if env is None: return search_paths not_allowed = {"""", """", """", """", """"} if is_default: # Strip out the not-allowed special strings from search_paths. # An error will occur on the off chance the non-local environment expects pyenv # but there's nothing we can do here to detect it. return tuple(path for path in search_paths if path not in not_allowed) any_not_allowed = set(search_paths) & not_allowed if any_not_allowed: env_type = type(env) raise ValueError( f""`[python-bootstrap].search_paths` is configured to use local Python discovery "" f""tools, which do not work in {env_type.__name__} runtime environments. To fix this, "" f""set the value of `python_bootstrap_search_path` in the {env.alias} defined at "" f""`{env.address}` to contain only hardcoded paths or the `` special string."" ) return search_paths ","def _preprocessed_interpreter_search_paths( env_tgt: EnvironmentTarget, _search_paths: Iterable[str], is_default: bool, ) -> tuple[str, ...]: """"""Checks for special search path strings, and errors if any are invalid for the environment. This will return: * The search paths, unaltered, for local/undefined environments, OR * The search paths, with invalid tokens removed, if the provided value was unaltered from the default value in the options system (see `PythonBootstrapSubsystem.EnvironmentAware.search_paths`) * The search paths unaltered, if the search paths do not contain tokens invalid for this environment If the environment is non-local and there are invalid tokens for those environments, raise `ValueError`. """""" env = env_tgt.val search_paths = tuple(_search_paths) if isinstance(env, LocalEnvironmentTarget): return search_paths if env is None: return search_paths not_allowed = {"""", """", """", """", """"} if is_default: # Strip out the not-allowed special strings from search_paths. # An error will occur on the off chance the non-local environment expects pyenv # but there's nothing we can do here to detect it. return tuple(path for path in search_paths if path not in not_allowed) any_not_allowed = set(search_paths) & not_allowed if any_not_allowed: env_type = type(env) raise ValueError( f""`[python-bootstrap].search_paths` is configured to use local Python discovery "" f""tools, which do not work in {env_type.__name__} runtime environments. 
To fix this, "" f""set the value of `python_bootstrap_search_path` in the environment `{env.alias}` (defined at "" f""`{env.address}` to contain only hardcoded paths or the `` special string."" ) return search_paths " 47314,"def check_marian_cfg_assumptions(marian_cfg): assumed_settings = { ""tied-embeddings-all"": True, ""layer-normalization"": False, ""right-left"": False, ""transformer-ffn-depth"": 2, ""transformer-aan-depth"": 2, ""transformer-no-projection"": False, ""transformer-postprocess-emb"": ""d"", ""transformer-postprocess"": ""dan"", # Dropout, add, normalize ""transformer-preprocess"": """", ""type"": ""transformer"", ""ulr-dim-emb"": 0, ""dec-cell-base-depth"": 2, ""dec-cell-high-depth"": 1, ""transformer-aan-nogate"": False, } for k, v in assumed_settings.items(): actual = marian_cfg[k] if not (actual == v): raise ValueError(f""Unexpected config value for {k} expected {v} got {actual}"") check_equal(marian_cfg, ""transformer-ffn-activation"", ""transformer-aan-activation"") check_equal(marian_cfg, ""transformer-ffn-depth"", ""transformer-aan-depth"") check_equal(marian_cfg, ""transformer-dim-ffn"", ""transformer-dim-aan"") ","def check_marian_cfg_assumptions(marian_cfg): assumed_settings = { ""tied-embeddings-all"": True, ""layer-normalization"": False, ""right-left"": False, ""transformer-ffn-depth"": 2, ""transformer-aan-depth"": 2, ""transformer-no-projection"": False, ""transformer-postprocess-emb"": ""d"", ""transformer-postprocess"": ""dan"", # Dropout, add, normalize ""transformer-preprocess"": """", ""type"": ""transformer"", ""ulr-dim-emb"": 0, ""dec-cell-base-depth"": 2, ""dec-cell-high-depth"": 1, ""transformer-aan-nogate"": False, } for k, v in assumed_settings.items(): actual = marian_cfg[k] if actual != v: raise ValueError(f""Unexpected config value for {k} expected {v} got {actual}"") check_equal(marian_cfg, ""transformer-ffn-activation"", ""transformer-aan-activation"") check_equal(marian_cfg, ""transformer-ffn-depth"", ""transformer-aan-depth"") check_equal(marian_cfg, ""transformer-dim-ffn"", ""transformer-dim-aan"") " 41363,"def _test_unfccc_tier1(): # test that UNFCCC API returns expected data and units exp = IamDataFrame( UNFCCC_DF, **INDEX_ARGS, region=""DEU"", variable=""Emissions|CH4|Agriculture"", unit=""kt CH4"", ) obs = read_unfccc(party_code=""DEU"", gases=[""CH4""], tier=1) # assert that the data is similar horizon = [1990, 1991, 1992] assert_iamframe_equal(obs.filter(year=horizon, variable=""*Agri*""), exp) # assert that variables are similar types = [ ""Agriculture"", ""Energy"", ""Industrial Processes and Product Use"", ""Land Use, Land-Use Change and Forestry"", ""Waste"", ] print([f""Emissions|CH4|{i}"" for i in types]) print(obs.variable) assert obs.variable == [f""Emissions|CH4|{i}"" for i in types] # assert that the unit is merged as expected assert obs.unit == [""kt CH4""] ","def test_unfccc_tier1(): # test that UNFCCC API returns expected data and units exp = IamDataFrame( UNFCCC_DF, **INDEX_ARGS, region=""DEU"", variable=""Emissions|CH4|Agriculture"", unit=""kt CH4"", ) obs = read_unfccc(party_code=""DEU"", gases=[""CH4""], tier=1) # assert that the data is similar horizon = [1990, 1991, 1992] assert_iamframe_equal(obs.filter(year=horizon, variable=""*Agri*""), exp) # assert that variables are similar types = [ ""Agriculture"", ""Energy"", ""Industrial Processes and Product Use"", ""Land Use, Land-Use Change and Forestry"", ""Waste"", ] print([f""Emissions|CH4|{i}"" for i in types]) print(obs.variable) assert obs.variable == 
[f""Emissions|CH4|{i}"" for i in types] # assert that the unit is merged as expected assert obs.unit == [""kt CH4""] " 20024,"def cluster_contour_splitimg(img, grouped_contour_indexes, contours, hierarchy, outdir=None, file=None, filenames=None): """""" This function takes clustered contours and splits them into multiple images, also does a check to make sure that the number of inputted filenames matches the number of clustered contours. Inputs: img = image data grouped_contour_indexes = output of cluster_contours, indexes of clusters of contours contours = contours to cluster, output of cluster_contours hierarchy = hierarchy of contours, output of find_objects outdir = out directory for output images file = the name of the input image to use as a plantcv name, output of filename from read_image function filenames = input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes) Returns: output_path = array of paths to output images :param img: numpy.ndarray :param grouped_contour_indexes: list :param contours: list :param hierarchy: numpy.ndarray :param outdir: str :param file: str :param filenames: str :return output_path: str """""" params.device += 1 sys.stderr.write( 'This function has been updated to include object hierarchy so object holes can be included\n') i = datetime.now() timenow = i.strftime('%m-%d-%Y_%H:%M:%S') if file is None: filebase = timenow else: filebase = os.path.splitext(file)[0] if filenames is None: namelist = [] for x in range(0, len(grouped_contour_indexes)): namelist.append(x) else: with open(filenames, 'r') as n: namelist = n.read().splitlines() n.close() # make sure the number of objects matches the namelist, and if not, remove the smallest grouped countor # removing contours is not ideal but the lists don't match there is a warning to check output if len(namelist) == len(grouped_contour_indexes): corrected_contour_indexes = grouped_contour_indexes elif len(namelist) < len(grouped_contour_indexes): print(""Warning number of names is less than number of grouped contours, attempting to fix, to double check "" ""output"") diff = len(grouped_contour_indexes) - len(namelist) size = [] for i, x in enumerate(grouped_contour_indexes): totallen = [] for a in x: g = i la = len(contours[a]) totallen.append(la) sumlen = np.sum(totallen) size.append((sumlen, g, i)) dtype = [('len', int), ('group', list), ('index', int)] lencontour = np.array(size, dtype=dtype) lencontour = np.sort(lencontour, order='len') rm_contour = lencontour[diff:] rm_contour = np.sort(rm_contour, order='group') corrected_contour_indexes = [] for x in rm_contour: index = x[2] corrected_contour_indexes.append(grouped_contour_indexes[index]) elif len(namelist) > len(grouped_contour_indexes): print(""Warning number of names is more than number of grouped contours, double check output"") diff = len(namelist) - len(grouped_contour_indexes) namelist = namelist[0:-diff] corrected_contour_indexes = grouped_contour_indexes # create filenames group_names = [] group_names1 = [] for i, x in enumerate(namelist): plantname = str(filebase) + '_' + str(x) + '_p' + str(i) + '.png' maskname = str(filebase) + '_' + str(x) + '_p' + str(i) + '_mask.png' group_names.append(plantname) group_names1.append(maskname) # split image output_path = [] output_imgs = [] output_masks = [] for y, x in enumerate(corrected_contour_indexes): if outdir is not None: savename = os.path.join(str(outdir), group_names[y]) savename1 = os.path.join(str(outdir), group_names1[y]) else: savename = 
os.path.join(""."", group_names[y]) savename1 = os.path.join(""."", group_names1[y]) iy, ix, iz = np.shape(img) mask = np.zeros((iy, ix, 3), dtype=np.uint8) masked_img = np.copy(img) for a in x: if hierarchy[0][a][3] > -1: cv2.drawContours(mask, contours, a, (0, 0, 0), -1, lineType=8, hierarchy=hierarchy) else: cv2.drawContours(mask, contours, a, (255, 255, 255), -1, lineType=8, hierarchy=hierarchy) mask_binary = mask[:, :, 0] if np.sum(mask_binary) == 0: pass else: retval, mask_binary = cv2.threshold(mask_binary, 254, 255, cv2.THRESH_BINARY) masked1 = apply_mask(masked_img, mask_binary, 'white') output_imgs.append(masked1) output_masks.append(mask_binary) if outdir is not None: print_image(masked1, savename) print_image(mask_binary, savename1) output_path.append(savename) if params.debug == 'print': print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + '_clusters.png')) print_image(mask_binary, os.path.join(params.debug_outdir, str(params.device) + '_clusters_mask.png')) elif params.debug == 'plot': plot_image(masked1) plot_image(mask_binary, cmap='gray') return output_path, output_imgs, output_masks ","def cluster_contour_splitimg(img, grouped_contour_indexes, contours, hierarchy, outdir=None, file=None, filenames=None): """""" This function takes clustered contours and splits them into multiple images, also does a check to make sure that the number of inputted filenames matches the number of clustered contours. Inputs: img = image data grouped_contour_indexes = output of cluster_contours, indexes of clusters of contours contours = contours to cluster, output of cluster_contours hierarchy = hierarchy of contours, output of find_objects outdir = out directory for output images file = the name of the input image to use as a plantcv name, output of filename from read_image function filenames = input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes) Returns: output_path = array of paths to output images :param img: numpy.ndarray :param grouped_contour_indexes: list :param contours: list :param hierarchy: numpy.ndarray :param outdir: str :param file: str :param filenames: str :return output_path: str """""" params.device += 1 sys.stderr.write( 'This function has been updated to include object hierarchy so object holes can be included\n') i = datetime.now() timenow = i.strftime('%m-%d-%Y_%H:%M:%S') if file is None: filebase = timenow else: filebase = os.path.splitext(file)[0] if filenames is None: namelist = [] for x in range(0, len(grouped_contour_indexes)): namelist.append(x) else: with open(filenames, 'r') as n: namelist = n.read().splitlines() n.close() # make sure the number of objects matches the namelist, and if not, remove the smallest grouped countor # removing contours is not ideal but the lists don't match there is a warning to check output if len(namelist) == len(grouped_contour_indexes): corrected_contour_indexes = grouped_contour_indexes elif len(namelist) < len(grouped_contour_indexes): print(""Warning number of names is less than number of grouped contours, attempting to fix, to double check "" ""output"") diff = len(grouped_contour_indexes) - len(namelist) size = [] for i, x in enumerate(grouped_contour_indexes): totallen = [] for a in x: g = i la = len(contours[a]) totallen.append(la) sumlen = np.sum(totallen) size.append((sumlen, g, i)) dtype = [('len', int), ('group', list), ('index', int)] lencontour = np.array(size, dtype=dtype) lencontour = np.sort(lencontour, order='len') rm_contour = 
lencontour[diff:] rm_contour = np.sort(rm_contour, order='group') corrected_contour_indexes = [] for x in rm_contour: index = x[2] corrected_contour_indexes.append(grouped_contour_indexes[index]) elif len(namelist) > len(grouped_contour_indexes): print(""Warning number of names is more than number of grouped contours, double check output"") diff = len(namelist) - len(grouped_contour_indexes) namelist = namelist[0:-diff] corrected_contour_indexes = grouped_contour_indexes # create filenames group_names = [] group_names1 = [] for i, x in enumerate(namelist): plantname = str(filebase) + '_' + str(x) + '_p' + str(i) + '.png' maskname = str(filebase) + '_' + str(x) + '_p' + str(i) + '_mask.png' group_names.append(plantname) group_names1.append(maskname) # split image output_path = [] output_imgs = [] output_masks = [] for y, x in enumerate(corrected_contour_indexes): if outdir is not None: savename = os.path.join(str(outdir), group_names[y]) savename1 = os.path.join(str(outdir), group_names1[y]) else: savename = os.path.join(""."", group_names[y]) savename1 = os.path.join(""."", group_names1[y]) iy, ix = np.shape(img)[:2] mask = np.zeros((iy, ix, 3), dtype=np.uint8) masked_img = np.copy(img) for a in x: if hierarchy[0][a][3] > -1: cv2.drawContours(mask, contours, a, (0, 0, 0), -1, lineType=8, hierarchy=hierarchy) else: cv2.drawContours(mask, contours, a, (255, 255, 255), -1, lineType=8, hierarchy=hierarchy) mask_binary = mask[:, :, 0] if np.sum(mask_binary) == 0: pass else: retval, mask_binary = cv2.threshold(mask_binary, 254, 255, cv2.THRESH_BINARY) masked1 = apply_mask(masked_img, mask_binary, 'white') output_imgs.append(masked1) output_masks.append(mask_binary) if outdir is not None: print_image(masked1, savename) print_image(mask_binary, savename1) output_path.append(savename) if params.debug == 'print': print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + '_clusters.png')) print_image(mask_binary, os.path.join(params.debug_outdir, str(params.device) + '_clusters_mask.png')) elif params.debug == 'plot': plot_image(masked1) plot_image(mask_binary, cmap='gray') return output_path, output_imgs, output_masks " 43675,"def convert_observable(qubit_observable, wires=None): r""""""Converts an OpenFermion :class:`~.QubitOperator` operator to a Pennylane VQE observable **Example usage** >>> h_of = decompose_hamiltonian('h2', './pyscf/sto-3g/') >>> h_pl = convert_observable(h_of) >>> h_pl.coeffs [-0.04207898+0.j 0.17771287+0.j 0.17771287+0.j -0.2427428 +0.j -0.2427428 +0.j 0.17059738+0.j 0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.12293305+0.j 0.16768319+0.j 0.16768319+0.j 0.12293305+0.j 0.17627641+0.j] Args: qubit_observable (QubitOperator): Observable represented as an OpenFermion ``QubitOperator`` wires (Wires, list, tuple, dict): Custom wire mapping for connecting to Pennylane ansatz. For types Wires/list/tuple, each item in the iterable represents a wire label corresponding to the qubit number equal to its index. For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted. If None, will use identiy map. Defaults to None. Returns: (pennylane.Hamiltonian): Pennylane VQE observable. PennyLane :class:`~.Hamiltonian` represents any operator expressed as linear combinations of observables, e.g., :math:`\sum_{k=0}^{N-1} c_k O_k`. 
"""""" return Hamiltonian(*_qubit_operator_to_terms(qubit_observable, wires=wires)) ","def convert_observable(qubit_observable, wires=None): r""""""Converts an OpenFermion :class:`~.QubitOperator` operator to a Pennylane VQE observable **Example** >>> h_of = decompose_hamiltonian('h2', './pyscf/sto-3g/') >>> h_pl = convert_observable(h_of) >>> h_pl.coeffs [-0.04207898+0.j 0.17771287+0.j 0.17771287+0.j -0.2427428 +0.j -0.2427428 +0.j 0.17059738+0.j 0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.12293305+0.j 0.16768319+0.j 0.16768319+0.j 0.12293305+0.j 0.17627641+0.j] Args: qubit_observable (QubitOperator): Observable represented as an OpenFermion ``QubitOperator`` wires (Wires, list, tuple, dict): Custom wire mapping for connecting to Pennylane ansatz. For types Wires/list/tuple, each item in the iterable represents a wire label corresponding to the qubit number equal to its index. For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted. If None, will use identiy map. Defaults to None. Returns: (pennylane.Hamiltonian): Pennylane VQE observable. PennyLane :class:`~.Hamiltonian` represents any operator expressed as linear combinations of observables, e.g., :math:`\sum_{k=0}^{N-1} c_k O_k`. """""" return Hamiltonian(*_qubit_operator_to_terms(qubit_observable, wires=wires)) " 54091,"def attach_load(n): substation_lv_i = n.buses.index[n.buses['substation_lv']] regions = (gpd.read_file(snakemake.input.regions).set_index('name') .reindex(substation_lv_i)) opsd_load = load_opsd_loaddata(load_fn=snakemake.input.load, countries=snakemake.config['countries']) # Scalling data according to scalling factor in config.yaml logger.info(f""Load data scalled with scalling factior {snakemake.config['load']['scaling_factor']}."") opsd_load = opsd_load * snakemake.config.get('load', {}).get('scaling_factor', 1.0) # Convert to naive UTC (has to be explicit since pandas 0.24) opsd_load.index = opsd_load.index.tz_localize(None) nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') def normed(x): return x.divide(x.sum()) def upsample(cntry, group): l = opsd_load[cntry] if len(group) == 1: return pd.DataFrame({group.index[0]: l}) else: nuts3_cntry = nuts3.loc[nuts3.country == cntry] transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry, normed=False).T.tocsr() gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values), index=group.index) pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values), index=group.index) # relative factors 0.6 and 0.4 have been determined from a linear # regression on the country to continent load data (refer to vresutils.load._upsampling_weights) factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) return pd.DataFrame(factors.values * l.values[:,np.newaxis], index=l.index, columns=factors.index) load = pd.concat([upsample(cntry, group) for cntry, group in regions.geometry.groupby(regions.country)], axis=1) n.madd(""Load"", substation_lv_i, bus=substation_lv_i, p_set=load) ","def attach_load(n): substation_lv_i = n.buses.index[n.buses['substation_lv']] regions = (gpd.read_file(snakemake.input.regions).set_index('name') .reindex(substation_lv_i)) opsd_load = (pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True) .filter(items=snakemake.config['countries'])) # Scalling data according to scalling factor in config.yaml logger.info(f""Load data scalled with scalling factior {snakemake.config['load']['scaling_factor']}."") opsd_load = opsd_load * snakemake.config.get('load', 
{}).get('scaling_factor', 1.0) # Convert to naive UTC (has to be explicit since pandas 0.24) opsd_load.index = opsd_load.index.tz_localize(None) nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') def normed(x): return x.divide(x.sum()) def upsample(cntry, group): l = opsd_load[cntry] if len(group) == 1: return pd.DataFrame({group.index[0]: l}) else: nuts3_cntry = nuts3.loc[nuts3.country == cntry] transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry, normed=False).T.tocsr() gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values), index=group.index) pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values), index=group.index) # relative factors 0.6 and 0.4 have been determined from a linear # regression on the country to continent load data (refer to vresutils.load._upsampling_weights) factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) return pd.DataFrame(factors.values * l.values[:,np.newaxis], index=l.index, columns=factors.index) load = pd.concat([upsample(cntry, group) for cntry, group in regions.geometry.groupby(regions.country)], axis=1) n.madd(""Load"", substation_lv_i, bus=substation_lv_i, p_set=load) " 20286,"def is_irix() -> bool: return platform.system().lower() == 'irix64' ","def is_irix() -> bool: return platform.system().lower().startswith('irix') " 6637,"def add_holidays(events, start, end, employee, company): applicable_holiday_list = get_holiday_list_for_employee(employee, company) if not applicable_holiday_list: return for holiday in frappe.db.sql(""""""select name, holiday_date, description from `tabHoliday` where parent=%s and holiday_date between %s and %s"""""", (applicable_holiday_list, start, end), as_dict=True): events.append({ ""doctype"": ""Holiday"", ""from_date"": holiday.holiday_date, ""to_date"": holiday.holiday_date, ""title"": _(""Holiday"") + "": "" + cstr(holiday.description), ""name"": holiday.name }) ","def add_holidays(events, start, end, employee, company): applicable_holiday_list = get_holiday_list_for_employee(employee, company) if not applicable_holiday_list: return for holiday in frappe.db.sql(""""""select name, holiday_date, description from `tabHoliday` where parent=%s and holiday_date between %s and %s"""""", (applicable_holiday_list, start, end), as_dict=True): events.append({ ""doctype"": ""Holiday"", ""from_date"": holiday.holiday_date, ""to_date"": holiday.holiday_date, ""title"": _(""Holiday"") + "": "" + cstr(holiday.description), ""name"": holiday.name }) " 32168,"def get_event_types(client, method, token): """""" Call the client module to fetch event types using the input parameters :param client: instace of client to communicate with server :param method: Requests method to be used :param token: server access token :return: alert event types """""" eTypeAlias = {} params = { 'token': token } eventtypes_url = r'/api/v2/events/types' eventTypes = client.get_event_types(method, eventtypes_url, params) if eventTypes != None: for eachone in eventTypes: eTypeAlias[eachone['type']] = eachone['alias'] return eTypeAlias ","def get_event_types(client, method, token): """""" Call the client module to fetch event types using the input parameters :param client: instace of client to communicate with server :param method: Requests method to be used :param token: server access token :return: alert event types """""" eTypeAlias = {} params = { 'token': token } eventtypes_url = r'/api/v2/events/types' eventTypes = client.get_event_types(method, eventtypes_url, params) if eventTypes: for 
eachone in eventTypes: eTypeAlias[eachone['type']] = eachone['alias'] return eTypeAlias " 32138,"def remove_duplicates_from_list_arg(args, field): """""" Removes duplicates from a dict after calling argToList. For example: args: {'ids': ""1,2,1""} , field='ids' The return output will be [""1"",""2""] :type args: dict :param args: Args to be converted (required) :type field: str :param field: Field in args to be converted into list without duplicates (required) :return: A python list of args without duplicates :rtype: ``list`` """""" convert_to_list = argToList(args.get(field)) return list(set(convert_to_list)) ","def remove_duplicates_from_list_arg(args, field): """""" Removes duplicates from a list after calling argToList. For example: args: {'ids': ""1,2,1""} , field='ids' The return output will be [""1"",""2""] :type args: dict :param args: Args to be converted (required) :type field: str :param field: Field in args to be converted into list without duplicates (required) :return: A python list of args without duplicates :rtype: ``list`` """""" convert_to_list = argToList(args.get(field)) return list(set(convert_to_list)) " 42337,"def get_role_list(collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get list of installed collection roles. Only roles that have an argument specification defined are returned. .. note:: Version added: 2.2 :param str collection: A fully qualified collection name used to filter the results. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. 
(default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_list_command(collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error ","def get_role_list(collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get list of installed collection roles. Only roles that have an argument specification defined are returned. .. note:: Version added: 2.2 :param str collection: A fully qualified collection name used to filter the results. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. 
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. 
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_list_command(collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error " 33335,"def transform_covid19_faba_data(worker: TaskSpec, records: List[dict]) -> List[dict]: logger.info(format_log(f""Transforming data"", name=worker.name, action=""Transform"")) start = perf_counter() results = {} for record in records: es_id_field = record[worker.field_for_es_id] disinct_award_key = record.pop(""financial_account_distinct_award_key"") award_id = record.pop(""award_id"") award_type = record.pop(""type"") generated_unique_award_id = record.pop(""generated_unique_award_id"") total_loan_value = record.pop(""total_loan_value"") obligated_sum = record.get(""transaction_obligated_amount"") or 0 # record value for key may be None outlay_sum = ( (record.get(""gross_outlay_amount_by_award_cpe"") or 0) + (record.get(""ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe"") or 0) + (record.get(""ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe"") or 0) ) # record value for key may be None temp_key = disinct_award_key if temp_key not in results: results[temp_key] = { ""financial_account_distinct_award_key"": disinct_award_key, ""award_id"": award_id, ""type"": award_type, ""generated_unique_award_id"": generated_unique_award_id, ""total_loan_value"": total_loan_value, ""financial_accounts_by_award"": list(), ""obligated_sum"": 0, ""outlay_sum"": 0, ""_id"": es_id_field, } results[temp_key][""obligated_sum""] += obligated_sum if record.get(""is_final_balances_for_fy""): results[temp_key][""outlay_sum""] += outlay_sum results[temp_key][""financial_accounts_by_award""].append(record) if len(results) != len(records): msg = f""Transformed {len(records)} database records into {len(results)} documents for ingest"" logger.info(format_log(msg, name=worker.name, action=""Transform"")) msg = f""Transformation operation took {perf_counter() - start:.2f}s"" logger.info(format_log(msg, name=worker.name, action=""Transform"")) return list(results.values()) # don't need the dict key, return a list of the dict values ","def transform_covid19_faba_data(worker: TaskSpec, records: List[dict]) -> List[dict]: logger.info(format_log(f""Transforming data"", name=worker.name, action=""Transform"")) start = perf_counter() results = {} for record in records: es_id_field = record[worker.field_for_es_id] disinct_award_key = record.pop(""financial_account_distinct_award_key"") award_id = 
record.pop(""award_id"") award_type = record.pop(""type"") generated_unique_award_id = record.pop(""generated_unique_award_id"") total_loan_value = record.pop(""total_loan_value"") obligated_sum = record.get(""transaction_obligated_amount"") or 0 # record value for key may be None outlay_sum = ( (record.get(""gross_outlay_amount_by_award_cpe"") or 0) + (record.get(""ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe"") or 0) + (record.get(""ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe"") or 0) ) # record value for any key may be None temp_key = disinct_award_key if temp_key not in results: results[temp_key] = { ""financial_account_distinct_award_key"": disinct_award_key, ""award_id"": award_id, ""type"": award_type, ""generated_unique_award_id"": generated_unique_award_id, ""total_loan_value"": total_loan_value, ""financial_accounts_by_award"": list(), ""obligated_sum"": 0, ""outlay_sum"": 0, ""_id"": es_id_field, } results[temp_key][""obligated_sum""] += obligated_sum if record.get(""is_final_balances_for_fy""): results[temp_key][""outlay_sum""] += outlay_sum results[temp_key][""financial_accounts_by_award""].append(record) if len(results) != len(records): msg = f""Transformed {len(records)} database records into {len(results)} documents for ingest"" logger.info(format_log(msg, name=worker.name, action=""Transform"")) msg = f""Transformation operation took {perf_counter() - start:.2f}s"" logger.info(format_log(msg, name=worker.name, action=""Transform"")) return list(results.values()) # don't need the dict key, return a list of the dict values " 757,"def axial_kurtosis(dki_params, min_kurtosis=-3./7, max_kurtosis=10, analytical=True): r"""""" Computes axial Kurtosis (AK) from the kurtosis tensor [1]_, [2]_. Parameters ---------- dki_params : ndarray (x, y, z, 27) or (n, 27) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor min_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are smaller than `min_kurtosis` are replaced with `min_kurtosis`. Default = -3./7 (theoretical kurtosis limit for regions that consist of water confined to spherical pores [3]_) max_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are larger than `max_kurtosis` are replaced with `max_kurtosis`. Default = 10 analytical : bool (optional) If True, AK is calculated from rotated diffusion kurtosis tensor, otherwise it will be computed from the apparent diffusion kurtosis values along the principal axis of the diffusion tensor (see notes). Default is set to True. Returns ------- ak : array Calculated AK. Notes ----- AK is defined as the directional kurtosis parallel to the fiber's main direction e1 [1]_, [2]_. You can compute AK using to approaches: 1) AK is calculated from rotated diffusion kurtosis tensor [2]_, i.e.: .. math:: AK = \hat{W}_{1111} \frac{(\lambda_{1}+\lambda_{2}+\lambda_{3})^2}{(9 \lambda_{1}^2)} 2) AK can be sampled from the principal axis of the diffusion tensor: .. 
math:: AK = K(\mathbf{\mathbf{e}_1) Although both approaches leads to an exactly calculation of AK, the first approach will be refered to as the analytical method whilte the second approach will be refered to as the numerical method based on their analogy to the estimation strategies for MK and RK. References ---------- .. [1] Jensen, J.H., Helpern, J.A., 2010. MRI quantification of non-Gaussian water diffusion by kurtosis analysis. NMR in Biomedicine 23(7): 698-710 .. [2] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011. Estimation of tensors and tensor-derived measures in diffusional kurtosis imaging. Magn Reson Med. 65(3), 823-836 .. [3] Barmpoutis, A., & Zhuo, J., 2011. Diffusion kurtosis imaging: Robust estimation from DW-MRI using homogeneous polynomials. Proceedings of the 8th {IEEE} International Symposium on Biomedical Imaging: From Nano to Macro, ISBI 2011, 262-265. doi: 10.1109/ISBI.2011.5872402 """""" # Flat parameters outshape = dki_params.shape[:-1] dki_params = dki_params.reshape((-1, dki_params.shape[-1])) # Split data evals, evecs, kt = split_dki_param(dki_params) # Initialize AK AK = np.zeros(kt.shape[:-1]) # select relevant voxels to process rel_i = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2]) kt = kt[rel_i] evecs = evecs[rel_i] evals = evals[rel_i] AKi = AK[rel_i] # Compute mean diffusivity md = mean_diffusivity(evals) if analytical: # Rotate the kurtosis tensor from the standard Cartesian coordinate # system to another coordinate system in which the 3 orthonormal # eigenvectors of DT are the base coordinate Wxxxx = Wrotate_element(kt, 0, 0, 0, 0, evecs) AKi = Wxxxx * (md ** 2) / (evals[..., 0] ** 2) else: # Compute apparent directional kurtosis along evecs[0] dt = lower_triangular(vec_val_vect(evecs, evals)) for vox in range(len(kt)): AKi[vox] = directional_kurtosis(dt[vox], md[vox], kt[vox], np.array([evecs[vox, :, 0]])) # reshape data according to input data AK[rel_i] = AKi if min_kurtosis is not None: AK = AK.clip(min=min_kurtosis) if max_kurtosis is not None: AK = AK.clip(max=max_kurtosis) return AK.reshape(outshape) ","def axial_kurtosis(dki_params, min_kurtosis=-3./7, max_kurtosis=10, analytical=True): r"""""" Computes axial Kurtosis (AK) from the kurtosis tensor [1]_, [2]_. Parameters ---------- dki_params : ndarray (x, y, z, 27) or (n, 27) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor min_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are smaller than `min_kurtosis` are replaced with `min_kurtosis`. Default = -3./7 (theoretical kurtosis limit for regions that consist of water confined to spherical pores [3]_) max_kurtosis : float (optional) To keep kurtosis values within a plausible biophysical range, axial kurtosis values that are larger than `max_kurtosis` are replaced with `max_kurtosis`. Default = 10 analytical : bool (optional) If True, AK is calculated from rotated diffusion kurtosis tensor, otherwise it will be computed from the apparent diffusion kurtosis values along the principal axis of the diffusion tensor (see notes). Default is set to True. Returns ------- ak : array Calculated AK. Notes ----- AK is defined as the directional kurtosis parallel to the fiber's main direction e1 [1]_, [2]_. 
You can compute AK using to approaches: 1) AK is calculated from rotated diffusion kurtosis tensor [2]_, i.e.: .. math:: AK = \hat{W}_{1111} \frac{(\lambda_{1}+\lambda_{2}+\lambda_{3})^2}{(9 \lambda_{1}^2)} 2) AK can be sampled from the principal axis of the diffusion tensor: .. math:: AK = K(\mathbf{\mathbf{e}_1) Although both approaches leads to an exactly calculation of AK, the first approach will be refered to as the analytical method whilte the second approach will be referred to as the numerical method based on their analogy to the estimation strategies for MK and RK. References ---------- .. [1] Jensen, J.H., Helpern, J.A., 2010. MRI quantification of non-Gaussian water diffusion by kurtosis analysis. NMR in Biomedicine 23(7): 698-710 .. [2] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011. Estimation of tensors and tensor-derived measures in diffusional kurtosis imaging. Magn Reson Med. 65(3), 823-836 .. [3] Barmpoutis, A., & Zhuo, J., 2011. Diffusion kurtosis imaging: Robust estimation from DW-MRI using homogeneous polynomials. Proceedings of the 8th {IEEE} International Symposium on Biomedical Imaging: From Nano to Macro, ISBI 2011, 262-265. doi: 10.1109/ISBI.2011.5872402 """""" # Flat parameters outshape = dki_params.shape[:-1] dki_params = dki_params.reshape((-1, dki_params.shape[-1])) # Split data evals, evecs, kt = split_dki_param(dki_params) # Initialize AK AK = np.zeros(kt.shape[:-1]) # select relevant voxels to process rel_i = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2]) kt = kt[rel_i] evecs = evecs[rel_i] evals = evals[rel_i] AKi = AK[rel_i] # Compute mean diffusivity md = mean_diffusivity(evals) if analytical: # Rotate the kurtosis tensor from the standard Cartesian coordinate # system to another coordinate system in which the 3 orthonormal # eigenvectors of DT are the base coordinate Wxxxx = Wrotate_element(kt, 0, 0, 0, 0, evecs) AKi = Wxxxx * (md ** 2) / (evals[..., 0] ** 2) else: # Compute apparent directional kurtosis along evecs[0] dt = lower_triangular(vec_val_vect(evecs, evals)) for vox in range(len(kt)): AKi[vox] = directional_kurtosis(dt[vox], md[vox], kt[vox], np.array([evecs[vox, :, 0]])) # reshape data according to input data AK[rel_i] = AKi if min_kurtosis is not None: AK = AK.clip(min=min_kurtosis) if max_kurtosis is not None: AK = AK.clip(max=max_kurtosis) return AK.reshape(outshape) " 26010,"def load_arguments(self, _): # Model imports DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes = self.get_models('HyperVGenerationTypes') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = 
CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help=""The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=`"", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help=""Scale set name. You can configure the default using `az configure --defaults vmss=`"", id_part='name') extension_instance_name_type = CLIArgumentType(help=""Name of extension instance, which can be customized. Default: name of the extension."") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01') license_type = CLIArgumentType( help=""Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for "" ""Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, "" ""use 'Windows_Client'. For more information see the Azure Windows VM online docs."", arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6'])) # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default=""V1"")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type([""V1"", ""V2""], default=""V1"")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. 
Run ""az {0} grant-access --access-level Write"" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.') c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help=""The bandwidth allowed for this disk. Only settable for UltraSSD disks. 
MBps means millions of bytes per second with ISO notation of powers of 10"") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. 
Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's OS disk."") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's data disk."") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api=""2019-03-01"", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = ""Name of the image builder run output."" c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help=""Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."" "" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'"") c.argument('source', options_list=[""--image-source"", ""-i""], help=""The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID."") c.argument('image_template_name', image_template_name_type, help=""The name of the image template."") c.argument('checksum', help=""The SHA256 checksum of the Red Hat ISO image"") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. 
E.g ""image_1=westus2 image_2=westus"". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g ""my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth."" ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a ""/"". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group=""Image Source"") ib_customizer_type = CLIArgumentType(arg_group=""Customizer"") ib_cutput_type = CLIArgumentType(arg_group=""Output"") c.argument('build_timeout', type=int, help=""The Maximum duration to wait while building the image template, in minutes. Default is 60."") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. 
Omit if no specific virtual network needs to be used.') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = ""Space-separated list of regions to replicate the image version into."" ib_img_location_help = ""Location where the customized image will be created."" c.argument('gallery_image_definition', arg_group=""Shared Image Gallery"", help=""Name or ID of the existing SIG image definition to create the customized image version with."") c.argument('gallery_name', arg_group=""Shared Image Gallery"", help=""Shared image gallery name, if image definition name and not ID was provided."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group=""Managed Image"", help=""Name or ID of the customized managed image to be created."") c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = ""Tags that will be applied to the output artifact once it has been created by the distributor. "" + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=[""--artifact-tags""]) ib_default_loc_help = "" Defaults to resource group's location."" c.argument('output_name', help=ib_output_name_help + "" Defaults to the name of the managed image or sig image definition."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group=""VHD"", help=""The output is a VHD distributor."", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group=""Windows Restart"") ib_win_update_type = CLIArgumentType(arg_group=""Windows Update"") ib_script_type = CLIArgumentType(arg_group=""Shell and Powershell"") ib_powershell_type = CLIArgumentType(arg_group=""Powershell"") ib_file_customizer_type = CLIArgumentType(arg_group=""File"") c.argument('customizer_name', help=""Name of the customizer."") c.argument('customizer_type', options_list=['--type', '-t'], help=""Type of customizer to be added to the image template."", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help=""URL of script to customize the image with. 
The URL must be publicly accessible."") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help=""Space-separated list of inline script lines to customize the image with."") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help=""Space-separated list of valid exit codes, as integers"") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help=""Command to execute the restart operation."") c.argument('restart_check_command', arg_type=ib_win_restart_type, help=""Command to verify that restart succeeded."") c.argument('restart_timeout', arg_type=ib_win_restart_type, help=""Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)"", default=""5m"") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help=""The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc."") c.argument('dest_path', arg_type=ib_file_customizer_type, help=""The absolute destination path where the file specified in --file-source will be downloaded to in the image"") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. 
If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help=""Managed OS disk ID or name to swap to"") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help=""enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2"") c.argument('disk_caching', nargs='*', help=""Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none (\'""""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine"") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. 
Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced must have a platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. 
--host and --host-group can't be used together."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together."") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help=""The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range."") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help=""Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only"") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help=""The name or ID of the managed disk"", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help=""One or more names or IDs of the managed disk (space-delimited)."", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query ""[?attributes.enabled].id"" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help=""image sku's version"") c.argument('urn', help=""URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted"") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help=""Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd"") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help=""size name, partial name is accepted"") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help=""show skus supporting availability zones"") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help=""show all information including vm sizes not available under the current subscription"") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. ""availabilitySets"", ""snapshots"", ""disks"", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help=""Name of the Dedicated Host Group"") c.argument('host_name', name_arg_type, id_part='child_name_1', help=""Name of the Dedicated Host"") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help=""Fault domain of the host within a group. 
Allowed values: 0, 1, 2"") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help=""Replace the host automatically if a failure occurs"") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help=""The software license type that will be applied to the VMs deployed on the dedicated host."") c.argument('sku', help=""SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/"") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help=""Name of the Dedicated Host Group"") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=[""--platform-fault-domain-count"", ""-c""], type=int, help=""Number of fault domains that the host group can span."") c.argument('zones', zone_type) c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='The flag that enable or disable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.') for scope in [""vm host"", ""vm host group""]: with self.argument_context(""{} create"".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = "" Otherwise, location will default to the resource group's location"" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings[""help""] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. 
Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help=""Limit the scale set to a single placement group."" "" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details."") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set"") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help=""Specify the Microsoft.Network API version used when creating networking resources in the Network "" ""Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default "" ""value is 2020-11-01."") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify """" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify """" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help=""Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'"") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help=""Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules"") c.argument('vm_domain_name', help=""domain name of VM instances, once configured, the FQDN is `vm..<..rest..>`"") c.argument('dns_servers', nargs='+', help=""space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6"") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group=""Protection Policy"", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help=""Protect the VM instance from scale-in operations."") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help=""Protect the VM instance from scale set actions (including scale-in)."") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help=""{0} VM instance with this ID. If missing, {0} VMSS."".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help=""The command id. Use 'az {} run-command list' to get the list"".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help=""space-separated parameters in the format of '[name=]value'"") c.argument('scripts', nargs='+', help=""Space-separated script lines. 
Use @{file} to load script from a file"") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help=""Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help=""Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help=""Space-separated application version ids to set to VM."") c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. 
Null is available for an application ' 'which does not have a configuration override.') for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ ""sourceVault"": { ""id"": ""value"" }, ""vaultCertificates"": [{ ""certificateUrl"": ""value"", ""certificateStore"": ""cert store name (only on windows)""}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help=""accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples"") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help=""Accept the license agreement and privacy statement."") c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.') with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help=""Password for the VM if authentication type is 'Password'."") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value ""/home/username/.ssh/authorized_keys"" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. 
""all"" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = "", "".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = "", "".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is ""os"" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help=""Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created."") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help=""Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds"") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help=""storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `= =` to configure individual disk"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. 
If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use ""=Delete =Detach"" to configure each disk') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify """" for None (\'""""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. 
Use a singular ' 'value to apply on all resources, or use = to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help=""Scope that the system assigned identity can access. "") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help=""Role name or id the system assigned identity will have"") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. 
A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help=""the $orderby odata query option"") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in ""None"" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig show') as c: c.argument('select', help='The select expression to apply on the operation.') c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig 
image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help=""This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'."") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. 
""IsSecureBootSupported=true IsMeasuredBootSupported=false""') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration=""3.0.0"") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. 
`..`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('storage_account_type', help=""The default storage account type to be used per region. To set regional storage account types, use --target-regions"", arg_type=get_enum_type([""Standard_LRS"", ""Standard_ZRS"", ""Premium_LRS""]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `,,,,`. Use ""null"" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.') c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `,`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help=""The expand expression to apply on the operation, e.g. 
'ReplicationStatus'"") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: ..', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `[=][=]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for.
Possible values ' 'are: **Windows**
**Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help=""The name of the proximity placement group."") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help=""The type of the proximity placement group. 
Allowed values: Standard."") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help=""The name or ID of the proximity placement group the {} should be associated with."".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help=""Query to execute over Log Analytics data."") c.argument('timespan', help=""Timespan over which to query. Defaults to querying all available data."") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. 
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called ""CapacityReservationSupported"" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endRegion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endRegion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.') ","def load_arguments(self, _): # Model imports DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes = 
self.get_models('HyperVGenerationTypes') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help=""The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=`"", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help=""Scale set name. You can configure the default using `az configure --defaults vmss=`"", id_part='name') extension_instance_name_type = CLIArgumentType(help=""Name of extension instance, which can be customized. Default: name of the extension."") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01') license_type = CLIArgumentType( help=""Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for "" ""Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, "" ""use 'Windows_Client'. For more information see the Azure Windows VM online docs."", arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC', 'None', 'RHEL_ELS_6'])) # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
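# Fall back to hard-coded SKU values when the targeted API profile does not expose the enum models.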
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default=""V1"")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type([""V1"", ""V2""], default=""V1"")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') security_type = CLIArgumentType(arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. Run ""az {0} grant-access --access-level Write"" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.') c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. 
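# Arguments shared across the 'az disk' commands: disk name, SKU, OS type, performance targets (IOPS/MBps), and image or gallery sources.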
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help=""The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10"") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM.
Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate whether the OS on the disk supports hibernation.', min_api='2020-12-01') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collapse all the different image sources under 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image.
Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's OS disk."") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help=""Storage caching type for the image's data disk."") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api=""2019-03-01"", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = ""Name of the image builder run output."" c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help=""Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."" "" Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'"") c.argument('source', options_list=[""--image-source"", ""-i""], help=""The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID."") c.argument('image_template_name', image_template_name_type, help=""The name of the image template."") c.argument('checksum', help=""The SHA256 checksum of the Red Hat ISO image"") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g. ""image_1=westus2 image_2=westus"". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g. ""my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth."" ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a ""/"". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group=""Image Source"") ib_customizer_type = CLIArgumentType(arg_group=""Customizer"") ib_cutput_type = CLIArgumentType(arg_group=""Output"") c.argument('build_timeout', type=int, help=""The maximum duration to wait while building the image template, in minutes. Default is 60."") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB.
Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = ""Space-separated list of regions to replicate the image version into."" ib_img_location_help = ""Location where the customized image will be created."" c.argument('gallery_image_definition', arg_group=""Shared Image Gallery"", help=""Name or ID of the existing SIG image definition to create the customized image version with."") c.argument('gallery_name', arg_group=""Shared Image Gallery"", help=""Shared image gallery name, if image definition name and not ID was provided."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group=""Managed Image"", help=""Name or ID of the customized managed image to be created."") c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = ""Tags that will be applied to the output artifact once it has been created by the distributor. 
"" + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=[""--artifact-tags""]) ib_default_loc_help = "" Defaults to resource group's location."" c.argument('output_name', help=ib_output_name_help + "" Defaults to the name of the managed image or sig image definition."") c.argument('gallery_replication_regions', arg_group=""Shared Image Gallery"", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group=""Managed Image"", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group=""VHD"", help=""The output is a VHD distributor."", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group=""Windows Restart"") ib_win_update_type = CLIArgumentType(arg_group=""Windows Update"") ib_script_type = CLIArgumentType(arg_group=""Shell and Powershell"") ib_powershell_type = CLIArgumentType(arg_group=""Powershell"") ib_file_customizer_type = CLIArgumentType(arg_group=""File"") c.argument('customizer_name', help=""Name of the customizer."") c.argument('customizer_type', options_list=['--type', '-t'], help=""Type of customizer to be added to the image template."", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help=""URL of script to customize the image with. The URL must be publicly accessible."") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help=""Space-separated list of inline script lines to customize the image with."") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help=""Space-separated list of valid exit codes, as integers"") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help=""Command to execute the restart operation."") c.argument('restart_check_command', arg_type=ib_win_restart_type, help=""Command to verify that restart succeeded."") c.argument('restart_timeout', arg_type=ib_win_restart_type, help=""Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)"", default=""5m"") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help=""The URI of the file to be downloaded into the image. 
It can be a github link, SAS URI for Azure Storage, etc."") c.argument('dest_path', arg_type=ib_file_customizer_type, help=""The absolute destination path where the file specified in --file-source will be downloaded to in the image"") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help=""Managed OS disk ID or name to swap to"") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help=""enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2"") c.argument('disk_caching', nargs='*', help=""Use singular value to apply across, or specify individual disks, e.g. 
'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify """" for none (\'""""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine"") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. 
This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as the public key in the virtual machine. It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced must have a platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM.
Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together."") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help=""Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help=""Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples."") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help=""The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range."") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. 
It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help=""Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only"") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help=""The name or ID of the managed disk"", id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('disks', nargs='*', help=""One or more names or IDs of the managed disk (space-delimited)."", completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. 
(Only supported for Linux virtual machines.)') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm extension show') as c: c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query ""[?attributes.enabled].id"" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help=""image sku's version"") c.argument('urn', help=""URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted"") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. 
If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help=""Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd"") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help=""size name, partial name is accepted"") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help=""show skus supporting availability zones"") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help=""show all information including vm sizes not available under the current subscription"") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. ""availabilitySets"", ""snapshots"", ""disks"", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help=""Name of the Dedicated Host Group"") c.argument('host_name', name_arg_type, id_part='child_name_1', help=""Name of the Dedicated Host"") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help=""Fault domain of the host within a group. 
Allowed values: 0, 1, 2"") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help=""Replace the host automatically if a failure occurs"") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help=""The software license type that will be applied to the VMs deployed on the dedicated host."") c.argument('sku', help=""SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/"") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help=""Name of the Dedicated Host Group"") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=[""--platform-fault-domain-count"", ""-c""], type=int, help=""Number of fault domains that the host group can span."") c.argument('zones', zone_type) c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='Enable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.') for scope in [""vm host"", ""vm host group""]: with self.argument_context(""{} create"".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = "" Otherwise, location will default to the resource group's location"" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings[""help""] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. 
Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help=""Limit the scale set to a single placement group."" "" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details."") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help=""The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set"") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help=""Specify the Microsoft.Network API version used when creating networking resources in the Network "" ""Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. 
Default "" ""value is 2020-11-01."") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') c.argument('security_type', security_type) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify """" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one.
Can also reference an existing load balancer by ID or specify """" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help=""Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'"") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help=""Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules"") c.argument('vm_domain_name', help=""domain name of VM instances, once configured, the FQDN is `vm..<..rest..>`"") c.argument('dns_servers', nargs='+', help=""space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6"") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help=""enable accelerated networking. Unless specified, CLI will enable it based on machine image and size"") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group=""Protection Policy"", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help=""Protect the VM instance from scale-in operations."") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help=""Protect the VM instance from scale set actions (including scale-in)."") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to ""Standard_DS1_v2"". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--vm-sku`. 
Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('enable_secure_boot', enable_secure_boot_type) c.argument('enable_vtpm', enable_vtpm_type) with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help=""{0} VM instance with this ID. If missing, {0} VMSS."".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help=""The command id. Use 'az {} run-command list' to get the list"".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help=""space-separated parameters in the format of '[name=]value'"") c.argument('scripts', nargs='+', help=""Space-separated script lines. 
Use @{file} to load script from a file"") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine') for scope in ['create', 'update']: with self.argument_context('vm run-command {}'.format(scope)) as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.') c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') with self.argument_context('vm run-command delete') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vm run-command list') as c: c.argument('vm_name', run_cmd_vm_name, id_part=None) c.argument('expand', help='The expand expression to apply on the operation.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) with self.argument_context('vm run-command show') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') with self.argument_context('vm run-command wait') as c: c.argument('vm_name', run_cmd_vm_name) c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('command_id', help='The command id.') run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') for scope in ['create', 'update']: with self.argument_context('vmss run-command {}'.format(scope)) as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('script', help='Contain the powershell or bash script to execute on the VM.') c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ') c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using ""list"" command.') c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' 'will complete as soon as the script starts and will not wait for script to complete.') c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. 
') c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.') c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.') with self.argument_context('vmss run-command delete') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) with self.argument_context('vmss run-command list') as c: c.argument('vmss_name', run_cmd_vmss_name, id_part=None) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('expand', help='The expand expression to apply on the operation.') with self.argument_context('vmss run-command show') as c: c.argument('vmss_name', run_cmd_vmss_name) c.argument('instance_id', help='The instance ID of the virtual machine.') c.argument('run_command_name', run_cmd_name_type) c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='The instance view of a run command.') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help=""Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help=""Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'"".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm application set', 'vmss application set']: with self.argument_context(scope) as c: c.argument('vm', existing_vm_name) c.argument('vmss_name', vmss_name_type) c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help=""Space-separated application version ids to set to VM."") c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.') c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', help='Space-separated application configuration overrides for each application version ids. ' 'It should have the same number of items as the application version ids. 
Null is available for a application ' 'which does not have a configuration override.') for scope in ['vm application list', 'vmss application list']: with self.argument_context(scope) as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) c.argument('vmss_name', vmss_name_type, id_part=None) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ ""sourceVault"": { ""id"": ""value"" }, ""vaultCertificates"": [{ ""certificateUrl"": ""value"", ""certificateStore"": ""cert store name (only on windows)""}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help=""accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples"") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) c.argument('accept_term', action='store_true', help=""Accept the license agreement and privacy statement."") c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.') with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help=""Password for the VM if authentication type is 'Password'."") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value ""/home/username/.ssh/authorized_keys"" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. 
""all"" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = "", "".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = "", "".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is ""os"" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help=""Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created."") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help=""Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds"") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help=""storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `= =` to configure individual disk"") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. 
If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use ""=Delete =Detach"" to configure each disk') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify """" for None (\'""""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. 
Use a singular ' 'value to apply on all resources, or use = to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help=""Scope that the system assigned identity can access. "") c.ignore('identity_role_id') for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', help='Role name or id the system assigned identity will have. ') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('identity_role', options_list=['--role'], help=""Role name or id the system assigned identity will have"") with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format.
A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help=""the $orderby odata query option"") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('license_type', license_type) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in ""None"" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.') with self.argument_context('vm update') as c: c.argument('license_type', license_type) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help=""Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular."") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig show') as c: c.argument('select', help='The select expression to apply on the operation.') c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig 
image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help=""This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'."") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. 
""IsSecureBootSupported=true IsMeasuredBootSupported=false""') c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration=""3.0.0"") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `..`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. 
`..`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help=""the end of life date, e.g. '2020-12-31'"") c.argument('storage_account_type', help=""The default storage account type to be used per region. To set regional storage account types, use --target-regions"", arg_type=get_enum_type([""Standard_LRS"", ""Standard_ZRS"", ""Premium_LRS""]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `,,,,`. Use ""null"" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.') c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `,`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) c.argument('marker', arg_type=marker_type) c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('sig image-version show') as c: c.argument('expand', help=""The expand expression to apply on the operation, e.g. 
'ReplicationStatus'"") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: ..', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `[=][=]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Gallery applications with self.argument_context('sig gallery-application') as c: c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], help='The name of the gallery Application') with self.argument_context('sig gallery-application create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' 'to specify the supported type of the OS that application is built for.
Possible values ' 'are: **Windows**
**Linux**') with self.argument_context('sig gallery-application update') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('description', help='The description of this gallery Application Definition resource. ' 'This property is updatable.') with self.argument_context('sig gallery-application version') as c: c.argument('gallery_application_name', options_list=['--application-name'], help='The name of the gallery Application') c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], help='The name of the gallery Application Version') for scope in ['create', 'update']: with self.argument_context('sig gallery-application version {}'.format(scope)) as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') c.argument('install_command', help='The path and arguments to install the gallery application.') c.argument('remove_command', help='The path and arguments to remove the gallery application.') c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' ' then update operation will invoke remove command on the previous version ' 'and install command on the current version of the gallery application.') c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' 'going to be replicated to. This property is updatable. Expected value: ' 'json-string/json-file/@json-file.') c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' 'deployed from the latest version of the Image Definition won\'t use this Image Version.', arg_group='Publishing Profile') c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' 'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile') # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help=""The name of the proximity placement group."") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help=""The type of the proximity placement group. 
Allowed values: Standard."") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help=""The name or ID of the proximity placement group the {} should be associated with."".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help=""Query to execute over Log Analytics data."") c.argument('timespan', help=""Timespan over which to query. Defaults to querying all available data."") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk.
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion # region Capacity with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called ""CapacityReservationSupported"" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') # endRegion # region Restore point with self.argument_context('restore-point') as c: c.argument('restore_point_collection_name', options_list=['--collection-name'], help='The name of the restore point collection.') with self.argument_context('restore-point create') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' 'customer wishes to exclude from the restore point. If no disks are specified, all disks will be ' 'included.') c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') with self.argument_context('restore-point show') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.') with self.argument_context('restore-point delete') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') with self.argument_context('restore-point wait') as c: c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], help='The name of the restore point.') # endRegion # region Restore point collection with self.argument_context('restore-point collection create') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', arg_group='Source') with self.argument_context('restore-point collection update') as c: c.argument('tags', tags_type) with self.argument_context('restore-point collection show') as c: c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.') " 14623,"def add_unseen_labels(train_label_dict, test_label_list): """""" Merge test set labels that not seen in the training data with seen ones. Parameters ---------- train_label_dict : dict Dictionary mapping training set class labels to class indices. test_label_list : list List containing labels in the test set. 
Returns ------- train_and_test_label_dict : dict Dictionary mapping merged lables from both the training and test set to indices. """""" # get the list of labels that were in the training set train_label_list = list(train_label_dict.keys()) # identify any unseen labels in the test set unseen_test_label_list = [label for label in test_label_list if label not in train_label_list] # create a new dictionary for these unseen labels with label indices # for them starting _after_ those for the training set labels unseen_label_dict = {label: i for i, label in enumerate(unseen_test_label_list, start=len(train_label_list))} # combine the train label dictionary with this unseen label one & return train_and_test_label_dict = train_label_dict.copy() train_and_test_label_dict.update(unseen_label_dict) return train_and_test_label_dict ","def add_unseen_labels(train_label_dict, test_label_list): """""" Merge test set labels that not seen in the training data with seen ones. Parameters ---------- train_label_dict : dict Dictionary mapping training set class labels to class indices. test_label_list : list List containing labels in the test set. Returns ------- train_and_test_label_dict : dict Dictionary mapping merged labels from both the training and test sets to indices. """""" # get the list of labels that were in the training set train_label_list = list(train_label_dict.keys()) # identify any unseen labels in the test set unseen_test_label_list = [label for label in test_label_list if label not in train_label_list] # create a new dictionary for these unseen labels with label indices # for them starting _after_ those for the training set labels unseen_label_dict = {label: i for i, label in enumerate(unseen_test_label_list, start=len(train_label_list))} # combine the train label dictionary with this unseen label one & return train_and_test_label_dict = train_label_dict.copy() train_and_test_label_dict.update(unseen_label_dict) return train_and_test_label_dict " 41557,"def update_model_params(context, ds_test, model_params, path_output): clustering_path = os.path.join(path_output, ""clustering_models.joblib"") metadata_clustering_models = joblib.load(clustering_path) # Model directory ohe_path = os.path.join(path_output, context[""model_name""], ""one_hot_encoder.joblib"") one_hot_encoder = joblib.load(ohe_path) ds_test = imed_film.normalize_metadata(ds_test, metadata_clustering_models, context[""debugging""], model_params['metadata']) model_params.update({""film_onehotencoder"": one_hot_encoder, ""n_metadata"": len([ll for l in one_hot_encoder.categories_ for ll in l])}) return ds_test, model_params ","def update_film_model_params(context, ds_test, model_params, path_output): clustering_path = os.path.join(path_output, ""clustering_models.joblib"") metadata_clustering_models = joblib.load(clustering_path) # Model directory ohe_path = os.path.join(path_output, context[""model_name""], ""one_hot_encoder.joblib"") one_hot_encoder = joblib.load(ohe_path) ds_test = imed_film.normalize_metadata(ds_test, metadata_clustering_models, context[""debugging""], model_params['metadata']) model_params.update({""film_onehotencoder"": one_hot_encoder, ""n_metadata"": len([ll for l in one_hot_encoder.categories_ for ll in l])}) return ds_test, model_params " 1231,"def test_mghheader_default_structarr(): hdr = MGHHeader.default_structarr() assert hdr['version'] == 1 assert_array_equal(hdr['dims'], 1) assert hdr['type'] == 3 assert hdr['dof'] == 0 assert hdr['goodRASFlag'] == 1 assert_array_equal(hdr['delta'], 1) 
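# informal note: in the MGH header, 'Mdc' holds the 3x3 direction-cosine matrix and
# 'Pxyz_c' the RAS coordinates of the volume centre; the asserts below compare them
# (and the remaining scalar fields) against the format defaults.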
assert_array_equal(hdr['Mdc'], [[-1, 0, 0], [0, 0, 1], [0, -1, 0]]) assert_array_equal(hdr['Pxyz_c'], 0) assert hdr['tr'] ==0 assert hdr['flip_angle'] == 0 assert hdr['te'] == 0 assert hdr['ti'] == 0 assert hdr['fov'] == 0 for endianness in (None,) + BIG_CODES: hdr2 = MGHHeader.default_structarr(endianness=endianness) assert hdr2 == hdr assert hdr2.newbyteorder('>') == hdr for endianness in LITTLE_CODES: with pytest.raises(ValueError): MGHHeader.default_structarr(endianness=endianness) ","def test_mghheader_default_structarr(): hdr = MGHHeader.default_structarr() assert hdr['version'] == 1 assert_array_equal(hdr['dims'], 1) assert hdr['type'] == 3 assert hdr['dof'] == 0 assert hdr['goodRASFlag'] == 1 assert_array_equal(hdr['delta'], 1) assert_array_equal(hdr['Mdc'], [[-1, 0, 0], [0, 0, 1], [0, -1, 0]]) assert_array_equal(hdr['Pxyz_c'], 0) assert hdr['tr'] == 0 assert hdr['flip_angle'] == 0 assert hdr['te'] == 0 assert hdr['ti'] == 0 assert hdr['fov'] == 0 for endianness in (None,) + BIG_CODES: hdr2 = MGHHeader.default_structarr(endianness=endianness) assert hdr2 == hdr assert hdr2.newbyteorder('>') == hdr for endianness in LITTLE_CODES: with pytest.raises(ValueError): MGHHeader.default_structarr(endianness=endianness) " 653,"def on_event(name, ioloop, **data): event = data event['event'] = name message = json.dumps(event, cls=models.ModelJSONEncoder) handlers.WebSocketHandler.broadcast(ioloop, message) ","def on_event(name, io_loop, **data): event = data event['event'] = name message = json.dumps(event, cls=models.ModelJSONEncoder) handlers.WebSocketHandler.broadcast(ioloop, message) " 28603,"def plot_violin( data, var_names=None, filter_vars=None, transform=None, quartiles=True, rug=False, hdi_prob=None, shade=0.35, bw=""default"", circular=False, sharex=True, sharey=True, grid=None, figsize=None, textsize=None, labeller=None, ax=None, shade_kwargs=None, rug_kwargs=None, backend=None, backend_kwargs=None, show=None, ): """"""Plot posterior of traces as violin plot. Notes ----- If multiple chains are provided for a variable they will be combined Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object Refer to documentation of :func:`arviz.convert_to_dataset` for details var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. transform: callable Function to transform data (defaults to None i.e. the identity function). quartiles: bool, optional Flag for plotting the interquartile range, in addition to the hdi_prob*100% intervals. Defaults to ``True``. rug: bool If ``True`` adds a jittered rugplot. Defaults to ``False``. hdi_prob: float, optional Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94. shade: float Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1 (opaque). Defaults to 0. bw: float or str, optional If numeric, indicates the bandwidth and must be positive. 
If str, indicates the method to estimate the bandwidth and must be one of ""scott"", ""silverman"", ""isj"" or ""experimental"" when ``circular`` is ``False`` and ""taylor"" (for now) when ``circular`` is ``True``. Defaults to ""default"" which means ""experimental"" when variable is not circular and ""taylor"" when it is. circular: bool, optional. If ``True``, it interprets `values` is a circular variable measured in radians and a circular KDE is used. Defaults to ``False``. grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize: tuple Figure size. If None it will be defined automatically. textsize: int Text size of the point_estimates, axis ticks, and highest density interval. If None it will be autoscaled based on ``figsize``. labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. sharex: bool Defaults to ``True``, violinplots share a common x-axis scale. sharey: bool Defaults to ``True``, violinplots share a common y-axis scale. ax: numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). shade_kwargs: dicts, optional Additional keywords passed to ``fill_between``, or ``barh`` to control the shade. rug_kwargs: dict Keywords passed to the rug plot. If true only the right half side of the violin will be plotted. backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default to ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_forest: Forest plot to compare HDI intervals from a number of distributions. Examples -------- Show a default violin plot .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('centered_eight') >>> az.plot_violin(data) Show a default violin plot, but with a transformation applied to the data .. 
plot:: :context: close-figs >>> az.plot_violin(data, var_names=""tau"", transform=np.log) """""" if labeller is None: labeller = BaseLabeller() data = convert_to_dataset(data, group=""posterior"") if transform is not None: data = transform(data) var_names = _var_names(var_names, data, filter_vars) plotters = filter_plotters_list( list(xarray_var_iter(data, var_names=var_names, combined=True)), ""plot_violin"" ) rows, cols = default_grid(len(plotters), grid=grid) if hdi_prob is None: hdi_prob = rcParams[""stats.hdi_prob""] else: if not 1 >= hdi_prob > 0: raise ValueError(""The value of hdi_prob should be in the interval (0, 1]"") violinplot_kwargs = dict( ax=ax, plotters=plotters, figsize=figsize, rows=rows, cols=cols, sharex=sharex, sharey=sharey, shade_kwargs=shade_kwargs, shade=shade, rug=rug, rug_kwargs=rug_kwargs, bw=bw, textsize=textsize, labeller=labeller, circular=circular, hdi_prob=hdi_prob, quartiles=quartiles, backend_kwargs=backend_kwargs, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_violin"", ""violinplot"", backend) ax = plot(**violinplot_kwargs) return ax ","def plot_violin( data, var_names=None, filter_vars=None, transform=None, quartiles=True, rug=False, hdi_prob=None, shade=0.35, bw=""default"", circular=False, sharex=True, sharey=True, grid=None, figsize=None, textsize=None, labeller=None, ax=None, shade_kwargs=None, rug_kwargs=None, backend=None, backend_kwargs=None, show=None, ): """"""Plot posterior of traces as violin plot. Notes ----- If multiple chains are provided for a variable they will be combined Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object Refer to documentation of :func:`arviz.convert_to_dataset` for details var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. transform: callable Function to transform data (defaults to None i.e. the identity function). quartiles: bool, optional Flag for plotting the interquartile range, in addition to the ``hdi_prob``*100% intervals. Defaults to ``True``. rug: bool If ``True`` adds a jittered rugplot. Defaults to ``False``. hdi_prob: float, optional Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94. shade: float Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1 (opaque). Defaults to 0. bw: float or str, optional If numeric, indicates the bandwidth and must be positive. If str, indicates the method to estimate the bandwidth and must be one of ""scott"", ""silverman"", ""isj"" or ""experimental"" when ``circular`` is ``False`` and ""taylor"" (for now) when ``circular`` is ``True``. Defaults to ""default"" which means ""experimental"" when variable is not circular and ""taylor"" when it is. circular: bool, optional. If ``True``, it interprets `values` is a circular variable measured in radians and a circular KDE is used. Defaults to ``False``. grid : tuple Number of rows and columns. 
Defaults to None, the rows and columns are automatically inferred. figsize: tuple Figure size. If None it will be defined automatically. textsize: int Text size of the point_estimates, axis ticks, and highest density interval. If None it will be autoscaled based on ``figsize``. labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. sharex: bool Defaults to ``True``, violinplots share a common x-axis scale. sharey: bool Defaults to ``True``, violinplots share a common y-axis scale. ax: numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). shade_kwargs: dicts, optional Additional keywords passed to ``fill_between``, or ``barh`` to control the shade. rug_kwargs: dict Keywords passed to the rug plot. If true only the right half side of the violin will be plotted. backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default to ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. For additional documentation check the plotting method of the backend. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_forest: Forest plot to compare HDI intervals from a number of distributions. Examples -------- Show a default violin plot .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('centered_eight') >>> az.plot_violin(data) Show a default violin plot, but with a transformation applied to the data .. plot:: :context: close-figs >>> az.plot_violin(data, var_names=""tau"", transform=np.log) """""" if labeller is None: labeller = BaseLabeller() data = convert_to_dataset(data, group=""posterior"") if transform is not None: data = transform(data) var_names = _var_names(var_names, data, filter_vars) plotters = filter_plotters_list( list(xarray_var_iter(data, var_names=var_names, combined=True)), ""plot_violin"" ) rows, cols = default_grid(len(plotters), grid=grid) if hdi_prob is None: hdi_prob = rcParams[""stats.hdi_prob""] else: if not 1 >= hdi_prob > 0: raise ValueError(""The value of hdi_prob should be in the interval (0, 1]"") violinplot_kwargs = dict( ax=ax, plotters=plotters, figsize=figsize, rows=rows, cols=cols, sharex=sharex, sharey=sharey, shade_kwargs=shade_kwargs, shade=shade, rug=rug, rug_kwargs=rug_kwargs, bw=bw, textsize=textsize, labeller=labeller, circular=circular, hdi_prob=hdi_prob, quartiles=quartiles, backend_kwargs=backend_kwargs, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_violin"", ""violinplot"", backend) ax = plot(**violinplot_kwargs) return ax " 50550,"def overlay(df1, df2, how=""intersection"", keep_geom_type=True, make_valid=True): """"""Perform spatial overlay between two GeoDataFrames. Currently only supports data GeoDataFrames with uniform geometry types, i.e. containing only (Multi)Polygons, or only (Multi)Points, or a combination of (Multi)LineString and LinearRing shapes. Implements several methods that are all effectively subsets of the union. 
Parameters ---------- df1 : GeoDataFrame df2 : GeoDataFrame how : string Method of spatial overlay: 'intersection', 'union', 'identity', 'symmetric_difference' or 'difference'. keep_geom_type : bool If True, return only geometries of the same geometry type as df1 has, if False, return all resulting gemetries. make_valid : bool If True, any invalid input geometries are corrected with a call to `buffer(0)`, if False, a `ValueError` is raised if any input geometries are invalid. Returns ------- df : GeoDataFrame GeoDataFrame with new set of polygons and attributes resulting from the overlay """""" # Allowed operations allowed_hows = [ ""intersection"", ""union"", ""identity"", ""symmetric_difference"", ""difference"", # aka erase ] # Error Messages if how not in allowed_hows: raise ValueError( ""`how` was '{0}' but is expected to be in {1}"".format(how, allowed_hows) ) if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries): raise NotImplementedError( ""overlay currently only implemented for "" ""GeoDataFrames"" ) if not _check_crs(df1, df2): _crs_mismatch_warn(df1, df2, stacklevel=3) polys = [""Polygon"", ""MultiPolygon""] lines = [""LineString"", ""MultiLineString"", ""LinearRing""] points = [""Point"", ""MultiPoint""] for i, df in enumerate([df1, df2]): poly_check = df.geom_type.isin(polys).any() lines_check = df.geom_type.isin(lines).any() points_check = df.geom_type.isin(points).any() if sum([poly_check, lines_check, points_check]) > 1: raise NotImplementedError( ""df{} contains mixed geometry types."".format(i + 1) ) # Computations def preprocess(df): df = df.copy() if df.geom_type.isin(polys).all(): mask = ~df.geometry.is_valid col = df._geometry_column_name if make_valid: df.loc[mask, col] = df.loc[mask, col].buffer(0) elif mask.any(): raise ValueError( ""You have passed make_valid=False along with "" f""{mask.sum()} invalid input geometries"" ) return df df1 = preprocess(df1) df2 = preprocess(df2) with warnings.catch_warnings(): # CRS checked above, supress array-level warning warnings.filterwarnings(""ignore"", message=""CRS mismatch between the CRS"") if how == ""difference"": return _overlay_difference(df1, df2) elif how == ""intersection"": result = _overlay_intersection(df1, df2) elif how == ""symmetric_difference"": result = _overlay_symmetric_diff(df1, df2) elif how == ""union"": result = _overlay_union(df1, df2) elif how == ""identity"": dfunion = _overlay_union(df1, df2) result = dfunion[dfunion[""__idx1""].notnull()].copy() if keep_geom_type: key_order = result.keys() exploded = result.reset_index(drop=True).explode() exploded = exploded.reset_index(level=0) type = df1.geom_type.iloc[0] if type in polys: exploded = exploded.loc[exploded.geom_type.isin(polys)] elif type in lines: exploded = exploded.loc[exploded.geom_type.isin(lines)] elif type in points: exploded = exploded.loc[exploded.geom_type.isin(points)] else: raise TypeError(""`keep_geom_type` does not support {}."".format(type)) # level_0 created with above reset_index operation # and represents the original geometry collections result = exploded.dissolve(by=""level_0"")[key_order] result.reset_index(drop=True, inplace=True) result.drop([""__idx1"", ""__idx2""], axis=1, inplace=True) return result ","def overlay(df1, df2, how=""intersection"", keep_geom_type=True, make_valid=True): """"""Perform spatial overlay between two GeoDataFrames. Currently only supports data GeoDataFrames with uniform geometry types, i.e. 
containing only (Multi)Polygons, or only (Multi)Points, or a combination of (Multi)LineString and LinearRing shapes. Implements several methods that are all effectively subsets of the union. Parameters ---------- df1 : GeoDataFrame df2 : GeoDataFrame how : string Method of spatial overlay: 'intersection', 'union', 'identity', 'symmetric_difference' or 'difference'. keep_geom_type : bool If True, return only geometries of the same geometry type as df1 has, if False, return all resulting gemetries. make_valid : bool If True, any invalid input geometries are corrected with a call to `buffer(0)`, if False, a `ValueError` is raised if any input geometries are invalid. Returns ------- df : GeoDataFrame GeoDataFrame with new set of polygons and attributes resulting from the overlay """""" # Allowed operations allowed_hows = [ ""intersection"", ""union"", ""identity"", ""symmetric_difference"", ""difference"", # aka erase ] # Error Messages if how not in allowed_hows: raise ValueError( ""`how` was '{0}' but is expected to be in {1}"".format(how, allowed_hows) ) if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries): raise NotImplementedError( ""overlay currently only implemented for "" ""GeoDataFrames"" ) if not _check_crs(df1, df2): _crs_mismatch_warn(df1, df2, stacklevel=3) polys = [""Polygon"", ""MultiPolygon""] lines = [""LineString"", ""MultiLineString"", ""LinearRing""] points = [""Point"", ""MultiPoint""] for i, df in enumerate([df1, df2]): poly_check = df.geom_type.isin(polys).any() lines_check = df.geom_type.isin(lines).any() points_check = df.geom_type.isin(points).any() if sum([poly_check, lines_check, points_check]) > 1: raise NotImplementedError( ""df{} contains mixed geometry types."".format(i + 1) ) # Computations def preprocess(df): df = df.copy() if df.geom_type.isin(polys).all(): mask = ~df.geometry.is_valid col = df._geometry_column_name if make_valid: df.loc[mask, col] = df.loc[mask, col].buffer(0) elif mask.any(): raise ValueError( ""You have passed make_valid=False along with "" f""{mask.sum()} invalid input geometries"" ) return df df1 = _make_valid(df1) df2 = _make_valid(df2) with warnings.catch_warnings(): # CRS checked above, supress array-level warning warnings.filterwarnings(""ignore"", message=""CRS mismatch between the CRS"") if how == ""difference"": return _overlay_difference(df1, df2) elif how == ""intersection"": result = _overlay_intersection(df1, df2) elif how == ""symmetric_difference"": result = _overlay_symmetric_diff(df1, df2) elif how == ""union"": result = _overlay_union(df1, df2) elif how == ""identity"": dfunion = _overlay_union(df1, df2) result = dfunion[dfunion[""__idx1""].notnull()].copy() if keep_geom_type: key_order = result.keys() exploded = result.reset_index(drop=True).explode() exploded = exploded.reset_index(level=0) type = df1.geom_type.iloc[0] if type in polys: exploded = exploded.loc[exploded.geom_type.isin(polys)] elif type in lines: exploded = exploded.loc[exploded.geom_type.isin(lines)] elif type in points: exploded = exploded.loc[exploded.geom_type.isin(points)] else: raise TypeError(""`keep_geom_type` does not support {}."".format(type)) # level_0 created with above reset_index operation # and represents the original geometry collections result = exploded.dissolve(by=""level_0"")[key_order] result.reset_index(drop=True, inplace=True) result.drop([""__idx1"", ""__idx2""], axis=1, inplace=True) return result " 47894,"def main(): log.basicConfig(format=""[ %(levelname)s ] %(message)s"", level=log.INFO, stream=sys.stdout) args = 
build_argparser().parse_args() # load vocabulary file for model log.info(""Loading vocab file:\t{}"".format(args.vocab)) with open(args.vocab, ""r"", encoding=""utf-8"") as r: vocab = dict((t.rstrip(""\n""), i) for i, t in enumerate(r.readlines())) log.info(""{} tokens loaded"".format(len(vocab))) # get context as a string (as we might need it's length for the sequence reshape) context = get_context(args) # encode context into token ids list c_tokens_id, c_tokens_se = text_to_tokens(context, vocab) log.info(""Initializing Inference Engine"") ie = IECore() log.info(""Device is {}"".format(args.device)) version = ie.get_versions(args.device)[args.device] version_str = ""{}.{}.{}"".format(version.major, version.minor, version.build_number) log.info(""Plugin version is {}"".format(version_str)) # read IR model_xml = args.model model_bin = os.path.splitext(model_xml)[0] + "".bin"" log.info(""Loading network files:\n\t{}\n\t{}"".format(model_xml, model_bin)) ie_encoder = ie.read_network(model=model_xml, weights=model_bin) if args.reshape: # reshape the sequence length to the context + maximum question length (in tokens) first_input_layer = next(iter(ie_encoder.inputs)) c = ie_encoder.inputs[first_input_layer].shape[1] # find the closest multiple of 64 seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64) if seq < c: input_info = list(ie_encoder.inputs) new_shapes = dict([]) for i in input_info: n, c = ie_encoder.inputs[i].shape new_shapes[i] = [n, seq] log.info(""Reshaped input {} from {} to the {}"".format(i, ie_encoder.inputs[i].shape, new_shapes[i])) log.info(""Attempting to reshape the network to the modified inputs..."") try: ie_encoder.reshape(new_shapes) log.info(""Successful!"") except: log.info(""Failed...reloading the network"") ie_encoder = ie.read_network(model=model_xml, weights=model_bin) log.info(""Done"") else: log.info(""Skipping network reshaping,"" "" as (context length + max question length) exceeds the current (input) network sequence length"") # check input and output names input_names_model = list(ie_encoder.inputs.keys()) output_names_model = list(ie_encoder.outputs.keys()) input_names = eval(args.input_names) output_names = eval(args.output_names) if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names): log.error(""Input or Output names do not match"") log.error("" Network input->output names: {}->{}"".format(input_names_model, output_names_model)) log.error("" Expected (from the demo cmd-line) input->output names: {}->{}"".format(input_names, output_names)) raise Exception(""Unexpected network input or output names"") # load model to the device log.info(""Loading model to the {}"".format(args.device)) ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device) # loop on user's questions while True: question = input('Type question (enter to exit):') if not question: break q_tokens_id, _ = text_to_tokens(question, vocab) # maximum number of tokens that can be processed by network at once max_length = ie_encoder.inputs[input_names[0]].shape[1] # calculate number of tokens for context in each inference request. 
# reserve 3 positions for special tokens # [CLS] q_tokens [SEP] c_tokens [SEP] c_wnd_len = max_length - (len(q_tokens_id) + 3) # token num between two neighbour context windows # 1/2 means that context windows are overlapped by half c_stride = c_wnd_len // 2 t0 = time.time() t_count = 0 # array of answers from each window answers = [] # init a window to iterate over context c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id)) # iterate while context window is not empty while c_e > c_s: # form the request tok_cls = vocab['[CLS]'] tok_sep = vocab['[SEP]'] input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep] token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0] attention_mask = [1] * len(input_ids) # pad the rest of the request pad_len = max_length - len(input_ids) input_ids += [0] * pad_len token_type_ids += [0] * pad_len attention_mask += [0] * pad_len # create numpy inputs for IE inputs = { input_names[0]: np.array([input_ids], dtype=np.int32), input_names[1]: np.array([attention_mask], dtype=np.int32), input_names[2]: np.array([token_type_ids], dtype=np.int32), } t_start = time.time() # infer by IE res = ie_encoder_exec.infer(inputs=inputs) t_end = time.time() t_count += 1 log.info(""Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)"".format( max_length, 1 / (t_end - t_start), t_end - t_start )) # get start-end scores for context def get_score(name): out = np.exp(res[name].reshape((max_length,))) return out / out.sum(axis=-1) score_s = get_score(output_names[0]) score_e = get_score(output_names[1]) # get 'no-answer' score (not valid if model has been fine-tuned on squad1.x) if args.model_squad_ver.split('.')[0] == '1': score_na = 0 else: score_na = score_s[0] * score_e[0] # find product of all start-end combinations to find the best one c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor score_mat = np.matmul( score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)), score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s)) ) # reset candidates with end before start score_mat = np.triu(score_mat) # reset long candidates (>max_answer_token_num) score_mat = np.tril(score_mat, args.max_answer_token_num - 1) # find the best start-end pair max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1]) max_score = score_mat[max_s, max_e] * (1 - score_na) # convert to context text start-end index max_s = c_tokens_se[c_s + max_s][0] max_e = c_tokens_se[c_s + max_e][1] # check that answers list does not have duplicates (because of context windows overlapping) same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e] if same: assert len(same) == 1 # update exist answer record a = answers[same[0]] answers[same[0]] = (max(max_score, a[0]), max_s, max_e) else: # add new record answers.append((max_score, max_s, max_e)) # check that context window reach the end if c_e == len(c_tokens_id): break # move to next window position c_s = min(c_s + c_stride, len(c_tokens_id)) c_e = min(c_s + c_wnd_len, len(c_tokens_id)) t1 = time.time() log.info(""{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)"".format( t_count, max_length, t1 - t0, (t1 - t0) / t_count )) # print top 3 results answers = list(sorted(answers, key=lambda x: -x[0])) for score, s, e in answers[:3]: log.info(""---answer: {:0.2f} {}"".format(score, context[s:e])) c_s, c_e = find_sentence_range(context, s, e) log.info("" "" + 
context[c_s:s] + ""\033[91m"" + context[s:e] + '\033[0m' + context[e:c_e]) ","def main(): log.basicConfig(format=""[ %(levelname)s ] %(message)s"", level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() # load vocabulary file for model log.info(""Loading vocab file:\t{}"".format(args.vocab)) with open(args.vocab, ""r"", encoding=""utf-8"") as r: vocab = dict((t.rstrip(""\n""), i) for i, t in enumerate(r.readlines())) log.info(""{} tokens loaded"".format(len(vocab))) # get context as a string (as we might need it's length for the sequence reshape) context = get_context(args) # encode context into token ids list c_tokens_id, c_tokens_se = text_to_tokens(context, vocab) log.info(""Initializing Inference Engine"") ie = IECore() log.info(""Device is {}"".format(args.device)) version = ie.get_versions(args.device)[args.device] version_str = ""{}.{}.{}"".format(version.major, version.minor, version.build_number) log.info(""Plugin version is {}"".format(version_str)) # read IR model_xml = args.model model_bin = os.path.splitext(model_xml)[0] + "".bin"" log.info(""Loading network files:\n\t{}\n\t{}"".format(model_xml, model_bin)) ie_encoder = ie.read_network(model=model_xml, weights=model_bin) if args.reshape: # reshape the sequence length to the context + maximum question length (in tokens) first_input_layer = next(iter(ie_encoder.inputs)) c = ie_encoder.inputs[first_input_layer].shape[1] # find the closest multiple of 64 seq = min(c, round((len(c_tokens_id) + args.max_question_token_num) / 64) * 64) if seq < c: input_info = list(ie_encoder.inputs) new_shapes = dict([]) for i in input_info: n, c = ie_encoder.inputs[i].shape new_shapes[i] = [n, seq] log.info(""Reshaped input {} from {} to the {}"".format(i, ie_encoder.inputs[i].shape, new_shapes[i])) log.info(""Attempting to reshape the network to the modified inputs..."") try: ie_encoder.reshape(new_shapes) log.info(""Successful!"") except RuntimeError: log.info(""Failed...reloading the network"") ie_encoder = ie.read_network(model=model_xml, weights=model_bin) log.info(""Done"") else: log.info(""Skipping network reshaping,"" "" as (context length + max question length) exceeds the current (input) network sequence length"") # check input and output names input_names_model = list(ie_encoder.inputs.keys()) output_names_model = list(ie_encoder.outputs.keys()) input_names = eval(args.input_names) output_names = eval(args.output_names) if set(input_names_model) != set(input_names) or set(output_names_model) != set(output_names): log.error(""Input or Output names do not match"") log.error("" Network input->output names: {}->{}"".format(input_names_model, output_names_model)) log.error("" Expected (from the demo cmd-line) input->output names: {}->{}"".format(input_names, output_names)) raise Exception(""Unexpected network input or output names"") # load model to the device log.info(""Loading model to the {}"".format(args.device)) ie_encoder_exec = ie.load_network(network=ie_encoder, device_name=args.device) # loop on user's questions while True: question = input('Type question (enter to exit):') if not question: break q_tokens_id, _ = text_to_tokens(question, vocab) # maximum number of tokens that can be processed by network at once max_length = ie_encoder.inputs[input_names[0]].shape[1] # calculate number of tokens for context in each inference request. 
# reserve 3 positions for special tokens # [CLS] q_tokens [SEP] c_tokens [SEP] c_wnd_len = max_length - (len(q_tokens_id) + 3) # token num between two neighbour context windows # 1/2 means that context windows are overlapped by half c_stride = c_wnd_len // 2 t0 = time.time() t_count = 0 # array of answers from each window answers = [] # init a window to iterate over context c_s, c_e = 0, min(c_wnd_len, len(c_tokens_id)) # iterate while context window is not empty while c_e > c_s: # form the request tok_cls = vocab['[CLS]'] tok_sep = vocab['[SEP]'] input_ids = [tok_cls] + q_tokens_id + [tok_sep] + c_tokens_id[c_s:c_e] + [tok_sep] token_type_ids = [0] + [0] * len(q_tokens_id) + [0] + [1] * (c_e - c_s) + [0] attention_mask = [1] * len(input_ids) # pad the rest of the request pad_len = max_length - len(input_ids) input_ids += [0] * pad_len token_type_ids += [0] * pad_len attention_mask += [0] * pad_len # create numpy inputs for IE inputs = { input_names[0]: np.array([input_ids], dtype=np.int32), input_names[1]: np.array([attention_mask], dtype=np.int32), input_names[2]: np.array([token_type_ids], dtype=np.int32), } t_start = time.time() # infer by IE res = ie_encoder_exec.infer(inputs=inputs) t_end = time.time() t_count += 1 log.info(""Sequence of length {} is processed with {:0.2f} sentence/sec ({:0.2} sec per request)"".format( max_length, 1 / (t_end - t_start), t_end - t_start )) # get start-end scores for context def get_score(name): out = np.exp(res[name].reshape((max_length,))) return out / out.sum(axis=-1) score_s = get_score(output_names[0]) score_e = get_score(output_names[1]) # get 'no-answer' score (not valid if model has been fine-tuned on squad1.x) if args.model_squad_ver.split('.')[0] == '1': score_na = 0 else: score_na = score_s[0] * score_e[0] # find product of all start-end combinations to find the best one c_s_idx = len(q_tokens_id) + 2 # index of first context token in tensor c_e_idx = max_length - (1 + pad_len) # index of last+1 context token in tensor score_mat = np.matmul( score_s[c_s_idx:c_e_idx].reshape((c_e - c_s, 1)), score_e[c_s_idx:c_e_idx].reshape((1, c_e - c_s)) ) # reset candidates with end before start score_mat = np.triu(score_mat) # reset long candidates (>max_answer_token_num) score_mat = np.tril(score_mat, args.max_answer_token_num - 1) # find the best start-end pair max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1]) max_score = score_mat[max_s, max_e] * (1 - score_na) # convert to context text start-end index max_s = c_tokens_se[c_s + max_s][0] max_e = c_tokens_se[c_s + max_e][1] # check that answers list does not have duplicates (because of context windows overlapping) same = [i for i, a in enumerate(answers) if a[1] == max_s and a[2] == max_e] if same: assert len(same) == 1 # update exist answer record a = answers[same[0]] answers[same[0]] = (max(max_score, a[0]), max_s, max_e) else: # add new record answers.append((max_score, max_s, max_e)) # check that context window reach the end if c_e == len(c_tokens_id): break # move to next window position c_s = min(c_s + c_stride, len(c_tokens_id)) c_e = min(c_s + c_wnd_len, len(c_tokens_id)) t1 = time.time() log.info(""{} requests by {} length are processed by {:0.2f}sec ({:0.2}sec per request)"".format( t_count, max_length, t1 - t0, (t1 - t0) / t_count )) # print top 3 results answers = list(sorted(answers, key=lambda x: -x[0])) for score, s, e in answers[:3]: log.info(""---answer: {:0.2f} {}"".format(score, context[s:e])) c_s, c_e = find_sentence_range(context, s, e) log.info("" "" + 
context[c_s:s] + ""\033[91m"" + context[s:e] + '\033[0m' + context[e:c_e]) " 31286,"def install_all_content_packs(client: demisto_client, host: str, server_version: str): """""" Iterates over the packs currently located in the Packs directory. Wrapper for install_packs. Retrieving the latest version of each pack from the metadata file in content repo. :param client: Demisto-py client to connect to the server. :param host: FQDN of the server. :param server_version: The version of the server the packs are installed on. :return: None. Prints the response from the server in the build. """""" all_packs = [] logging.debug(f""Installing all content packs in server {host}"") for pack_id in os.listdir(PACKS_FULL_PATH): if pack_id not in IGNORED_FILES: metadata_path = os.path.join(PACKS_FULL_PATH, pack_id, PACK_METADATA_FILE) with open(metadata_path, 'r') as json_file: pack_metadata = json.load(json_file) pack_version = pack_metadata.get('currentVersion') server_min_version = pack_metadata.get('serverMinVersion', '6.0.0') hidden = pack_metadata.get('hidden', False) # Check if the server version is greater than the minimum server version required for this pack: if ('Master' in server_version or LooseVersion(server_version) >= LooseVersion(server_min_version)) and not hidden: all_packs.append(get_pack_installation_request_data(pack_id, pack_version)) return install_packs(client, host, all_packs) ","def install_all_content_packs(client: demisto_client, host: str, server_version: str): """""" Iterates over the packs currently located in the Packs directory. Wrapper for install_packs. Retrieving the latest version of each pack from the metadata file in content repo. :param client: Demisto-py client to connect to the server. :param host: FQDN of the server. :param server_version: The version of the server the packs are installed on. :return: None. Prints the response from the server in the build. 
"""""" all_packs = [] logging.debug(f""Installing all content packs in server {host}"") for pack_id in os.listdir(PACKS_FULL_PATH): if pack_id not in IGNORED_FILES: metadata_path = os.path.join(PACKS_FULL_PATH, pack_id, PACK_METADATA_FILE) with open(metadata_path, 'r') as json_file: pack_metadata = json.load(json_file) pack_version = pack_metadata.get('currentVersion') server_min_version = pack_metadata.get('serverMinVersion', '6.0.0') hidden = pack_metadata.get('hidden', False) # Check if the server version is greater than the minimum server version required for this pack or if the pack is hidden (deprecated): if ('Master' in server_version or LooseVersion(server_version) >= LooseVersion(server_min_version)) and not hidden: all_packs.append(get_pack_installation_request_data(pack_id, pack_version)) return install_packs(client, host, all_packs) " 30495,"def get_indicator_type(indicator_type, item): """"""Checks the indicator type Args: indicator_type: IP, URL, domain or hash item: the indicator row from the csv response Returns: The indicator type per the indicators defined in Demisto """""" if indicator_type == 'ip': return get_ip_type(item.get('Name')) elif indicator_type == 'hash': return 'File ' + item.get('Algorithm') elif indicator_type == 'domain': return 'Domain' elif indicator_type == 'url': return 'URL' ","def get_indicator_type(indicator_type, item): """"""Checks the indicator type Args: indicator_type: IP, URL, domain or hash item: the indicator row from the csv response Returns: The indicator type per the indicators defined in Demisto """""" if indicator_type == 'ip': return get_ip_type(item.get('Name')) elif indicator_type == 'hash': return 'File ' + item.get('Algorithm') elif indicator_type == 'domain': return 'Domain' elif indicator_type == 'url': return FeedIndicatorType.URL " 46916,"def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path: (str) path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir: (str) path to a directory containing a correctly shaped checkpoint save_path: (str) directory to save the new model """""" hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob(""*.ckpt"")) assert ckpt_files, f""could not find any ckpt files inside the {pl_ckpt_path} directory"" if len(ckpt_files) > 1: logger.info(f""averaging {ckpt_files}"") state_dicts = [sanitize(torch.load(x, map_location=""cpu"")[""state_dict""]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f""missing keys: {missing}"" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant ","def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) 
Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path: (str) path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir (:obj:`str`): Path to a directory containing a correctly shaped checkpoint save_path: (str) directory to save the new model """""" hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob(""*.ckpt"")) assert ckpt_files, f""could not find any ckpt files inside the {pl_ckpt_path} directory"" if len(ckpt_files) > 1: logger.info(f""averaging {ckpt_files}"") state_dicts = [sanitize(torch.load(x, map_location=""cpu"")[""state_dict""]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f""missing keys: {missing}"" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant " 56384,"def _backprop_to_all(outputs, retain_grad, loss_scale): OrderedDict = chainer.utils._collections.OrderedDict # fix py2 memory leak cand_funcs = [] seen_set = set() def add_cand(cand): if cand not in seen_set: # Negate since heapq is min-heap heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand)) seen_set.add(cand) grads = _backprop_utils.GradTable(load_if_new=True) root_nodes = set() leaf_nodes = set() for y_var in outputs: # TODO(sonots): Implement for ChainerX if y_var.xp is chainerx: raise NotImplementedError() y = y_var.node root_nodes.add(y) grads[y] = y_var.grad_var y._check_old_style_gradient() func = y.creator_node if func is None: # leaf leaf_nodes.add(y) else: add_cand(func) # Fix F812 (Python 2) y = None del y while cand_funcs: _, _, func = heapq.heappop(cand_funcs) inputs = func.inputs target_input_indexes = tuple([ i for i, x in enumerate(inputs) if x.requires_grad ]) outputs = [y() for y in func.outputs] # access via weak ref out_grad = tuple([grads.pop(y) for y in outputs]) if not target_input_indexes: continue if all([gy is None for gy in out_grad]): continue in_data = tuple([x.data for x in inputs]) out_grad_array = tuple( [None if g is None else g.array for g in out_grad]) hooks = chainer.get_function_hooks() if func._n_local_function_hooks != 0: hooks = collections.OrderedDict(hooks) hooks.update(func.local_function_hooks) hooks = hooks.values() # avoid six for performance with cuda.get_device_from_array(*(in_data + out_grad_array)): for hook in hooks: hook.backward_preprocess(func, in_data, out_grad_array) # Collect the current input gradients. 
target_inputs = [inputs[i] for i in target_input_indexes] # Keep the order for the portability, rather than # in_grad = {x: grads.get_as_list(x) # for x in set(target_inputs)} in_grad = OrderedDict() for x in target_inputs: if x not in in_grad: in_grad[x] = grads.get_as_list(x) # to reduce memory usage x._set_grad_var_if_available(None) _backprop_utils.backprop_step( func, target_input_indexes, out_grad, in_grad) for hook in hooks: hook.backward_postprocess(func, in_data, out_grad_array) for y, gy in six.moves.zip(outputs, out_grad): if y is not None and y not in root_nodes: y._set_grad_var_if_available( gy if retain_grad else None) del gy, out_grad # to reduce memory usage for x, gx in in_grad.items(): if not gx: # gradient == None continue for gx_elem in gx: _check_grad_type(func, x, True, gx_elem, True) del gx_elem # to reduce memory usage if x.creator_node is None: # leaf leaf_nodes.add(x) else: add_cand(x.creator_node) del gx, in_grad # to reduce memory usage for x in leaf_nodes: x_var = x.get_variable_or_none() gx = grads.pop(x) if x_var is not None: x_var._set_grad_var_without_check(gx) x_var._loss_scale = loss_scale grads.assert_no_grads() ","def _backprop_to_all(outputs, retain_grad, loss_scale): OrderedDict = chainer.utils._collections.OrderedDict # fix py2 memory leak cand_funcs = [] seen_set = set() def add_cand(cand): if cand not in seen_set: # Negate since heapq is min-heap heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand)) seen_set.add(cand) grads = _backprop_utils.GradTable(load_if_new=True) root_nodes = set() leaf_nodes = set() for y_var in outputs: # TODO(sonots): Implement for ChainerX if y_var.xp is chainerx: raise NotImplementedError() y = y_var.node root_nodes.add(y) grads[y] = y_var.grad_var y._check_old_style_gradient() func = y.creator_node if func is None: # leaf leaf_nodes.add(y) else: add_cand(func) # Fix F812 (Python 2) y = None del y while cand_funcs: _, _, func = heapq.heappop(cand_funcs) inputs = func.inputs target_input_indexes = tuple([ i for i, x in enumerate(inputs) if x.requires_grad ]) outputs = [y() for y in func.outputs] # access via weak ref out_grad = tuple([grads.pop(y) for y in outputs]) if not target_input_indexes: continue for gy in out_grad: if gy is not None: break else: continue in_data = tuple([x.data for x in inputs]) out_grad_array = tuple( [None if g is None else g.array for g in out_grad]) hooks = chainer.get_function_hooks() if func._n_local_function_hooks != 0: hooks = collections.OrderedDict(hooks) hooks.update(func.local_function_hooks) hooks = hooks.values() # avoid six for performance with cuda.get_device_from_array(*(in_data + out_grad_array)): for hook in hooks: hook.backward_preprocess(func, in_data, out_grad_array) # Collect the current input gradients. 
target_inputs = [inputs[i] for i in target_input_indexes] # Keep the order for the portability, rather than # in_grad = {x: grads.get_as_list(x) # for x in set(target_inputs)} in_grad = OrderedDict() for x in target_inputs: if x not in in_grad: in_grad[x] = grads.get_as_list(x) # to reduce memory usage x._set_grad_var_if_available(None) _backprop_utils.backprop_step( func, target_input_indexes, out_grad, in_grad) for hook in hooks: hook.backward_postprocess(func, in_data, out_grad_array) for y, gy in six.moves.zip(outputs, out_grad): if y is not None and y not in root_nodes: y._set_grad_var_if_available( gy if retain_grad else None) del gy, out_grad # to reduce memory usage for x, gx in in_grad.items(): if not gx: # gradient == None continue for gx_elem in gx: _check_grad_type(func, x, True, gx_elem, True) del gx_elem # to reduce memory usage if x.creator_node is None: # leaf leaf_nodes.add(x) else: add_cand(x.creator_node) del gx, in_grad # to reduce memory usage for x in leaf_nodes: x_var = x.get_variable_or_none() gx = grads.pop(x) if x_var is not None: x_var._set_grad_var_without_check(gx) x_var._loss_scale = loss_scale grads.assert_no_grads() " 41056,"def savefig(plot, filename): """"""Save figure."""""" plot.savefig(filename) plt.clf() ","def savefig(plot, filename): """"""Save a figure."""""" plot.savefig(filename) plt.clf() " 27271,"def fmt_schema(schema: sch.Schema) -> str: """"""Format `schema`. Parameters ---------- schema Ibis schema to format Returns ------- str Formatted schema """""" names = schema.names maxlen = max(map(len, names)) cols = [f""{name.ljust(maxlen)} {typ}"" for name, typ in schema.items()] if (depth := ibis.options.repr.table_columns) is not None: if depth < len(cols): return util.indent( fmt_truncated( cols, depth=depth, sep=""\n"", ellipsis=VERTICAL_ELLIPSIS.center(len(schema.names[0])), ), spaces=2, ) return util.indent(""\n"".join(cols), spaces=2) ","def fmt_schema(schema: sch.Schema) -> str: """"""Format `schema`. 
Parameters ---------- schema Ibis schema to format Returns ------- str Formatted schema """""" names = schema.names maxlen = max(map(len, names)) cols = [f""{name:<{maxlen}} {typ}"" for name, typ in schema.items()] if (depth := ibis.options.repr.table_columns) is not None: if depth < len(cols): return util.indent( fmt_truncated( cols, depth=depth, sep=""\n"", ellipsis=VERTICAL_ELLIPSIS.center(len(schema.names[0])), ), spaces=2, ) return util.indent(""\n"".join(cols), spaces=2) " 99,"def _validate_catalog(catalog, locale): validation_errors = [] for message in catalog: message_errors = validate(message, catalog) if message.lineno: if message.fuzzy: print( f'openlibrary/i18n/{locale}/messages.po:' f'{message.lineno}: {message.string}' ) if message_errors: validation_errors.append( f'openlibrary/i18n/{locale}/messages.po:' f'{message.lineno}: {message.string}' ) for e in message_errors: validation_errors.append(e) if validation_errors: print(""Validation failed..."") print(""Please correct the following errors before proceeding:"") for e in validation_errors: print(e) return len(validation_errors) ","def _validate_catalog(catalog, locale): validation_errors = [] for message in catalog: message_errors = validate(message, catalog) if message.lineno: if message.fuzzy: print( f'openlibrary/i18n/{locale}/messages.po:' f'{message.lineno}: ""{message.string}"" is fuzzy' ) if message_errors: validation_errors.append( f'openlibrary/i18n/{locale}/messages.po:' f'{message.lineno}: {message.string}' ) for e in message_errors: validation_errors.append(e) if validation_errors: print(""Validation failed..."") print(""Please correct the following errors before proceeding:"") for e in validation_errors: print(e) return len(validation_errors) " 6434,"def get_context(context): context.no_cache = 1 context.bg = 'background-color: #fafbfc; border-radius:0' context.align_greeting = 'start' context.align_search_box = '0' settings = frappe.get_doc(""Support Settings"", ""Support Settings"") s = settings context.greeting_text = s.greeting_text if s.greeting_text else ""We're here to help"" if s.greeting_text_and_search_bar_alignment == 'Center': context.align_greeting = 'center' context.align_search_box = '25%' if s.greeting_text_and_search_bar_alignment == 'Right': context.align_greeting = 'end' context.align_search_box = '50%' if s.background == 'Color' and s.select_color: context.bg = 'background-color: ' + s.select_color + '; border-radius:0' if s.background == 'Image' and s.add_image: context.bg = 'background-image: url(' + s.add_image + '); background-repeat: no-repeat; border-radius:0' # Support content favorite_article_count = 0 portal_setting = frappe.get_single(""Portal Settings"") context.favorite_article_list=[] context.help_article_list=[] context.category_list = frappe.get_all(""Help Category"", fields=""name"") all_articles = [i[0] for i in frappe.db.sql(""""""SELECT route from `tabHelp Article`"""""")] favorite_articles = get_favorite_articles() for article in favorite_articles: favorite_article_dict = {} if favorite_article_count < 3: if article[0] in all_articles: favorite_article = frappe.get_all(""Help Article"", fields=[""title"", ""content"", ""route"", ""category""], filters={""route"": article[0]}) content = frappe.utils.strip_html(favorite_article[0].content) if len(content) > 115: content = content[:112] + '...' 
favorite_article_dict = { 'title': favorite_article[0].title, 'content': content, 'category': favorite_article[0].category, 'route': favorite_article[0].route, } context.favorite_article_list.append(favorite_article_dict) favorite_article_count += 1 for category in context.category_list: help_aricles_per_category = {} help_articles = frappe.get_all(""Help Article"", fields=""*"", filters={""category"": category.name}, order_by=""modified desc"", limit=5) help_aricles_per_caetgory = { 'category': category, 'articles': help_articles, } context.help_article_list.append(help_aricles_per_caetgory) # Get Started sections if s.get_started_sections: sections = json.loads(s.get_started_sections) context.get_started_sections = sections # Forum posts if s.show_latest_forum_posts: topics_data, post_params = get_forum_posts(s) context.post_params = post_params context.forum_url = s.forum_url context.topics = topics_data[:3] # Issues if frappe.session.user != ""Guest"": context.issues = frappe.get_all(""Issue"", fields=[""name"", ""status"", ""subject"", ""modified""])[:3] else: context.issues = [] ","def get_context(context): context.no_cache = 1 context.bg = 'background-color: #fafbfc; border-radius:0' context.align_greeting = 'start' context.align_search_box = '0' settings = frappe.get_doc(""Support Settings"", ""Support Settings"") s = settings context.greeting_text = s.greeting_text or _(""We're here to help"") if s.greeting_text_and_search_bar_alignment == 'Center': context.align_greeting = 'center' context.align_search_box = '25%' if s.greeting_text_and_search_bar_alignment == 'Right': context.align_greeting = 'end' context.align_search_box = '50%' if s.background == 'Color' and s.select_color: context.bg = 'background-color: ' + s.select_color + '; border-radius:0' if s.background == 'Image' and s.add_image: context.bg = 'background-image: url(' + s.add_image + '); background-repeat: no-repeat; border-radius:0' # Support content favorite_article_count = 0 portal_setting = frappe.get_single(""Portal Settings"") context.favorite_article_list=[] context.help_article_list=[] context.category_list = frappe.get_all(""Help Category"", fields=""name"") all_articles = [i[0] for i in frappe.db.sql(""""""SELECT route from `tabHelp Article`"""""")] favorite_articles = get_favorite_articles() for article in favorite_articles: favorite_article_dict = {} if favorite_article_count < 3: if article[0] in all_articles: favorite_article = frappe.get_all(""Help Article"", fields=[""title"", ""content"", ""route"", ""category""], filters={""route"": article[0]}) content = frappe.utils.strip_html(favorite_article[0].content) if len(content) > 115: content = content[:112] + '...' 
favorite_article_dict = { 'title': favorite_article[0].title, 'content': content, 'category': favorite_article[0].category, 'route': favorite_article[0].route, } context.favorite_article_list.append(favorite_article_dict) favorite_article_count += 1 for category in context.category_list: help_aricles_per_category = {} help_articles = frappe.get_all(""Help Article"", fields=""*"", filters={""category"": category.name}, order_by=""modified desc"", limit=5) help_aricles_per_caetgory = { 'category': category, 'articles': help_articles, } context.help_article_list.append(help_aricles_per_caetgory) # Get Started sections if s.get_started_sections: sections = json.loads(s.get_started_sections) context.get_started_sections = sections # Forum posts if s.show_latest_forum_posts: topics_data, post_params = get_forum_posts(s) context.post_params = post_params context.forum_url = s.forum_url context.topics = topics_data[:3] # Issues if frappe.session.user != ""Guest"": context.issues = frappe.get_all(""Issue"", fields=[""name"", ""status"", ""subject"", ""modified""])[:3] else: context.issues = [] " 33119,"def direct_obs_matrix(Nx, obs_inds): """"""Generate matrix that ""picks"" state elements `obs_inds` out of `range(Nx)`. Parameters ---------- Nx: int Number of total length of state vector obs_inds: ndarray The observed indices. Returns ------- H: ndarray The observation matrix for direct partial observations. """""" Ny = len(obs_inds) H = np.zeros((Ny, Nx)) H[range(Ny), obs_inds] = 1 # One-liner: # H = np.array([[i==j for i in range(M)] for j in jj],float) return H ","def direct_obs_matrix(Nx, obs_inds): """"""Generate matrix that ""picks"" state elements `obs_inds` out of `range(Nx)`. Parameters ---------- Nx: int Length of state vector obs_inds: ndarray The observed indices. Returns ------- H: ndarray The observation matrix for direct partial observations. 
"""""" Ny = len(obs_inds) H = np.zeros((Ny, Nx)) H[range(Ny), obs_inds] = 1 # One-liner: # H = np.array([[i==j for i in range(M)] for j in jj],float) return H " 29916,"def test_print_dot() -> None: # print Workflow cwl_path = get_data(""tests/wf/revsort.cwl"") expected_dot = pydot.graph_from_dot_data( """""" digraph {{ graph [bgcolor=""#eeeeee"", clusterrank=local, labeljust=right, labelloc=bottom ]; subgraph cluster_inputs {{ graph [label=""Workflow Inputs"", rank=same, style=dashed ]; ""workflow_input"" [fillcolor=""#94DDF4"", label=workflow_input, style=filled]; ""reverse_sort"" [fillcolor=""#94DDF4"", label=reverse_sort, style=filled]; }} subgraph cluster_outputs {{ graph [label=""Workflow Outputs"", labelloc=b, rank=same, style=dashed ]; ""sorted_output"" [fillcolor=""#94DDF4"", label=sorted_output, style=filled]; }} ""rev"" [fillcolor=lightgoldenrodyellow, label=rev, style=filled]; ""sorted"" [fillcolor=lightgoldenrodyellow, label=sorted, style=filled]; ""rev"" -> ""sorted""; ""sorted"" -> ""sorted_output""; ""workflow_input"" -> ""rev""; ""reverse_sort"" -> ""sorted""; }} """""".format() )[0] stdout = StringIO() assert main([""--debug"", ""--print-dot"", cwl_path], stdout=stdout) == 0 computed_dot = pydot.graph_from_dot_data(stdout.getvalue())[0] computed_edges = sorted( (source, target) for source, target in computed_dot.obj_dict[""edges""] ) expected_edges = sorted( (source, target) for source, target in expected_dot.obj_dict[""edges""] ) assert computed_edges == expected_edges # print CommandLineTool cwl_path = get_data(""tests/wf/echo.cwl"") stdout = StringIO() assert main([""--debug"", ""--print-dot"", cwl_path], stdout=stdout) == 1 ","def test_print_dot() -> None: # print Workflow cwl_path = get_data(""tests/wf/revsort.cwl"") expected_dot = pydot.graph_from_dot_data( """""" digraph {{ graph [bgcolor=""#eeeeee"", clusterrank=local, labeljust=right, labelloc=bottom ]; subgraph cluster_inputs {{ graph [label=""Workflow Inputs"", rank=same, style=dashed ]; ""workflow_input"" [fillcolor=""#94DDF4"", label=workflow_input, style=filled]; ""reverse_sort"" [fillcolor=""#94DDF4"", label=reverse_sort, style=filled]; }} subgraph cluster_outputs {{ graph [label=""Workflow Outputs"", labelloc=b, rank=same, style=dashed ]; ""sorted_output"" [fillcolor=""#94DDF4"", label=sorted_output, style=filled]; }} ""rev"" [fillcolor=lightgoldenrodyellow, label=rev, style=filled]; ""sorted"" [fillcolor=lightgoldenrodyellow, label=sorted, style=filled]; ""rev"" -> ""sorted""; ""sorted"" -> ""sorted_output""; ""workflow_input"" -> ""rev""; ""reverse_sort"" -> ""sorted""; }} """""".format() )[0] stdout = StringIO() assert main([""--debug"", ""--print-dot"", cwl_path], stdout=stdout) == 0 computed_dot = pydot.graph_from_dot_data(stdout.getvalue())[0] computed_edges = sorted( (source, target) for source, target in computed_dot.obj_dict[""edges""] ) expected_edges = sorted( (source, target) for source, target in expected_dot.obj_dict[""edges""] ) assert computed_edges == expected_edges # print CommandLineTool cwl_path = get_data(""tests/wf/echo.cwl"") stdout = StringIO() assert main([""--debug"", ""--print-dot"", cwl_path], stdout=stdout) == 1 " 27494,"def spark_streaming_from_pubsublite( project_number: int, location: str, subscription_id: str ) -> None: # [START pubsublite_spark_streaming_from_pubsublite] from pyspark.sql import SparkSession from pyspark.sql.types import StringType # TODO(developer): # project_number = 11223344556677 # location = ""us-central1-a"" # subscription_id = ""your-subscription-id"" 
spark = SparkSession.builder.appName(""read-app"").master(""yarn"").getOrCreate() sdf = ( spark.readStream.format(""pubsublite"") .option( ""pubsublite.subscription"", f""projects/{project_number}/locations/{location}/subscriptions/{subscription_id}"", ) .load() ) sdf = sdf.withColumn(""data"", sdf.data.cast(StringType())) query = ( sdf.writeStream.format(""console"") .outputMode(""append"") .trigger(processingTime=""1 second"") .start() ) query.awaitTermination(120) query.stop() # [END pubsublite_spark_streaming_from_pubsublite] ","def spark_streaming_from_pubsublite( project_number: int, region: str, subscription_id: str ) -> None: # [START pubsublite_spark_streaming_from_pubsublite] from pyspark.sql import SparkSession from pyspark.sql.types import StringType # TODO(developer): # project_number = 11223344556677 # location = ""us-central1-a"" # subscription_id = ""your-subscription-id"" spark = SparkSession.builder.appName(""read-app"").master(""yarn"").getOrCreate() sdf = ( spark.readStream.format(""pubsublite"") .option( ""pubsublite.subscription"", f""projects/{project_number}/locations/{location}/subscriptions/{subscription_id}"", ) .load() ) sdf = sdf.withColumn(""data"", sdf.data.cast(StringType())) query = ( sdf.writeStream.format(""console"") .outputMode(""append"") .trigger(processingTime=""1 second"") .start() ) query.awaitTermination(120) query.stop() # [END pubsublite_spark_streaming_from_pubsublite] " 10300,"def check_command(module, commandline): arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group', 'ln': 'state=link', 'mkdir': 'state=directory', 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'} commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri', 'svn': 'subversion', 'service': 'service', 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt', 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template', 'dnf': 'dnf', 'zypper': 'zypper'} become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl'] if isinstance(commandline, list): command = commandline[0] else: command = commandline.split()[0] command = os.path.basename(command) disable_suffix = ""If you need to use {cmd} because {mod} is insufficient you can add"" \ "" 'warn: no' to this tasks's args: or set 'command_warnings=False'"" \ "" in ansible.cfg to get rid of this message."" substitutions = {'mod': None, 'cmd': command} if command in arguments: msg = ""Consider using the {mod} module with {subcmd} rather than running '{cmd}'. "" + disable_suffix substitutions['mod'] = 'file' substitutions['subcmd'] = arguments[command] module.warn(msg.format(**substitutions)) if command in commands: msg = ""Consider using the {mod} module rather than running '{cmd}'. 
"" + disable_suffix substitutions['mod'] = commands[command] module.warn(msg.format(**substitutions)) if command in become: module.warn(""Consider using 'become', 'become_method', and 'become_user' rather than running %s"" % (command,)) ","def check_command(module, commandline): arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group', 'ln': 'state=link', 'mkdir': 'state=directory', 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'} commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri', 'svn': 'subversion', 'service': 'service', 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt', 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template', 'dnf': 'dnf', 'zypper': 'zypper'} become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl'] if isinstance(commandline, list): command = commandline[0] else: command = commandline.split()[0] command = os.path.basename(command) disable_suffix = ""If you need to use {cmd} because {mod} is insufficient you can add"" \ "" 'warn: no' to this tasks's args: or set 'command_warnings=False'"" \ "" in the defaults section of ansible.cfg to get rid of this message."" substitutions = {'mod': None, 'cmd': command} if command in arguments: msg = ""Consider using the {mod} module with {subcmd} rather than running '{cmd}'. "" + disable_suffix substitutions['mod'] = 'file' substitutions['subcmd'] = arguments[command] module.warn(msg.format(**substitutions)) if command in commands: msg = ""Consider using the {mod} module rather than running '{cmd}'. "" + disable_suffix substitutions['mod'] = commands[command] module.warn(msg.format(**substitutions)) if command in become: module.warn(""Consider using 'become', 'become_method', and 'become_user' rather than running %s"" % (command,)) " 53756,"def _get_kernels(Xnew, iv, kernel, full_cov, full_output_cov): Kuf = covariances.Kuf(iv, kernel, Xnew) # [(R), M, N] fully_correlated = Kuf.shape.ndims == 4 if fully_correlated: Knn = kernel(Xnew, full_cov=full_cov, full_output_cov=full_output_cov) M, L, N, K = tf.unstack(tf.shape(Kuf), num=Kuf.shape.ndims, axis=0) Kuf = tf.reshape(Kuf, (M * L, N * K)) if full_cov == full_output_cov: Knn = tf.reshape(Knn, (N * K, N * K)) if full_cov else tf.reshape(Knn, (N * K,)) elif isinstance(kernel, (kernels.SeparateIndependent, kernels.IndependentLatent)): Knn = tf.stack([k(Xnew, full_cov=full_cov) for k in kernel.kernels], axis=0) elif isinstance(kernel, kernels.MultioutputKernel): Knn = kernel.kernel(Xnew, full_cov=full_cov) else: Knn = kernel(Xnew, full_cov=full_cov) return Kuf, Knn, fully_correlated ","def _get_kernels(Xnew, iv, kernel, full_cov, full_output_cov): Kuf = covariances.Kuf(iv, kernel, Xnew) # [(R), M, N] fully_correlated = Kuf.shape.ndims == 4 if fully_correlated: Knn = kernel(Xnew, full_cov=full_cov, full_output_cov=full_output_cov) M, L, N, K = tf.unstack(tf.shape(Kuf), num=Kuf.shape.ndims, axis=0) Kuf = tf.reshape(Kuf, (M * L, N * K)) if full_cov == full_output_cov: new_shape = (N * K, N * K) if full_cov else (N * K,) Knn = tf.reshape(Knn, new_shape) elif isinstance(kernel, (kernels.SeparateIndependent, kernels.IndependentLatent)): Knn = tf.stack([k(Xnew, full_cov=full_cov) for k in kernel.kernels], axis=0) elif isinstance(kernel, kernels.MultioutputKernel): Knn = kernel.kernel(Xnew, full_cov=full_cov) else: Knn = kernel(Xnew, full_cov=full_cov) return Kuf, Knn, fully_correlated " 55795,"def enable(name=None, verbose=True, deprecation=True): if LooseVersion(sklearn_version) < 
LooseVersion(""0.21.0""): raise NotImplementedError( ""daal4py patches apply for scikit-learn >= 0.21.0 only ..."") if name is not None: do_patch(name) else: for key in _get_map_of_algorithms(): do_patch(key) if deprecation: set_idp_sklearn_verbose() warnings.warn_explicit(""\nScikit-learn patching with daal4py is deprecated "" ""and will be removed in the future.\n"" ""Use Intel(R) Extension "" ""for Scikit-learn* module instead "" ""(pip install scikit-learn-intelex).\n"" ""To enable patching, please use one of the "" ""following options:\n"" ""1) python -m sklearnex \n"" ""2) Enable patching from your script:"" "" from sklearnex import patch_sklearn\n"" "" patch_sklearn()"", FutureWarning, ""dispatcher.py"", 151) if verbose and deprecation and sys.stderr is not None: sys.stderr.write( ""Intel(R) oneAPI Data Analytics Library solvers for sklearn enabled: "" ""https://intelpython.github.io/daal4py/sklearn.html\n"") if verbose and deprecation == False and sys.stderr is not None: sys.stderr.write( ""Intel(R) Extension for Scikit-learn* solvers enabled: "" ""https://intel.github.io/daal4py/scikit-learn-intelex\n"") ","def enable(name=None, verbose=True, deprecation=True): if LooseVersion(sklearn_version) < LooseVersion(""0.21.0""): raise NotImplementedError( ""daal4py patches apply for scikit-learn >= 0.21.0 only ..."") if name is not None: do_patch(name) else: for key in _get_map_of_algorithms(): do_patch(key) if deprecation: set_idp_sklearn_verbose() warnings.warn_explicit(""\nScikit-learn patching with daal4py is deprecated "" ""and will be removed in the future.\n"" ""Use Intel(R) Extension "" ""for Scikit-learn* module instead "" ""(pip install scikit-learn-intelex).\n"" ""To enable patching, please use one of the "" ""following options:\n"" ""1) From the command line:\n"" "" python -m sklearnex \n"" ""2) From your script:\n"" "" from sklearnex import patch_sklearn\n"" "" patch_sklearn()"", FutureWarning, ""dispatcher.py"", 151) if verbose and deprecation and sys.stderr is not None: sys.stderr.write( ""Intel(R) oneAPI Data Analytics Library solvers for sklearn enabled: "" ""https://intelpython.github.io/daal4py/sklearn.html\n"") if verbose and deprecation == False and sys.stderr is not None: sys.stderr.write( ""Intel(R) Extension for Scikit-learn* solvers enabled: "" ""https://intel.github.io/daal4py/scikit-learn-intelex\n"") " 31408,"def fetch_incidents(client: Client, last_run_violation: dict, first_fetch_violation: str, max_results: str): """""" This function will run each interval (default 1 minute) :type client: client :param client: Gamma client :type last_run_violation: dict :param last_run_violation: last violation ID that was queried from Gamma :type first_fetch_violation: int :param first_fetch_violation: if last_violation is None, then begin from this violation ID :type max_results: int :param max_results: the max number of violations to pull, bound by MAX_INCIDENTS_TO_FETCH """""" try: first_fetch_violation = int(first_fetch_violation) max_results = int(max_results) except: raise ValueError(""first_fetch_violation and max_limit must be integers"") if not first_fetch_violation > 0: raise ValueError(""first_fetch_violation must be equal to 1 or higher"") if not max_results > 0: max_results = 10 elif max_results > MAX_INCIDENTS_TO_FETCH: max_results = MAX_INCIDENTS_TO_FETCH # get the last violation id fetched, if exists starting_violation = last_run_violation.get('starting_violation', first_fetch_violation) most_recent_violation = starting_violation incidents = [] violations = 
client.get_violation_list(starting_violation, max_results) for item in violations['response']: incident_violation = item['violation_id'] incident_time_ms = item['violation_event_timestamp'] * 1000 if incident_violation <= most_recent_violation: continue incident = { ""name"": f'Gamma Violation {incident_violation}', ""occurred"": timestamp_to_datestring(incident_time_ms), ""rawJSON"": json.dumps(item) } incidents.append(incident) # update last run if violation id is greater than last fetch if incident_violation > most_recent_violation: most_recent_violation = incident_violation next_run_violation = {'starting_violation': most_recent_violation} return next_run_violation, incidents ","def fetch_incidents(client: Client, last_run_violation: dict, first_fetch_violation: str, max_results: str): """""" This function will run each interval (default 1 minute) :type client: client :param client: Gamma client :type last_run_violation: dict :param last_run_violation: last violation ID that was queried from Gamma :type first_fetch_violation: int :param first_fetch_violation: if last_violation is None, then begin from this violation ID :type max_results: int :param max_results: the max number of violations to pull, bound by MAX_INCIDENTS_TO_FETCH """""" try: first_fetch_violation = int(first_fetch_violation) max_results = int(max_results) except: raise ValueError(""first_fetch_violation and max_limit must be integers"") if not first_fetch_violation > 0: raise ValueError(""first_fetch_violation must be equal to 1 or higher"") if max_results <= 0: max_results = 10 elif max_results > MAX_INCIDENTS_TO_FETCH: max_results = MAX_INCIDENTS_TO_FETCH # get the last violation id fetched, if exists starting_violation = last_run_violation.get('starting_violation', first_fetch_violation) most_recent_violation = starting_violation incidents = [] violations = client.get_violation_list(starting_violation, max_results) for item in violations['response']: incident_violation = item['violation_id'] incident_time_ms = item['violation_event_timestamp'] * 1000 if incident_violation <= most_recent_violation: continue incident = { ""name"": f'Gamma Violation {incident_violation}', ""occurred"": timestamp_to_datestring(incident_time_ms), ""rawJSON"": json.dumps(item) } incidents.append(incident) # update last run if violation id is greater than last fetch if incident_violation > most_recent_violation: most_recent_violation = incident_violation next_run_violation = {'starting_violation': most_recent_violation} return next_run_violation, incidents " 30359,"def search_mail(): """""" Newer endpoint to search for mails """""" headers = [] # type: List[Any] contents = [] # type: List[Any] context = {} # type: Dict[Any, Any] search_params = {} # type: Dict[Any, Any] advanced_params = {} # type: Dict[Any, Any] search_reason = demisto.args().get('search_reason', '').encode('utf-8') start = demisto.args().get('start', '').encode('utf-8') end = demisto.args().get('end', '').encode('utf-8') sender_ip = demisto.args().get('sender_ip', '').encode('utf-8') mail_to = demisto.args().get('mail_to', '').encode('utf-8') mail_from_ = demisto.args().get('mail_from', '').encode('utf-8') subject = demisto.args().get('subject', '').encode('utf-8') message_id = demisto.args().get('message_id', '').encode('utf-8') limit = int(demisto.args().get('limit', 100)) if search_reason: search_params['searchReason'] = search_reason if start: search_params['start'] = start if end: search_params['end'] = end if message_id: search_params['messageId'] = message_id if 
mail_from_: advanced_params['from'] = mail_from_ if mail_to: advanced_params['to'] = mail_to if subject: advanced_params['subject'] = subject if sender_ip: advanced_params['senderIP'] = sender_ip search_params['advancedTrackAndTraceOptions'] = advanced_params tracked_mails = search_mail_request(search_params) if limit: tracked_mails = tracked_mails[:limit] for tracked_mail in tracked_mails: to = [] # type: List[Any] receivers = tracked_mail.get('to') for receiver in receivers: to.append({ 'RecipientAddress': receiver.get('recipientAddress'), 'TaggedExternal': receiver.get('taggedExternal') }) contents.append({ 'Status': tracked_mail.get('status'), 'Received': tracked_mail.get('received'), 'FromEnv': { 'DisplayableName': tracked_mail.get('fromEnv', {}).get('displayableName'), 'EmailAddress': tracked_mail.get('fromEnv', {}).get('emailAddress') }, 'FromHdr': { 'DisplayableName': tracked_mail.get('FromHdr', {}).get('displayableName'), 'EmailAddress': tracked_mail.get('FromHdr', {}).get('emailAddress') }, 'Attachments': tracked_mail.get('attachments'), 'To': to, 'SenderIP': tracked_mail.get('senderIP'), 'Route': tracked_mail.get('route'), 'ID': tracked_mail.get('id'), 'Sent': tracked_mail.get('sent'), 'Subject': tracked_mail.get('subject') }) context['Mimecast.TrackedEmails'] = contents results = { 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': contents, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Mimecast Search: ', contents, headers), 'EntryContext': context } return results ","def search_mail(): """""" Newer endpoint to search for mails """""" headers = [] # type: List[Any] contents = [] # type: List[Any] context = {} # type: Dict[Any, Any] search_params = {} # type: Dict[Any, Any] advanced_params = {} # type: Dict[Any, Any] search_reason = demisto.args().get('search_reason', '').encode('utf-8') start = demisto.args().get('start', '').encode('utf-8') end = demisto.args().get('end', '').encode('utf-8') sender_ip = demisto.args().get('sender_ip', '').encode('utf-8') mail_to = demisto.args().get('mail_to', '').encode('utf-8') mail_from_ = demisto.args().get('mail_from', '').encode('utf-8') subject = demisto.args().get('subject', '').encode('utf-8') message_id = demisto.args().get('message_id', '').encode('utf-8') limit = int(demisto.args().get('limit', 100)) if search_reason: search_params['searchReason'] = search_reason if start: search_params['start'] = start if end: search_params['end'] = end if message_id: search_params['messageId'] = message_id if mail_from_: advanced_params['from'] = mail_from_ if mail_to: advanced_params['to'] = mail_to if subject: advanced_params['subject'] = subject if sender_ip: advanced_params['senderIP'] = sender_ip search_params['advancedTrackAndTraceOptions'] = advanced_params tracked_mails = search_mail_request(search_params) if limit: tracked_mails = tracked_mails[:limit] for tracked_mail in tracked_mails: to = [] # type: List[Any] receivers = tracked_mail.get('to', []) for receiver in receivers: to.append({ 'RecipientAddress': receiver.get('recipientAddress'), 'TaggedExternal': receiver.get('taggedExternal') }) contents.append({ 'Status': tracked_mail.get('status'), 'Received': tracked_mail.get('received'), 'FromEnv': { 'DisplayableName': tracked_mail.get('fromEnv', {}).get('displayableName'), 'EmailAddress': tracked_mail.get('fromEnv', {}).get('emailAddress') }, 'FromHdr': { 'DisplayableName': tracked_mail.get('FromHdr', {}).get('displayableName'), 'EmailAddress': tracked_mail.get('FromHdr', 
{}).get('emailAddress') }, 'Attachments': tracked_mail.get('attachments'), 'To': to, 'SenderIP': tracked_mail.get('senderIP'), 'Route': tracked_mail.get('route'), 'ID': tracked_mail.get('id'), 'Sent': tracked_mail.get('sent'), 'Subject': tracked_mail.get('subject') }) context['Mimecast.TrackedEmails'] = contents results = { 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': contents, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Mimecast Search: ', contents, headers), 'EntryContext': context } return results " 22679,"def build_lexicon_config(config_dict): # type: (dict) -> ConfigResolver """""" Convenient function to build a Lexicon 3.x config object. :param dict config_dict: the configuration specifics to apply :return: an instanciated ConfigResolver object :rtype: ConfigResolver """""" return ConfigResolver().with_dict(config_dict).with_env() ","def build_lexicon_config(config_dict): # type: (dict) -> ConfigResolver """""" Convenient function to build a Lexicon 3.x config object. :param dict config_dict: the configuration specifics to apply :return: an instantiated ConfigResolver object :rtype: ConfigResolver """""" return ConfigResolver().with_dict(config_dict).with_env() " 6334,"def test_matcher(wmsClient: WMSClient): # insert a proper DN to run the test resourceDescription = { ""OwnerGroup"": ""prod"", ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""DIRACVersion"": ""pippo"", ""GridCE"": ""some.grid.ce.org"", ""ReleaseVersion"": ""blabla"", ""VirtualOrganization"": ""LHCb"", ""PilotInfoReportedFlag"": ""True"", ""PilotBenchmark"": ""anotherPilot"", ""Site"": ""DIRAC.Jenkins.ch"", ""CPUTime"": 86400, } job = helloWorldJob() job.setDestination(""DIRAC.Jenkins.ch"") job.setInputData(""/a/bbb"") job.setType(""User"") jobDescription = createFile(job) res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription)) assert res[""OK""] is True, res[""Message""] jobID = res[""Value""] # forcing the update res = JobStateUpdateClient().setJobStatus(jobID, JobStatus.WAITING, ""matching"", ""source"", None, True) assert res[""OK""] is True, res[""Message""] tqDB = TaskQueueDB() tqDefDict = { ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""OwnerGroup"": ""prod"", ""Setup"": ""dirac-JenkinsSetup"", ""CPUTime"": 86400, } res = tqDB.insertJob(jobID, tqDefDict, 10) assert res[""OK""] is True, res[""Message""] res = MatcherClient().requestJob(resourceDescription) print(res) assert res[""OK""] is True, res[""Message""] wmsClient.deleteJob(jobID) ","def test_matcher(wmsClient: WMSClient): # insert a proper DN to run the test resourceDescription = { ""OwnerGroup"": ""prod"", ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""DIRACVersion"": ""pippo"", ""GridCE"": ""some.grid.ce.org"", ""ReleaseVersion"": ""blabla"", ""VirtualOrganization"": ""LHCb"", ""PilotInfoReportedFlag"": ""True"", ""PilotBenchmark"": ""anotherPilot"", ""Site"": ""DIRAC.Jenkins.ch"", ""CPUTime"": 86400, } job = helloWorldJob() job.setDestination(""DIRAC.Jenkins.ch"") job.setInputData(""/a/bbb"") job.setType(""User"") jobDescription = createFile(job) res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription)) assert res[""OK""] is True, res[""Message""] jobID = res[""Value""] # forcing the update res = JobStateUpdateClient().setJobStatus(jobID, JobStatus.WAITING, ""matching"", ""source"", None, True) assert res[""OK""], res[""Message""] tqDB = TaskQueueDB() tqDefDict = { ""OwnerDN"": ""/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser"", ""OwnerGroup"": ""prod"", 
""Setup"": ""dirac-JenkinsSetup"", ""CPUTime"": 86400, } res = tqDB.insertJob(jobID, tqDefDict, 10) assert res[""OK""] is True, res[""Message""] res = MatcherClient().requestJob(resourceDescription) print(res) assert res[""OK""] is True, res[""Message""] wmsClient.deleteJob(jobID) " 45497,"def _get_files_outside_toc( toc: Path, sourcedir: Path, excluded_patterns: Collection[str] ): source_root = sourcedir or Path() source_files = {f for f in glob(str(source_root / ""**/*""), recursive=True)} excluded_file_sets = [set(glob(p, recursive=True)) for p in excluded_patterns] included_files: Set[str] = { Path(relpath(f, source_root)).as_posix() for f in source_files.difference(*excluded_file_sets) if not isdir(f) } toc_yaml = yaml.safe_load(toc.read_text(encoding=""utf8"")) from nested_lookup import nested_lookup toc_files = {f for f in nested_lookup(""file"", toc_yaml)} verified_toc_files: Set[str] = { Path(f).as_posix() for f in included_files if os.path.splitext(f)[0] in toc_files } return included_files.difference(verified_toc_files) ","def _get_files_outside_toc( toc: Path, sourcedir: Path, excluded_patterns: Collection[str] ): source_root = sourcedir or Path() source_files = {ff for ff in glob(str(source_root / ""**/*""), recursive=True)} excluded_file_sets = [set(glob(p, recursive=True)) for p in excluded_patterns] included_files: Set[str] = { Path(relpath(f, source_root)).as_posix() for f in source_files.difference(*excluded_file_sets) if not isdir(f) } toc_yaml = yaml.safe_load(toc.read_text(encoding=""utf8"")) from nested_lookup import nested_lookup toc_files = {f for f in nested_lookup(""file"", toc_yaml)} verified_toc_files: Set[str] = { Path(f).as_posix() for f in included_files if os.path.splitext(f)[0] in toc_files } return included_files.difference(verified_toc_files) " 31028,"def main() -> None: try: api_key = demisto.params().get('apikey') verify = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) headers = { 'Authorization': f'Bearer {api_key}' } client = Client(headers, verify, proxy) if demisto.command() == 'CloudConvert-import': return_results(import_command(client, demisto.args())) elif demisto.command() == 'CloudConvert-convert': return_results(convert_command(client, demisto.args())) elif demisto.command() == 'CloudConvert-checkstatus': return_results(check_status_command(client, demisto.args())) elif demisto.command() == 'CloudConvert-export': return_results(export_command(client, demisto.args())) elif demisto.command() == 'test-module': return_results(test_module(client)) except Exception as e: err_msg = 'Task id not found or expired' if 'No query results for model' in str(e) else \ ('No more conversion minutes for today for this user' if 'Payment Required' in str(e) else str(e)) return_error(f'Failed to execute {demisto.command()} command. 
Error: {err_msg}') ","def main() -> None: try: api_key = demisto.params().get('apikey') verify = not demisto.params().get('insecure', False) proxy = demisto.params().get('proxy', False) headers = { 'Authorization': f'Bearer {api_key}' } client = Client(headers, verify, proxy) if demisto.command() == 'cloud-convert-import': return_results(import_command(client, demisto.args())) elif demisto.command() == 'CloudConvert-convert': return_results(convert_command(client, demisto.args())) elif demisto.command() == 'CloudConvert-checkstatus': return_results(check_status_command(client, demisto.args())) elif demisto.command() == 'CloudConvert-export': return_results(export_command(client, demisto.args())) elif demisto.command() == 'test-module': return_results(test_module(client)) except Exception as e: err_msg = 'Task id not found or expired' if 'No query results for model' in str(e) else \ ('No more conversion minutes for today for this user' if 'Payment Required' in str(e) else str(e)) return_error(f'Failed to execute {demisto.command()} command. Error: {err_msg}') " 25945,"def upgrade_vmss_extension(cmd, resource_group_name, vm_scale_set_name, no_wait=False): client = _compute_client_factory(cmd.cli_ctx) return sdk_no_wait(no_wait, client.virtual_machine_scale_set_rolling_upgrades.begin_start_extension_upgrade , resource_group_name, vm_scale_set_name) # endregion ","def upgrade_vmss_extension(cmd, resource_group_name, vm_scale_set_name, no_wait=False): client = _compute_client_factory(cmd.cli_ctx) return sdk_no_wait(no_wait, client.virtual_machine_scale_set_rolling_upgrades.begin_start_extension_upgrade, resource_group_name, vm_scale_set_name) # endregion " 45179,"def is_async_gen_fn(func): """""" Returns `True` if a function is an async generator. """""" while hasattr(func, ""__wrapped__""): func = func.__wrapped__ return inspect.isasyncgenfunction(func) ","def is_async_gen_fn(func: Callable) -> bool: """""" Returns `True` if a function is an async generator. """""" while hasattr(func, ""__wrapped__""): func = func.__wrapped__ return inspect.isasyncgenfunction(func) " 43969,"def advanced_spectrum(qnode, encoding_args=None, argnum=None, decimals=5, validation_kwargs=None): r""""""Compute the frequency spectrum of the Fourier representation of quantum circuits, including classical preprocessing. The circuit must only use single-parameter gates of the form :math:`e^{-i x_j G}` as input-encoding gates, which allows the computation of the spectrum by inspecting the gates' generators :math:`G`. The most important example of such gates are Pauli rotations. .. note:: More precisely, the ``advanced_spectrum`` function relies on the gate to define a ``generator``, and will fail if gates marked controlled by marked parameters do not have this attribute. The argument ``argnum`` controls which QNode arguments are considered as encoded inputs and the spectrum is computed only for these arguments. The input-encoding *gates* are those that are controlled by input-encoding QNode arguments. If no ``argnum`` are given, all QNode arguments are considered to be input-encoding arguments. .. note:: Arguments or parameters in an argument that do not contribute to the Fourier series of the QNode with a frequency are considered as contributing with a constant term. That is, a parameter that does not control any gate has the spectrum ``[0]``. 
Args: qnode (pennylane.QNode): :class:`~.pennylane.QNode` to compute the spectrum for encoding_args (dict[str, list[tuple]], set): Parameter index dictionary; keys are argument names, values are index tuples for that argument or an ``Ellipsis``. If a ``set``, all values are set to ``Ellipsis``. The contained argument and parameter indices indicate the scalar variables for which the spectrum is computed argnum (list[int]): Numerical indices for arguments with respect to which to compute the spectrum decimals (int): number of decimals to which to round frequencies. num_pos (int): Number of additional random positions at which to evaluate the Jacobian of the preprocessing and test that it is constant. Setting ``num_pos=0`` will deactivate the test. Returns: function: Function which accepts the same arguments as the QNode. When called, this function will return a dictionary of dictionaries containing the frequency spectra per QNode parameter. **Details** A circuit that returns an expectation value of a Hermitian observable which depends on :math:`N` scalar inputs :math:`x_j` can be interpreted as a function :math:`f: \mathbb{R}^N \rightarrow \mathbb{R}` (as the observable is Hermitian, the expectation value is real-valued). This function can always be expressed by a Fourier-type sum .. math:: \sum \limits_{\omega_1\in \Omega_1} \dots \sum \limits_{\omega_N \in \Omega_N} c_{\omega_1,\dots, \omega_N} e^{-i x_1 \omega_1} \dots e^{-i x_N \omega_N} over the *frequency spectra* :math:`\Omega_j \subseteq \mathbb{R},` :math:`j=1,\dots,N`. Each spectrum has the property that :math:`0 \in \Omega_j`, and the spectrum is symmetric (i.e., for every :math:`\omega \in \Omega_j` we have that :math:`-\omega \in\Omega_j`). If all frequencies are integer-valued, the Fourier sum becomes a *Fourier series*. As shown in `Vidal and Theis (2019) `_ and `Schuld, Sweke and Meyer (2020) `_, if an input :math:`x_j, j = 1 \dots N`, only enters into single-parameter gates of the form :math:`e^{-i x_j G}` (where :math:`G` is a Hermitian generator), the frequency spectrum :math:`\Omega_j` is fully determined by the eigenvalues of the generators :math:`G`. In many situations, the spectra are limited to a few frequencies only, which in turn limits the function class that the circuit can express. The ``advanced_spectrum`` function computes all frequencies that will potentially appear in the sets :math:`\Omega_1` to :math:`\Omega_N`. .. note:: In more detail, the ``advanced_spectrum`` function also allows for preprocessing of the QNode arguments before they are fed into the gates, as long as this processing is *linear*. In particular, constant prefactors for the encoding arguments are allowed. **Example** Consider the following example, which uses non-trainable inputs ``x``, ``y`` and ``z`` as well as trainable parameters ``w`` as arguments to the QNode. .. 
code-block:: python import pennylane as qml import numpy as np from advanced_spectrum import advanced_spectrum n_qubits = 3 dev = qml.device(""default.qubit"", wires=n_qubits) @qml.qnode(dev) def circuit(x, y, z, w): for i in range(n_qubits): qml.RX(0.5*x[i], wires=i) qml.Rot(w[0,i,0], w[0,i,1], w[0,i,2], wires=i) qml.RY(2.3*y[i], wires=i) qml.Rot(w[1,i,0], w[1,i,1], w[1,i,2], wires=i) qml.RX(z, wires=i) return qml.expval(qml.PauliZ(wires=0)) x = np.array([1., 2., 3.]) y = np.array([0.1, 0.3, 0.5]) z = -1.8 w = np.random.random((2, n_qubits, 3)) res = advanced_spectrum(circuit, argnum=[0, 1, 2])(x, y, z, w) This circuit looks as follows: >>> print(qml.draw(circuit)(x, y, z, w)) 0: ──RX(0.5)──Rot(0.598, 0.949, 0.346)───RY(0.23)──Rot(0.693, 0.0738, 0.246)──RX(-1.8)──┤ ⟨Z⟩ 1: ──RX(1)────Rot(0.0711, 0.701, 0.445)──RY(0.69)──Rot(0.32, 0.0482, 0.437)───RX(-1.8)──┤ 2: ──RX(1.5)──Rot(0.401, 0.0795, 0.731)──RY(1.15)──Rot(0.756, 0.38, 0.38)─────RX(-1.8)──┤ Applying the ``advanced_spectrum`` function to the circuit for the non-trainable parameters, we obtain: >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""x"": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]} ""y"": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]} ""z"": {(): [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]} .. note:: While the Fourier spectrum usually does not depend on trainable circuit parameters or the actual values of the inputs, it may still change based on inputs to the QNode that alter the architecture of the circuit. Above, we selected all input-encoding parameters for the spectrum computation, using the ``argnum`` keyword argument. We may also restrict the full analysis to a single QNode argument, again using ``argnum``: >>> res = advanced_spectrum(circuit, argnum=[0])(x, y, z, w) >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""x"": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]} Selecting arguments by name instead of index is possible via the ``encoding_args`` argument: >>> res = advanced_spectrum(circuit, encoding_args={""y""})(x, y, z, w) >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""y"": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]} Note that for array-valued arguments the spectrum for each element of the array is computed. A more fine-grained control is available by passing index tuples for the respective argument name in ``encoding_args``: >>> encoding_args = {""y"": [(0,),(2,)]} >>> res = advanced_spectrum(circuit, encoding_args=encoding_args)(x, y, z, w) >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""y"": {(0,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]} .. warning:: The ``advanced_spectrum`` function does not check if the result of the circuit is an expectation value. It checks whether the classical preprocessing between QNode and gate arguments is linear by computing the Jacobian of the processing at multiple points. This makes it unlikely -- *but not impossible* -- that non-linear functions go undetected. The number of additional points at which the Jacobian is computed can be controlled via ``num_pos``, and the test is deactivated if ``num_pos=0`` (discouraged). Furthermore, the QNode arguments *not* marked in ``argnum`` will not be considered in this test and if they resemble encoded inputs, the entire spectrum might be incorrect or the circuit might not even admit one. The ``advanced_spectrum`` function works in all interfaces: .. 
code-block:: python import tensorflow as tf dev = qml.device(""default.qubit"", wires=1) @qml.qnode(dev, interface='tf') def circuit(x): qml.RX(0.4*x[0], wires=0) qml.PhaseShift(x[1]*np.pi, wires=0) return qml.expval(qml.PauliZ(wires=0)) x = tf.constant([1., 2.]) res = advanced_spectrum(circuit)(x) >>> print(res) {""x"": {(0,): [-0.4, 0.0, 0.4], (1,): [-3.14159, 0.0, 3.14159]}} """""" validation_kwargs = validation_kwargs or {} encoding_args, argnum = _process_ids(encoding_args, argnum, qnode) atol = 10 ** (-decimals) if decimals is not None else 1e-10 # A map between Jacobians (contiguous) and arg names (may be discontiguous) arg_name_map = dict(enumerate(encoding_args)) @wraps(qnode) def wrapper(*args, **kwargs): # Compute classical Jacobian and assert preprocessing is linear jac_fn = qml.transforms.classical_jacobian(qnode, argnum=argnum, expand_fn=expand_multi_par) if not qml.math.is_independent(jac_fn, qnode.interface, args, kwargs, **validation_kwargs): raise ValueError( ""The Jacobian of the classical preprocessing in the provided QNode "" ""is not constant; only linear classical preprocessing is supported."" ) class_jacs = jac_fn(*args, **kwargs) spectra = {} tape = expand_multi_par(qnode.qtape) par_info = tape._par_info # pylint: disable=protected-access for jac_idx, class_jac in enumerate(class_jacs): arg_name = arg_name_map[jac_idx] if encoding_args[arg_name] is Ellipsis: requested_par_ids = set(product(*(range(sh) for sh in class_jac.shape[1:]))) else: requested_par_ids = set(encoding_args[arg_name]) _spectra = {par_idx: {0} for par_idx in requested_par_ids} for op_idx, jac_of_op in enumerate(np.round(class_jac, decimals=decimals)): op = par_info[op_idx][""op""] # Find parameters that where requested and feed into the operation if len(class_jac.shape) == 1: # Scalar argument, only axis of Jacobian is for gates if np.isclose(jac_of_op, 0.0, atol=atol, rtol=0): continue jac_of_op = {(): jac_of_op} par_ids = {()} else: par_ids = zip(*[map(int, _ids) for _ids in np.where(jac_of_op)]) par_ids = set(par_ids).intersection(requested_par_ids) if len(par_ids) == 0: continue # Multi-parameter gates are not supported if len(op.parameters) != 1: raise ValueError( ""Can only consider one-parameter gates as data-encoding gates; "" f""got {op.name}."" ) spec = _get_spectrum(op, decimals=decimals) for par_idx in par_ids: scale = float(jac_of_op[par_idx]) scaled_spec = [scale * f for f in spec] _spectra[par_idx] = _join_spectra(_spectra[par_idx], scaled_spec) # Construct the sorted spectrum also containing negative frequencies for idx, spec in _spectra.items(): spec = sorted(spec) _spectra[idx] = [-freq for freq in spec[:0:-1]] + spec spectra[arg_name] = _spectra return spectra return wrapper ","def advanced_spectrum(qnode, encoding_args=None, argnum=None, decimals=5, validation_kwargs=None): r""""""Compute the frequency spectrum of the Fourier representation of quantum circuits, including classical preprocessing. The circuit must only use single-parameter gates of the form :math:`e^{-i x_j G}` as input-encoding gates, which allows the computation of the spectrum by inspecting the gates' generators :math:`G`. The most important example of such gates are Pauli rotations. .. note:: More precisely, the ``advanced_spectrum`` function relies on the gate to define a ``generator``, and will fail if gates marked controlled by marked parameters do not have this attribute. 
The argument ``argnum`` controls which QNode arguments are considered as encoded inputs and the spectrum is computed only for these arguments. The input-encoding *gates* are those that are controlled by input-encoding QNode arguments. If no ``argnum`` is given, all QNode arguments are considered to be input-encoding arguments. .. note:: Arguments or parameters in an argument that do not contribute to the Fourier series of the QNode with a frequency are considered as contributing with a constant term. That is, a parameter that does not control any gate has the spectrum ``[0]``. Args: qnode (pennylane.QNode): :class:`~.pennylane.QNode` to compute the spectrum for encoding_args (dict[str, list[tuple]], set): Parameter index dictionary; keys are argument names, values are index tuples for that argument or an ``Ellipsis``. If a ``set``, all values are set to ``Ellipsis``. The contained argument and parameter indices indicate the scalar variables for which the spectrum is computed argnum (list[int]): Numerical indices for arguments with respect to which to compute the spectrum decimals (int): number of decimals to which to round frequencies. num_pos (int): Number of additional random positions at which to evaluate the Jacobian of the preprocessing and test that it is constant. Setting ``num_pos=0`` will deactivate the test. Returns: function: Function which accepts the same arguments as the QNode. When called, this function will return a dictionary of dictionaries containing the frequency spectra per QNode parameter. **Details** A circuit that returns an expectation value of a Hermitian observable which depends on :math:`N` scalar inputs :math:`x_j` can be interpreted as a function :math:`f: \mathbb{R}^N \rightarrow \mathbb{R}` (as the observable is Hermitian, the expectation value is real-valued). This function can always be expressed by a Fourier-type sum .. math:: \sum \limits_{\omega_1\in \Omega_1} \dots \sum \limits_{\omega_N \in \Omega_N} c_{\omega_1,\dots, \omega_N} e^{-i x_1 \omega_1} \dots e^{-i x_N \omega_N} over the *frequency spectra* :math:`\Omega_j \subseteq \mathbb{R},` :math:`j=1,\dots,N`. Each spectrum has the property that :math:`0 \in \Omega_j`, and the spectrum is symmetric (i.e., for every :math:`\omega \in \Omega_j` we have that :math:`-\omega \in\Omega_j`). If all frequencies are integer-valued, the Fourier sum becomes a *Fourier series*. As shown in `Vidal and Theis (2019) `_ and `Schuld, Sweke and Meyer (2020) `_, if an input :math:`x_j, j = 1 \dots N`, only enters into single-parameter gates of the form :math:`e^{-i x_j G}` (where :math:`G` is a Hermitian generator), the frequency spectrum :math:`\Omega_j` is fully determined by the eigenvalues of the generators :math:`G`. In many situations, the spectra are limited to a few frequencies only, which in turn limits the function class that the circuit can express. The ``advanced_spectrum`` function computes all frequencies that will potentially appear in the sets :math:`\Omega_1` to :math:`\Omega_N`. .. note:: In more detail, the ``advanced_spectrum`` function also allows for preprocessing of the QNode arguments before they are fed into the gates, as long as this processing is *linear*. In particular, constant prefactors for the encoding arguments are allowed. **Example** Consider the following example, which uses non-trainable inputs ``x``, ``y`` and ``z`` as well as trainable parameters ``w`` as arguments to the QNode. .. 
code-block:: python import pennylane as qml import numpy as np from advanced_spectrum import advanced_spectrum n_qubits = 3 dev = qml.device(""default.qubit"", wires=n_qubits) @qml.qnode(dev) def circuit(x, y, z, w): for i in range(n_qubits): qml.RX(0.5*x[i], wires=i) qml.Rot(w[0,i,0], w[0,i,1], w[0,i,2], wires=i) qml.RY(2.3*y[i], wires=i) qml.Rot(w[1,i,0], w[1,i,1], w[1,i,2], wires=i) qml.RX(z, wires=i) return qml.expval(qml.PauliZ(wires=0)) x = np.array([1., 2., 3.]) y = np.array([0.1, 0.3, 0.5]) z = -1.8 w = np.random.random((2, n_qubits, 3)) res = advanced_spectrum(circuit, argnum=[0, 1, 2])(x, y, z, w) This circuit looks as follows: >>> print(qml.draw(circuit)(x, y, z, w)) 0: ──RX(0.5)──Rot(0.598, 0.949, 0.346)───RY(0.23)──Rot(0.693, 0.0738, 0.246)──RX(-1.8)──┤ ⟨Z⟩ 1: ──RX(1)────Rot(0.0711, 0.701, 0.445)──RY(0.69)──Rot(0.32, 0.0482, 0.437)───RX(-1.8)──┤ 2: ──RX(1.5)──Rot(0.401, 0.0795, 0.731)──RY(1.15)──Rot(0.756, 0.38, 0.38)─────RX(-1.8)──┤ Applying the ``advanced_spectrum`` function to the circuit for the non-trainable parameters, we obtain: >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""x"": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]} ""y"": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]} ""z"": {(): [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]} .. note:: While the Fourier spectrum usually does not depend on trainable circuit parameters or the actual values of the inputs, it may still change based on inputs to the QNode that alter the architecture of the circuit. Above, we selected all input-encoding parameters for the spectrum computation, using the ``argnum`` keyword argument. We may also restrict the full analysis to a single QNode argument, again using ``argnum``: >>> res = advanced_spectrum(circuit, argnum=[0])(x, y, z, w) >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""x"": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]} Selecting arguments by name instead of index is possible via the ``encoding_args`` argument: >>> res = advanced_spectrum(circuit, encoding_args={""y""})(x, y, z, w) >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""y"": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]} Note that for array-valued arguments the spectrum for each element of the array is computed. A more fine-grained control is available by passing index tuples for the respective argument name in ``encoding_args``: >>> encoding_args = {""y"": [(0,),(2,)]} >>> res = advanced_spectrum(circuit, encoding_args=encoding_args)(x, y, z, w) >>> for inp, freqs in res.items(): >>> print(f""{inp}: {freqs}"") ""y"": {(0,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]} .. warning:: The ``advanced_spectrum`` function does not check if the result of the circuit is an expectation value. It checks whether the classical preprocessing between QNode and gate arguments is linear by computing the Jacobian of the processing at multiple points. This makes it unlikely -- *but not impossible* -- that non-linear functions go undetected. The number of additional points at which the Jacobian is computed can be controlled via ``num_pos``, and the test is deactivated if ``num_pos=0`` (discouraged). Furthermore, the QNode arguments *not* marked in ``argnum`` will not be considered in this test and if they resemble encoded inputs, the entire spectrum might be incorrect or the circuit might not even admit one. The ``advanced_spectrum`` function works in all interfaces: .. 
code-block:: python import tensorflow as tf dev = qml.device(""default.qubit"", wires=1) @qml.qnode(dev, interface='tf') def circuit(x): qml.RX(0.4*x[0], wires=0) qml.PhaseShift(x[1]*np.pi, wires=0) return qml.expval(qml.PauliZ(wires=0)) x = tf.constant([1., 2.]) res = advanced_spectrum(circuit)(x) >>> print(res) {""x"": {(0,): [-0.4, 0.0, 0.4], (1,): [-3.14159, 0.0, 3.14159]}} """""" validation_kwargs = validation_kwargs or {} encoding_args, argnum = _process_ids(encoding_args, argnum, qnode) atol = 10 ** (-decimals) if decimals is not None else 1e-10 # A map between Jacobians (contiguous) and arg names (may be discontiguous) arg_name_map = dict(enumerate(encoding_args)) @wraps(qnode) def wrapper(*args, **kwargs): # Compute classical Jacobian and assert preprocessing is linear jac_fn = qml.transforms.classical_jacobian(qnode, argnum=argnum, expand_fn=expand_multi_par) if not qml.math.is_independent(jac_fn, qnode.interface, args, kwargs, **validation_kwargs): raise ValueError( ""The Jacobian of the classical preprocessing in the provided QNode "" ""is not constant; only linear classical preprocessing is supported."" ) class_jacs = jac_fn(*args, **kwargs) spectra = {} tape = expand_multi_par(qnode.qtape) par_info = tape._par_info # pylint: disable=protected-access for jac_idx, class_jac in enumerate(class_jacs): arg_name = arg_name_map[jac_idx] if encoding_args[arg_name] is Ellipsis: requested_par_ids = set(product(*(range(sh) for sh in class_jac.shape[1:]))) else: requested_par_ids = set(encoding_args[arg_name]) _spectra = {par_idx: {0} for par_idx in requested_par_ids} for op_idx, jac_of_op in enumerate(np.round(class_jac, decimals=decimals)): op = par_info[op_idx][""op""] # Find parameters that where requested and feed into the operation if len(class_jac.shape) == 1: # Scalar argument, only axis of Jacobian is for gates if np.isclose(jac_of_op, 0.0, atol=atol, rtol=0): continue jac_of_op = {(): jac_of_op} par_ids = {()} else: par_ids = zip(*[map(int, _ids) for _ids in np.where(jac_of_op)]) par_ids = set(par_ids).intersection(requested_par_ids) if len(par_ids) == 0: continue # Multi-parameter gates are not supported if len(op.parameters) != 1: raise ValueError( ""Can only consider one-parameter gates as data-encoding gates; "" f""got {op.name}."" ) spec = _get_spectrum(op, decimals=decimals) for par_idx in par_ids: scale = float(jac_of_op[par_idx]) scaled_spec = [scale * f for f in spec] _spectra[par_idx] = _join_spectra(_spectra[par_idx], scaled_spec) # Construct the sorted spectrum also containing negative frequencies for idx, spec in _spectra.items(): spec = sorted(spec) _spectra[idx] = [-freq for freq in spec[:0:-1]] + spec spectra[arg_name] = _spectra return spectra return wrapper " 30913,"def main(): try: entry_context = { ""TroubleShout"": { 'Engine': { 'SSL/TLS': docker_container_details() }, 'Endpoint': { 'SSL/TLS': endpoint_certificate(""google.com"", ""443"") } } } human_readable = build_human_readable(entry_context) return_outputs(human_readable, entry_context, {}) except Exception as e: return_error(f'Failed to execute Certificate Troubleshout.\n Error: {str(e)}') ","def main(): try: entry_context = { ""TroubleShout"": { 'XSOAR': { 'SSL/TLS': docker_container_details() }, 'Endpoint': { 'SSL/TLS': endpoint_certificate(""google.com"", ""443"") } } } human_readable = build_human_readable(entry_context) return_outputs(human_readable, entry_context, {}) except Exception as e: return_error(f'Failed to execute Certificate Troubleshout.\n Error: {str(e)}') " 46019,"def 
build_laplacian_pyramid( input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False ) -> List[torch.Tensor]: r""""""Construct the Laplacian pyramid for an image. .. image:: _static/img/build_pyramid.png The function constructs a vector of images and builds the Laplacian pyramid by recursively computing the difference after applying pyrUp to the adjacent layer in it's Gaussian pyramid. Args: input : the tensor to be used to construct the pyramid. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. align_corners: interpolation flag. Shape: - Input: :math:`(B, C, H, W)` - Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]` """""" if not isinstance(input, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not len(input.shape) == 4: raise ValueError(f""Invalid input shape, we expect BxCxHxW. Got: {input.shape}"") if not isinstance(max_level, int) or max_level < 0: raise ValueError(f""Invalid max_level, it must be a positive integer. Got: {max_level}"") # create gaussian pyramid gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level) # create empty list laplacian_pyramid: List[torch.Tensor] = [] for i in range(max_level - 1): img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1]) laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand laplacian_pyramid.append(laplacian) laplacian_pyramid.append(gaussian_pyramid[-1]) return laplacian_pyramid ","def build_laplacian_pyramid( input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False ) -> List[torch.Tensor]: r""""""Construct the Laplacian pyramid for a tensor image. .. image:: _static/img/build_pyramid.png The function constructs a vector of images and builds the Laplacian pyramid by recursively computing the difference after applying pyrUp to the adjacent layer in it's Gaussian pyramid. Args: input : the tensor to be used to construct the pyramid. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. align_corners: interpolation flag. Shape: - Input: :math:`(B, C, H, W)` - Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]` """""" if not isinstance(input, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not len(input.shape) == 4: raise ValueError(f""Invalid input shape, we expect BxCxHxW. Got: {input.shape}"") if not isinstance(max_level, int) or max_level < 0: raise ValueError(f""Invalid max_level, it must be a positive integer. 
Got: {max_level}"") # create gaussian pyramid gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level) # create empty list laplacian_pyramid: List[torch.Tensor] = [] for i in range(max_level - 1): img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1]) laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand laplacian_pyramid.append(laplacian) laplacian_pyramid.append(gaussian_pyramid[-1]) return laplacian_pyramid " 39054,"def ipvXmessage(version=None): if version == 6: ip_repr = ""%s://[%s]:%d"" elif version == 4: ip_repr = ""%s://%s:%d"" message = f""Uvicorn running on {ip_repr} (Press CTRL+C to quit)"" color_message = ( ""Uvicorn running on "" + click.style(ip_repr, bold=True) + "" (Press CTRL+C to quit)"" ) return message, color_message ","def ipvXmessage(version=None): if version == 6: ip_repr = ""%s://[%s]:%d"" else: ip_repr = ""%s://%s:%d"" message = f""Uvicorn running on {ip_repr} (Press CTRL+C to quit)"" color_message = ( ""Uvicorn running on "" + click.style(ip_repr, bold=True) + "" (Press CTRL+C to quit)"" ) return message, color_message " 41239,"def mixture( val: Any, default: Any = RaiseTypeErrorIfNotProvided ) -> Union[Sequence[Tuple[float, np.ndarray]], TDefault]: """"""Return a sequence of tuples representing a probabilistic unitary. A mixture is described by an iterable of tuples of the form (probability of unitary, unitary as numpy array) The probability components of the tuples must sum to 1.0 and be non-negative. Determines the Mixture representation of `val` by the following strategies: 1. Try to use `val._mixture_()`. Case a) Method not present or returns `None`. Continue to next strategy. Case b) Returns the Mixture representation. Return the result. 2. Try to use `unitary()`. Case a) Method not present or returns `None`. Continue to next strategy. Case b) Method returns a unitary. Convert unitary into mixture and return. 3. Try to use serial concatenation recursively. Case a) One or more decomposed operators doesn't have mixture. `val` does not have a mixture representation. Case b) All decomposed operators have mixture representation. Serially concatenate and return the result using superoperator as intermediate. Args: val: The value to decompose into a mixture of unitaries. default: A default value if val does not support mixture. Returns: An iterable of tuples of size 2. The first element of the tuple is a probability (between 0 and 1) and the second is the object that occurs with that probability in the mixture. The probabilities will sum to 1.0. Raises: TypeError: If `val` has no `_mixture_` or `_unitary_` mehod, or if it does and this method returned `NotImplemented`. 
"""""" mixture_result = _gettr_helper(val, ['_mixture_']) if mixture_result is not None and mixture_result is not NotImplemented: return mixture_result unitary_result = unitary_protocol.unitary(val, None) if unitary_result is not None and unitary_result is not NotImplemented: return ((1.0, unitary_result),) decomposed, qubits, _ = _try_decompose_into_operations_and_qubits(val) # serial concatenation if decomposed is not None and decomposed != [val] and decomposed != []: if all([has_mixture(x) for x in decomposed]): superoperator_list = [_moment_superoperator(x, qubits, None) for x in decomposed] if not any([x is None for x in superoperator_list]): superoperator_result = reduce(lambda x, y: x @ y, superoperator_list) return tuple(_superoperator_to_mixture(superoperator_result)) if default is not RaiseTypeErrorIfNotProvided: return default if _gettr_helper(val, ['_unitary_', '_mixture_']) is None: raise TypeError(f""object of type '{type(val)}' has no _mixture_ or _unitary_ method."") raise TypeError( ""object of type '{}' does have a _mixture_ or _unitary_ "" ""method, but it returned NotImplemented."".format(type(val)) ) ","def mixture( val: Any, default: TDefault = RaiseTypeErrorIfNotProvided ) -> Union[Sequence[Tuple[float, np.ndarray]], TDefault]: """"""Return a sequence of tuples representing a probabilistic unitary. A mixture is described by an iterable of tuples of the form (probability of unitary, unitary as numpy array) The probability components of the tuples must sum to 1.0 and be non-negative. Determines the Mixture representation of `val` by the following strategies: 1. Try to use `val._mixture_()`. Case a) Method not present or returns `None`. Continue to next strategy. Case b) Returns the Mixture representation. Return the result. 2. Try to use `unitary()`. Case a) Method not present or returns `None`. Continue to next strategy. Case b) Method returns a unitary. Convert unitary into mixture and return. 3. Try to use serial concatenation recursively. Case a) One or more decomposed operators doesn't have mixture. `val` does not have a mixture representation. Case b) All decomposed operators have mixture representation. Serially concatenate and return the result using superoperator as intermediate. Args: val: The value to decompose into a mixture of unitaries. default: A default value if val does not support mixture. Returns: An iterable of tuples of size 2. The first element of the tuple is a probability (between 0 and 1) and the second is the object that occurs with that probability in the mixture. The probabilities will sum to 1.0. Raises: TypeError: If `val` has no `_mixture_` or `_unitary_` mehod, or if it does and this method returned `NotImplemented`. 
"""""" mixture_result = _gettr_helper(val, ['_mixture_']) if mixture_result is not None and mixture_result is not NotImplemented: return mixture_result unitary_result = unitary_protocol.unitary(val, None) if unitary_result is not None and unitary_result is not NotImplemented: return ((1.0, unitary_result),) decomposed, qubits, _ = _try_decompose_into_operations_and_qubits(val) # serial concatenation if decomposed is not None and decomposed != [val] and decomposed != []: if all([has_mixture(x) for x in decomposed]): superoperator_list = [_moment_superoperator(x, qubits, None) for x in decomposed] if not any([x is None for x in superoperator_list]): superoperator_result = reduce(lambda x, y: x @ y, superoperator_list) return tuple(_superoperator_to_mixture(superoperator_result)) if default is not RaiseTypeErrorIfNotProvided: return default if _gettr_helper(val, ['_unitary_', '_mixture_']) is None: raise TypeError(f""object of type '{type(val)}' has no _mixture_ or _unitary_ method."") raise TypeError( ""object of type '{}' does have a _mixture_ or _unitary_ "" ""method, but it returned NotImplemented."".format(type(val)) ) " 41074,"def get_partial_state_dict(model_state_dict, modules): """"""Create state_dict with specified modules matching input model modules. Note that please use get_partial_lm_state_dict for LM. Args: model_state_dict (OrderedDict): trained model state_dict modules (list): specified module list for transfer Return: new_state_dict (OrderedDict): the updated state_dict """""" new_state_dict = OrderedDict() for key, value in model_state_dict.items(): if any(key.startswith(m) for m in modules): new_state_dict[key] = value return new_state_dict ","def get_partial_state_dict(model_state_dict, modules): """"""Create state_dict with specified modules matching input model modules. Note that get_partial_lm_state_dict is used if a LM specified. Args: model_state_dict (OrderedDict): trained model state_dict modules (list): specified module list for transfer Return: new_state_dict (OrderedDict): the updated state_dict """""" new_state_dict = OrderedDict() for key, value in model_state_dict.items(): if any(key.startswith(m) for m in modules): new_state_dict[key] = value return new_state_dict " 29631,"def merge_and_deserialize(header, frames, deserializers=None, memoryview_offset=0): """"""Merge and deserialize frames This function is a replacement for `deserialize()` that merges frames that were split by `serialize_and_split()`. When ``frames`` contains memoryviews that share an underlying buffer, ``memoryview_offset`` must be the index into that underlying buffer where the ``frames`` starts (in bytes, not frame counts). 
See Also -------- deserialize serialize_and_split merge_subframes """""" merged_frames = [] if ""split-num-sub-frames"" not in header: merged_frames = frames else: frame_byte_offsets = list(itertools.accumulate(map(len, frames))) for n, offset in zip(header[""split-num-sub-frames""], header[""split-offsets""]): if n == 1: merged = frames[offset] else: subframes = frames[offset : offset + n] merged = merge_subframes( subframes, memoryview_offset=memoryview_offset + (frame_byte_offsets[offset - 1] if offset else 0), ) merged_frames.append(merged) memoryview_offset += len(merged) return deserialize(header, merged_frames, deserializers=deserializers) ","def merge_and_deserialize(header, frames, deserializers=None, memoryview_offset=0): """"""Merge and deserialize frames This function is a replacement for `deserialize()` that merges frames that were split by `serialize_and_split()`. When ``frames`` contains memoryviews that share an underlying buffer, ``memoryview_offset`` must be the index into that underlying buffer where the ``frames`` start (in bytes, not frame counts). See Also -------- deserialize serialize_and_split merge_subframes """""" merged_frames = [] if ""split-num-sub-frames"" not in header: merged_frames = frames else: frame_byte_offsets = list(itertools.accumulate(map(len, frames))) for n, offset in zip(header[""split-num-sub-frames""], header[""split-offsets""]): if n == 1: merged = frames[offset] else: subframes = frames[offset : offset + n] merged = merge_subframes( subframes, memoryview_offset=memoryview_offset + (frame_byte_offsets[offset - 1] if offset else 0), ) merged_frames.append(merged) memoryview_offset += len(merged) return deserialize(header, merged_frames, deserializers=deserializers) " 54283,"def test_process_normfactor_configs(): # Check to see if mu_ttbar NormFactor is overridden correctly # - ParamSetting has a config for it # - other_parameter_configs has a config for it # Make sure that when two measurements exist, we're copying things across correctly toplvl = ET.Element(""Combination"") meas = ET.Element( ""Measurement"", Name='NormalMeasurement', Lumi=str(1.0), LumiRelErr=str(0.017), ExportOnly=str(True), ) poiel = ET.Element('POI') poiel.text = 'mu_SIG' meas.append(poiel) se = ET.Element('ParamSetting', Const='True') se.text = ' '.join(['Lumi', 'mu_both', 'mu_paramSettingOnly']) meas.append(se) se = ET.Element('ParamSetting', Val='2.0') se.text = ' '.join(['mu_both']) meas.append(se) toplvl.append(meas) meas = ET.Element( ""Measurement"", Name='ParallelMeasurement', Lumi=str(1.0), LumiRelErr=str(0.017), ExportOnly=str(True), ) poiel = ET.Element('POI') poiel.text = 'mu_BKG' meas.append(poiel) se = ET.Element('ParamSetting', Val='3.0') se.text = ' '.join(['mu_both']) meas.append(se) toplvl.append(meas) other_parameter_configs = [ dict(name='mu_both', inits=[1.0], bounds=[[1.0, 5.0]], fixed=False), dict(name='mu_otherConfigOnly', inits=[1.0], bounds=[[0.0, 10.0]], fixed=False), ] result = pyhf.readxml.process_measurements( toplvl, other_parameter_configs=other_parameter_configs ) result = { m['name']: {k['name']: k for k in m['config']['parameters']} for m in result } assert result # make sure ParamSetting configs override NormFactor configs assert result['NormalMeasurement']['mu_both']['fixed'] assert result['NormalMeasurement']['mu_both']['inits'] == [2.0] assert result['NormalMeasurement']['mu_both']['bounds'] == [[1.0, 5.0]] # make sure ParamSetting is doing the right thing assert result['NormalMeasurement']['mu_paramSettingOnly']['fixed'] assert 
'inits' not in result['NormalMeasurement']['mu_paramSettingOnly'] assert 'bounds' not in result['NormalMeasurement']['mu_paramSettingOnly'] # make sure our code doesn't accidentally override other parameter configs assert not result['NormalMeasurement']['mu_otherConfigOnly']['fixed'] assert result['NormalMeasurement']['mu_otherConfigOnly']['inits'] == [1.0] assert result['NormalMeasurement']['mu_otherConfigOnly']['bounds'] == [[0.0, 10.0]] # make sure settings from one measurement don't leak to other assert not result['ParallelMeasurement']['mu_both']['fixed'] assert result['ParallelMeasurement']['mu_both']['inits'] == [3.0] assert result['ParallelMeasurement']['mu_both']['bounds'] == [[1.0, 5.0]] ","def test_process_normfactor_configs(): # Check to see if mu_ttbar NormFactor is overridden correctly # - ParamSetting has a config for it # - other_parameter_configs has a config for it # Make sure that when two measurements exist, we're copying things across correctly toplvl = ET.Element(""Combination"") meas = ET.Element( ""Measurement"", Name='NormalMeasurement', Lumi=str(1.0), LumiRelErr=str(0.017), ExportOnly=str(True), ) poiel = ET.Element('POI') poiel.text = 'mu_SIG' meas.append(poiel) setting = ET.Element('ParamSetting', Const='True') setting.text = ' '.join(['Lumi', 'mu_both', 'mu_paramSettingOnly']) meas.append(setting) se.text = ' '.join(['Lumi', 'mu_both', 'mu_paramSettingOnly']) meas.append(se) se = ET.Element('ParamSetting', Val='2.0') se.text = ' '.join(['mu_both']) meas.append(se) toplvl.append(meas) meas = ET.Element( ""Measurement"", Name='ParallelMeasurement', Lumi=str(1.0), LumiRelErr=str(0.017), ExportOnly=str(True), ) poiel = ET.Element('POI') poiel.text = 'mu_BKG' meas.append(poiel) se = ET.Element('ParamSetting', Val='3.0') se.text = ' '.join(['mu_both']) meas.append(se) toplvl.append(meas) other_parameter_configs = [ dict(name='mu_both', inits=[1.0], bounds=[[1.0, 5.0]], fixed=False), dict(name='mu_otherConfigOnly', inits=[1.0], bounds=[[0.0, 10.0]], fixed=False), ] result = pyhf.readxml.process_measurements( toplvl, other_parameter_configs=other_parameter_configs ) result = { m['name']: {k['name']: k for k in m['config']['parameters']} for m in result } assert result # make sure ParamSetting configs override NormFactor configs assert result['NormalMeasurement']['mu_both']['fixed'] assert result['NormalMeasurement']['mu_both']['inits'] == [2.0] assert result['NormalMeasurement']['mu_both']['bounds'] == [[1.0, 5.0]] # make sure ParamSetting is doing the right thing assert result['NormalMeasurement']['mu_paramSettingOnly']['fixed'] assert 'inits' not in result['NormalMeasurement']['mu_paramSettingOnly'] assert 'bounds' not in result['NormalMeasurement']['mu_paramSettingOnly'] # make sure our code doesn't accidentally override other parameter configs assert not result['NormalMeasurement']['mu_otherConfigOnly']['fixed'] assert result['NormalMeasurement']['mu_otherConfigOnly']['inits'] == [1.0] assert result['NormalMeasurement']['mu_otherConfigOnly']['bounds'] == [[0.0, 10.0]] # make sure settings from one measurement don't leak to other assert not result['ParallelMeasurement']['mu_both']['fixed'] assert result['ParallelMeasurement']['mu_both']['inits'] == [3.0] assert result['ParallelMeasurement']['mu_both']['bounds'] == [[1.0, 5.0]] " 58114,"def create_or_query(delimiter_str: str, param_name: str, wrapper: str = '""') -> str: if not delimiter_str: return '' arr = delimiter_str.split(',') query = '' for item in arr: query += f'{param_name}={wrapper}{item}{wrapper} OR ' 
return query[:len(query) - 3] ","def create_or_query(delimiter_str: str, param_name: str, wrapper: str = '""') -> str: if not delimiter_str: return '' arr = delimiter_str.split(',') query = '' query = ' OR '.join(f'{param_name}={wrapper}{item}{wrapper}' for item in arr) return query " 49438,"def getSearchResults(search): result = {""data"": []} regSearch = re.compile(re.escape(search), re.IGNORECASE) links = {""n"": ""Link"", ""d"": []} via4 = getInfo(""via4"") if via4: for vLink in via4.get(""searchables"", []): links[""d""].extend(sanitize(colVIA4.find({vLink: {""$in"": [regSearch]}}))) try: textsearch = {""n"": ""Text search"", ""d"": getFreeText(search)} except: textsearch = {""n"": ""Text search"", ""d"": []} result[""errors""] = [""textsearch""] for collection in [links, textsearch]: for item in collection[""d""]: # Check if already in result data if not any(item[""id""] == entry[""id""] for entry in result[""data""]): entry = getCVE(item[""id""]) if entry: entry[""reason""] = collection[""n""] result[""data""].append(entry) return result ","def getSearchResults(search): result = {""data"": []} regSearch = re.compile(re.escape(search), re.IGNORECASE) links = {""n"": ""Link"", ""d"": []} try: for vLink in via4.get(""searchables"", []): links[""d""].extend(sanitize(colVIA4.find({vLink: {""$in"": [regSearch]}}))) except AttributeError: return result try: textsearch = {""n"": ""Text search"", ""d"": getFreeText(search)} except: textsearch = {""n"": ""Text search"", ""d"": []} result[""errors""] = [""textsearch""] for collection in [links, textsearch]: for item in collection[""d""]: # Check if already in result data if not any(item[""id""] == entry[""id""] for entry in result[""data""]): entry = getCVE(item[""id""]) if entry: entry[""reason""] = collection[""n""] result[""data""].append(entry) return result " 7526,"def test_sigma_clip_masked_data_values(): """""" Test that the data values & type returned by sigma_clip are the same as its input when using masked=True (rather than being upcast to float64 & containing NaNs as in issue #10605) and also that the input data get copied or referenced as appropriate. """""" data = np.array([-2, 5, -5, -6, 20, 14, 1]) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=True) assert result.dtype == data.dtype assert np.all(result.data == data) assert not np.shares_memory(result.data, data) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=False) assert result.dtype == data.dtype assert np.all(result.data == data) assert np.shares_memory(result.data, data) # (The fact that the arrays share memory probably also means they're the # same, but doesn't strictly prove it, eg. one could be reversed.) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=True) assert result.dtype == data.dtype assert np.all(result.data == data) assert not np.shares_memory(result.data, data) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=False) assert result.dtype == data.dtype assert np.all(result.data == data) assert np.shares_memory(result.data, data) ","def test_sigma_clip_masked_data_values(): """""" Test that the data values & type returned by sigma_clip are the same as its input when using masked=True (rather than being upcast to float64 & containing NaNs as in issue #10605) and also that the input data get copied or referenced as appropriate. 
"""""" data = np.array([-2, 5, -5, -6, 20, 14, 1]) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=True) assert result.dtype == data.dtype assert np.all(result.data == data) assert not np.shares_memory(result.data, data) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=False) assert result.dtype == data.dtype assert np.all(result.data == data) assert np.shares_memory(result.data, data) # (The fact that the arrays share memory probably also means they're the # same, but doesn't strictly prove it, eg. one could be reversed.) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=True) assert result.dtype == data.dtype assert np.all(result.data == data) assert not np.shares_memory(result.data, data) result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=False) assert result.dtype == data.dtype assert_equal(result.data, data) assert np.shares_memory(result.data, data) " 57796,"def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0', **kwargs) -> Tuple[str, dict, list]: limit = int(limit) offset = int(offset) try: json_pipeline = validate_json_objects(json.loads(pipeline)) raw_response = client.pipeline_query( collection=collection, pipeline=json_pipeline, ) except JSONDecodeError: raise DemistoException('The `pipeline` argument is not a valid json.') if raw_response: raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)] readable_outputs = tableToMarkdown( f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` ' f'with pipeline: {pipeline}:', t=[entry.get('_id') for entry in raw_response], headers=['_id'], ) outputs_objects = list() for item in raw_response: item.update({'collection': collection}) outputs_objects.append(item) outputs = {CONTEXT_KEY: outputs_objects} return readable_outputs, outputs, raw_response else: return 'MongoDB: No results found', {}, raw_response ","def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0', **kwargs) -> Tuple[str, dict, list]: limit = arg_to_number(limit) offset = arg_to_number(offset) try: json_pipeline = validate_json_objects(json.loads(pipeline)) raw_response = client.pipeline_query( collection=collection, pipeline=json_pipeline, ) except JSONDecodeError: raise DemistoException('The `pipeline` argument is not a valid json.') if raw_response: raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)] readable_outputs = tableToMarkdown( f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` ' f'with pipeline: {pipeline}:', t=[entry.get('_id') for entry in raw_response], headers=['_id'], ) outputs_objects = list() for item in raw_response: item.update({'collection': collection}) outputs_objects.append(item) outputs = {CONTEXT_KEY: outputs_objects} return readable_outputs, outputs, raw_response else: return 'MongoDB: No results found', {}, raw_response " 38541,"def match_grids_along_1d_mortar( mg: pp.MortarGrid, g_new: pp.Grid, g_old: pp.Grid, tol: float, scaling: Literal[""averaged"", ""integrated""], ) -> sps.csr_matrix: """"""Match the faces of two 2d grids along a 1d mortar grid. The function identifies faces on the 1d segment specified by the MortarGrid, and finds the area weights of the matched faces. Both sides of the mortar grid are taken care of. 
Args: mg (pp.MortarGrid): MortarGrid that specifies the target 1d line. Must be of dimension 1. g_new (pp.Grid): New 2d grid. Should have faces split along the 1d line. Dimension 2. g_old (pp.Grid): Old 2d grid. Dimension 2. The mappings in mg from mortar to primary should be set for this grid. tol (double): Tolerance used in comparison of geometric quantities. scaling (str, optional): Control weights of the returned matrix, see return values for specification. Returns: sps.csr_matrix: Matrix that can be used to update mg._primary_to_mortar_int by by right multiplication; essentially a mapping from the new to the old grid. Raises: ValueError: If the matching procedure goes wrong. """""" # IMPLEMENTATION NOTE: Contrary to the related methods match_1d/match_2d, the # scaling argument is not permitted to be None in this function. This is by design, # it is less clear how to realize such a scaling in this case. # IMPLEMENTATION NOTE: # The algorithm is technical, partly because we also need to differ between # the left and right side of the segment, as these will belong to different # mortar grids. # # The main steps are: # 1) Identify faces in the old grid along the segment via the existing # mapping between mortar grid and higher dimensional grid. Use this # to define the geometry of the segment. # 2) Define positive and negative side of the segment, and split cells # and faces along the segement according to this criterion. # 3) For all sides (pos, neg), pick out faces in the old and new grid, # and match them up. Extend the mapping to go from all faces in the # two grids. # # Known weak points: Identification of geometric objects, in particular # points, is based on a geometric tolerance. For very fine, or bad, grids # this may give trouble. def cells_from_faces(g, fi): # Find cells of faces, specified by face indices fi. # It is assumed that fi is on the boundary, e.g. there is a single # cell for each element in fi. f, ci, _ = sps.find(g.cell_faces[fi]) if f.size != fi.size: raise ValueError(""We assume fi are boundary faces"") ismem, ind_map = ismember_rows(fi, fi[f], sort=False) if not np.all(ismem): raise ValueError return ci[ind_map] def create_1d_from_nodes(nodes): # From a set of nodes, create a 1d grid. duplicate nodes are removed # and we verify that the nodes are indeed colinear if not pp.geometry_property_checks.points_are_collinear(nodes, tol=tol): raise ValueError(""Nodes are not colinear"") sort_ind = pp.map_geometry.sort_points_on_line(nodes, tol=tol) n = nodes[:, sort_ind] unique_nodes, _, _ = unique_columns_tol(n, tol=tol) g = TensorGrid(np.arange(unique_nodes.shape[1])) g.nodes = unique_nodes g.compute_geometry() return g, sort_ind def nodes_of_faces(g, fi): # Find nodes of a set of faces. f = np.zeros(g.num_faces) f[fi] = 1 nodes = np.where(g.face_nodes * f > 0)[0] return nodes def face_to_cell_map(g_2d, g_1d, loc_faces, loc_nodes): # Match faces in a 2d grid and cells in a 1d grid by identifying # face-nodes and cell-node relations. # loc_faces are faces in 2d grid that are known to coincide with # cells. # loc_nodes are indices of 2d nodes along the segment, sorted so that # the ordering coincides with nodes in 1d grid # face-node relation in higher dimensional grid fn = g_2d.face_nodes.indices.reshape((g_2d.dim, g_2d.num_faces), order=""F"") # Reduce to faces along segment fn_loc = fn[:, loc_faces] # Mapping from global (2d) indices to the local indices used in 1d # grid. This also account for a sorting of the nodes, so that the # nodes. 
ind_map = np.zeros(g_2d.num_faces, dtype=int) ind_map[loc_nodes] = np.arange(loc_nodes.size) # Face-node in local indices fn_loc = ind_map[fn_loc] # Handle special case if loc_faces.size == 1: fn_loc = fn_loc.reshape((2, 1)) # Cell-node relation in 1d cn = g_1d.cell_nodes().indices.reshape((2, g_1d.num_cells), order=""F"") # Find cell index of each face ismem, ind = ismember_rows(fn_loc, cn) # Quality check, the grids should be conforming if not np.all(ismem): raise ValueError return ind # First create a virtual 1d grid along the line, using nodes from the old grid # Identify faces in the old grid that is on the boundary _, faces_on_boundary_old, _ = sps.find(mg._primary_to_mortar_int) # Find the nodes of those faces nodes_on_boundary_old = nodes_of_faces(g_old, faces_on_boundary_old) nodes_1d_old = g_old.nodes[:, nodes_on_boundary_old] # Normal vector of the line. Somewhat arbitrarily chosen as the first one. # This may be prone to rounding errors. normal = g_old.face_normals[:, faces_on_boundary_old[0]].reshape((3, 1)) # Create first version of 1d grid, we really only need start and endpoint g_aux, _ = create_1d_from_nodes(nodes_1d_old) # Start, end and midpoint start = g_aux.nodes[:, 0] end = g_aux.nodes[:, -1] mp = 0.5 * (start + end).reshape((3, 1)) # Find cells in 2d close to the segment bound_cells_old = cells_from_faces(g_old, faces_on_boundary_old) # This may occur if the mortar grid is one sided (T-intersection) # assert bound_cells_old.size > 1, 'Have not implemented this. Not difficult though' # Vector from midpoint to cell centers. Check which side the cells are on # relative to normal vector. # We are here assuming that the segment is not too curved (due to rounding # errors). Pain to come. cc_old = g_old.cell_centers[:, bound_cells_old] side_old = np.sign(np.sum(((cc_old - mp) * normal), axis=0)) # Find cells on the positive and negative side, relative to the positioning # in cells_from_faces pos_side_old = np.where(side_old > 0)[0] neg_side_old = np.where(side_old < 0)[0] if pos_side_old.size + neg_side_old.size != side_old.size: raise ValueError both_sides_old = [pos_side_old, neg_side_old] # Then virtual 1d grid for the new grid. This is a bit more involved, # since we need to identify the nodes by their coordinates. # This part will be prone to rounding errors, in particular for # badly shaped cells. nodes_new = g_new.nodes # Represent the 1d line by its start and end point, as pulled # from the old 1d grid (known coordinates) # Find distance from the nodes to the line defined by the mortar grid. dist, _ = pp.distances.points_segments(nodes_new, start, end) # Look for points in the new grid with a small distance to the line. 
hit = np.argwhere(dist.ravel() < tol).reshape((1, -1))[0] # Depending on geometric tolerance and grid quality, hit # may contain nodes that are close to the 1d line, but not on it # To improve the results, also require that the faces are boundary faces # We know we are in 2d, thus all faces have two nodes # We can do the same trick in 3d, provided we have simplex grids # but this will fail on Cartesian or polyhedral grids fn = g_new.face_nodes.indices.reshape((2, g_new.num_faces), order=""F"") fn_in_hit = np.isin(fn, hit) # Faces where all points are found in hit faces_by_hit = np.where(np.all(fn_in_hit, axis=0))[0] faces_on_boundary_new = np.where(g_new.tags[""fracture_faces""].ravel())[0] # Only consider faces both in hit, and that are boundary faces_on_boundary_new = np.intersect1d(faces_by_hit, faces_on_boundary_new) # Cells along the segment, from the new grid bound_cells_new = cells_from_faces(g_new, faces_on_boundary_new) # assert bound_cells_new.size > 1, 'Have not implemented this. Not difficult though' cc_new = g_new.cell_centers[:, bound_cells_new] side_new = np.sign(np.sum(((cc_new - mp) * normal), axis=0)) pos_side_new = np.where(side_new > 0)[0] neg_side_new = np.where(side_new < 0)[0] if pos_side_new.size + neg_side_new.size != side_new.size: raise ValueError both_sides_new = [pos_side_new, neg_side_new] # Mapping matrix. matrix = sps.coo_matrix((g_old.num_faces, g_new.num_faces)) for so, sn in zip(both_sides_old, both_sides_new): if len(sn) == 0 or len(so) == 0: # EK: Not sure how this would happen continue # Pick out faces along boundary in old grid, uniquify nodes, and # define auxiliary grids loc_faces_old = faces_on_boundary_old[so] loc_nodes_old = np.unique(nodes_of_faces(g_old, loc_faces_old)) g_aux_old, sort_ind_old = create_1d_from_nodes(g_old.nodes[:, loc_nodes_old]) # Similar for new grid loc_faces_new = faces_on_boundary_new[sn] loc_nodes_new = np.unique(fn[:, loc_faces_new]) g_aux_new, sort_ind_new = create_1d_from_nodes(nodes_new[:, loc_nodes_new]) # Map from global faces to faces along segment in old grid n_loc_old = loc_faces_old.size face_map_old = sps.coo_matrix( (np.ones(n_loc_old), (np.arange(n_loc_old), loc_faces_old)), shape=(n_loc_old, g_old.num_faces), ) # Map from global faces to faces along segment in new grid n_loc_new = loc_faces_new.size face_map_new = sps.coo_matrix( (np.ones(n_loc_new), (np.arange(n_loc_new), loc_faces_new)), shape=(n_loc_new, g_new.num_faces), ) # Map from faces along segment in old to new grid. Consists of three # stages: faces in old to cells in 1d version of old, between 1d cells # in old and new, cells in new to faces in new # From faces to cells in old grid rows = face_to_cell_map( g_old, g_aux_old, loc_faces_old, loc_nodes_old[sort_ind_old] ) cols = np.arange(rows.size) face_to_cell_old = sps.coo_matrix((np.ones(rows.size), (rows, cols))) # Mapping between cells in 1d grid. # Note the order here: The old grid is the target (we want a mapping from the # new to the old grid). between_cells = match_1d(g_aux_old, g_aux_new, tol, scaling) # From faces to cell in new grid rows = face_to_cell_map( g_new, g_aux_new, loc_faces_new, loc_nodes_new[sort_ind_new] ) cols = np.arange(rows.size) face_to_cell_new = sps.coo_matrix((np.ones(rows.size), (rows, cols))) # Composite mapping from faces in new 2d grid to faces in old 2d grid. # Only faces on the boundary of the 1d grid. face_map_segment = face_to_cell_old * between_cells * face_to_cell_new # Extend face-map to go from all faces in the new grid to all faces in the # old one. 
face_map = face_map_old.T * face_map_segment * face_map_new matrix += face_map return matrix.tocsr() ","def match_grids_along_1d_mortar( mg: pp.MortarGrid, g_new: pp.Grid, g_old: pp.Grid, tol: float, scaling: Literal[""averaged"", ""integrated""], ) -> sps.csr_matrix: """"""Match the faces of two 2d grids along a 1d mortar grid. The function identifies faces on the 1d segment specified by the MortarGrid, and finds the area weights of the matched faces. Both sides of the mortar grid are taken care of. Args: mg (pp.MortarGrid): MortarGrid that specifies the target 1d line. Must be of dimension 1. g_new (pp.Grid): New 2d grid. Should have faces split along the 1d line. Dimension 2. g_old (pp.Grid): Old 2d grid. Dimension 2. The mappings in mg from mortar to primary should be set for this grid. tol (double): Tolerance used in comparison of geometric quantities. scaling (str, optional): Control weights of the returned matrix, see return values for specification. Returns: sps.csr_matrix: Matrix that can be used to update mg._primary_to_mortar_int by right multiplication; essentially a mapping from the new to the old grid. Raises: ValueError: If the matching procedure goes wrong. """""" # IMPLEMENTATION NOTE: Contrary to the related methods match_1d/match_2d, the # scaling argument is not permitted to be None in this function. This is by design, # it is less clear how to realize such a scaling in this case. # IMPLEMENTATION NOTE: # The algorithm is technical, partly because we also need to differ between # the left and right side of the segment, as these will belong to different # mortar grids. # # The main steps are: # 1) Identify faces in the old grid along the segment via the existing # mapping between mortar grid and higher dimensional grid. Use this # to define the geometry of the segment. # 2) Define positive and negative side of the segment, and split cells # and faces along the segement according to this criterion. # 3) For all sides (pos, neg), pick out faces in the old and new grid, # and match them up. Extend the mapping to go from all faces in the # two grids. # # Known weak points: Identification of geometric objects, in particular # points, is based on a geometric tolerance. For very fine, or bad, grids # this may give trouble. def cells_from_faces(g, fi): # Find cells of faces, specified by face indices fi. # It is assumed that fi is on the boundary, e.g. there is a single # cell for each element in fi. f, ci, _ = sps.find(g.cell_faces[fi]) if f.size != fi.size: raise ValueError(""We assume fi are boundary faces"") ismem, ind_map = ismember_rows(fi, fi[f], sort=False) if not np.all(ismem): raise ValueError return ci[ind_map] def create_1d_from_nodes(nodes): # From a set of nodes, create a 1d grid. duplicate nodes are removed # and we verify that the nodes are indeed colinear if not pp.geometry_property_checks.points_are_collinear(nodes, tol=tol): raise ValueError(""Nodes are not colinear"") sort_ind = pp.map_geometry.sort_points_on_line(nodes, tol=tol) n = nodes[:, sort_ind] unique_nodes, _, _ = unique_columns_tol(n, tol=tol) g = TensorGrid(np.arange(unique_nodes.shape[1])) g.nodes = unique_nodes g.compute_geometry() return g, sort_ind def nodes_of_faces(g, fi): # Find nodes of a set of faces. f = np.zeros(g.num_faces) f[fi] = 1 nodes = np.where(g.face_nodes * f > 0)[0] return nodes def face_to_cell_map(g_2d, g_1d, loc_faces, loc_nodes): # Match faces in a 2d grid and cells in a 1d grid by identifying # face-nodes and cell-node relations. 
# loc_faces are faces in 2d grid that are known to coincide with # cells. # loc_nodes are indices of 2d nodes along the segment, sorted so that # the ordering coincides with nodes in 1d grid # face-node relation in higher dimensional grid fn = g_2d.face_nodes.indices.reshape((g_2d.dim, g_2d.num_faces), order=""F"") # Reduce to faces along segment fn_loc = fn[:, loc_faces] # Mapping from global (2d) indices to the local indices used in 1d # grid. This also account for a sorting of the nodes, so that the # nodes. ind_map = np.zeros(g_2d.num_faces, dtype=int) ind_map[loc_nodes] = np.arange(loc_nodes.size) # Face-node in local indices fn_loc = ind_map[fn_loc] # Handle special case if loc_faces.size == 1: fn_loc = fn_loc.reshape((2, 1)) # Cell-node relation in 1d cn = g_1d.cell_nodes().indices.reshape((2, g_1d.num_cells), order=""F"") # Find cell index of each face ismem, ind = ismember_rows(fn_loc, cn) # Quality check, the grids should be conforming if not np.all(ismem): raise ValueError return ind # First create a virtual 1d grid along the line, using nodes from the old grid # Identify faces in the old grid that is on the boundary _, faces_on_boundary_old, _ = sps.find(mg._primary_to_mortar_int) # Find the nodes of those faces nodes_on_boundary_old = nodes_of_faces(g_old, faces_on_boundary_old) nodes_1d_old = g_old.nodes[:, nodes_on_boundary_old] # Normal vector of the line. Somewhat arbitrarily chosen as the first one. # This may be prone to rounding errors. normal = g_old.face_normals[:, faces_on_boundary_old[0]].reshape((3, 1)) # Create first version of 1d grid, we really only need start and endpoint g_aux, _ = create_1d_from_nodes(nodes_1d_old) # Start, end and midpoint start = g_aux.nodes[:, 0] end = g_aux.nodes[:, -1] mp = 0.5 * (start + end).reshape((3, 1)) # Find cells in 2d close to the segment bound_cells_old = cells_from_faces(g_old, faces_on_boundary_old) # This may occur if the mortar grid is one sided (T-intersection) # assert bound_cells_old.size > 1, 'Have not implemented this. Not difficult though' # Vector from midpoint to cell centers. Check which side the cells are on # relative to normal vector. # We are here assuming that the segment is not too curved (due to rounding # errors). Pain to come. cc_old = g_old.cell_centers[:, bound_cells_old] side_old = np.sign(np.sum(((cc_old - mp) * normal), axis=0)) # Find cells on the positive and negative side, relative to the positioning # in cells_from_faces pos_side_old = np.where(side_old > 0)[0] neg_side_old = np.where(side_old < 0)[0] if pos_side_old.size + neg_side_old.size != side_old.size: raise ValueError both_sides_old = [pos_side_old, neg_side_old] # Then virtual 1d grid for the new grid. This is a bit more involved, # since we need to identify the nodes by their coordinates. # This part will be prone to rounding errors, in particular for # badly shaped cells. nodes_new = g_new.nodes # Represent the 1d line by its start and end point, as pulled # from the old 1d grid (known coordinates) # Find distance from the nodes to the line defined by the mortar grid. dist, _ = pp.distances.points_segments(nodes_new, start, end) # Look for points in the new grid with a small distance to the line. 
hit = np.argwhere(dist.ravel() < tol).reshape((1, -1))[0] # Depending on geometric tolerance and grid quality, hit # may contain nodes that are close to the 1d line, but not on it # To improve the results, also require that the faces are boundary faces # We know we are in 2d, thus all faces have two nodes # We can do the same trick in 3d, provided we have simplex grids # but this will fail on Cartesian or polyhedral grids fn = g_new.face_nodes.indices.reshape((2, g_new.num_faces), order=""F"") fn_in_hit = np.isin(fn, hit) # Faces where all points are found in hit faces_by_hit = np.where(np.all(fn_in_hit, axis=0))[0] faces_on_boundary_new = np.where(g_new.tags[""fracture_faces""].ravel())[0] # Only consider faces both in hit, and that are boundary faces_on_boundary_new = np.intersect1d(faces_by_hit, faces_on_boundary_new) # Cells along the segment, from the new grid bound_cells_new = cells_from_faces(g_new, faces_on_boundary_new) # assert bound_cells_new.size > 1, 'Have not implemented this. Not difficult though' cc_new = g_new.cell_centers[:, bound_cells_new] side_new = np.sign(np.sum(((cc_new - mp) * normal), axis=0)) pos_side_new = np.where(side_new > 0)[0] neg_side_new = np.where(side_new < 0)[0] if pos_side_new.size + neg_side_new.size != side_new.size: raise ValueError both_sides_new = [pos_side_new, neg_side_new] # Mapping matrix. matrix = sps.coo_matrix((g_old.num_faces, g_new.num_faces)) for so, sn in zip(both_sides_old, both_sides_new): if len(sn) == 0 or len(so) == 0: # EK: Not sure how this would happen continue # Pick out faces along boundary in old grid, uniquify nodes, and # define auxiliary grids loc_faces_old = faces_on_boundary_old[so] loc_nodes_old = np.unique(nodes_of_faces(g_old, loc_faces_old)) g_aux_old, sort_ind_old = create_1d_from_nodes(g_old.nodes[:, loc_nodes_old]) # Similar for new grid loc_faces_new = faces_on_boundary_new[sn] loc_nodes_new = np.unique(fn[:, loc_faces_new]) g_aux_new, sort_ind_new = create_1d_from_nodes(nodes_new[:, loc_nodes_new]) # Map from global faces to faces along segment in old grid n_loc_old = loc_faces_old.size face_map_old = sps.coo_matrix( (np.ones(n_loc_old), (np.arange(n_loc_old), loc_faces_old)), shape=(n_loc_old, g_old.num_faces), ) # Map from global faces to faces along segment in new grid n_loc_new = loc_faces_new.size face_map_new = sps.coo_matrix( (np.ones(n_loc_new), (np.arange(n_loc_new), loc_faces_new)), shape=(n_loc_new, g_new.num_faces), ) # Map from faces along segment in old to new grid. Consists of three # stages: faces in old to cells in 1d version of old, between 1d cells # in old and new, cells in new to faces in new # From faces to cells in old grid rows = face_to_cell_map( g_old, g_aux_old, loc_faces_old, loc_nodes_old[sort_ind_old] ) cols = np.arange(rows.size) face_to_cell_old = sps.coo_matrix((np.ones(rows.size), (rows, cols))) # Mapping between cells in 1d grid. # Note the order here: The old grid is the target (we want a mapping from the # new to the old grid). between_cells = match_1d(g_aux_old, g_aux_new, tol, scaling) # From faces to cell in new grid rows = face_to_cell_map( g_new, g_aux_new, loc_faces_new, loc_nodes_new[sort_ind_new] ) cols = np.arange(rows.size) face_to_cell_new = sps.coo_matrix((np.ones(rows.size), (rows, cols))) # Composite mapping from faces in new 2d grid to faces in old 2d grid. # Only faces on the boundary of the 1d grid. face_map_segment = face_to_cell_old * between_cells * face_to_cell_new # Extend face-map to go from all faces in the new grid to all faces in the # old one. 
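# Loose shape sketch of the product formed below, read off the constructions
# above and assuming the conforming case where the number of segment faces
# equals the number of 1d cells (treat as a sketch, not a guarantee):
#   face_map_old.T   : (g_old.num_faces, n_loc_old)
#   face_map_segment : (n_loc_old, n_loc_new)
#   face_map_new     : (n_loc_new, g_new.num_faces)
# so the result has shape (g_old.num_faces, g_new.num_faces), matching the
# accumulator `matrix` initialised before the loop.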
face_map = face_map_old.T * face_map_segment * face_map_new matrix += face_map return matrix.tocsr() " 4687,"def save_diff_image(expected, actual, output): ''' Parameters ---------- expected : str File path of expected image. actual : str File path of actual image. output : str File path to save difference image to. ''' # Drop alpha channels, similarly to compare_images. expected_image = _png.read_png(expected)[..., :3] actual_image = _png.read_png(actual)[..., :3] actual_image, expected_image = crop_to_same( actual, actual_image, expected, expected_image) expected_image = np.array(expected_image).astype(float) actual_image = np.array(actual_image).astype(float) if expected_image.shape != actual_image.shape: raise _imageComparisonFailure( ""_image sizes do not match expected size: {} "" ""actual size {}"".format(expected_image.shape, actual_image.shape)) abs_diff_image = np.abs(expected_image - actual_image) # expand differences in luminance domain abs_diff_image *= 255 * 10 save_image_np = np.clip(abs_diff_image, 0, 255).astype(np.uint8) height, width, depth = save_image_np.shape # The PDF renderer doesn't produce an alpha channel, but the # matplotlib PNG writer requires one, so expand the array if depth == 3: with_alpha = np.empty((height, width, 4), dtype=np.uint8) with_alpha[:, :, 0:3] = save_image_np save_image_np = with_alpha # Hard-code the alpha channel to fully solid save_image_np[:, :, 3] = 255 _png.write_png(save_image_np, output) ","def save_diff_image(expected, actual, output): ''' Parameters ---------- expected : str File path of expected image. actual : str File path of actual image. output : str File path to save difference image to. ''' # Drop alpha channels, similarly to compare_images. expected_image = _png.read_png(expected)[..., :3] actual_image = _png.read_png(actual)[..., :3] actual_image, expected_image = crop_to_same( actual, actual_image, expected, expected_image) expected_image = np.array(expected_image).astype(float) actual_image = np.array(actual_image).astype(float) if expected_image.shape != actual_image.shape: raise ImageComparisonFailure( ""_image sizes do not match expected size: {} "" ""actual size {}"".format(expected_image.shape, actual_image.shape)) abs_diff_image = np.abs(expected_image - actual_image) # expand differences in luminance domain abs_diff_image *= 255 * 10 save_image_np = np.clip(abs_diff_image, 0, 255).astype(np.uint8) height, width, depth = save_image_np.shape # The PDF renderer doesn't produce an alpha channel, but the # matplotlib PNG writer requires one, so expand the array if depth == 3: with_alpha = np.empty((height, width, 4), dtype=np.uint8) with_alpha[:, :, 0:3] = save_image_np save_image_np = with_alpha # Hard-code the alpha channel to fully solid save_image_np[:, :, 3] = 255 _png.write_png(save_image_np, output) " 31279,"def parse_reports_relationships(reports: List, sub_reports: List, matched_relationships: Dict, id_to_object: Dict, courses_of_action_products: Dict) -> Tuple[list, list]: """"""Parse the relationships between reports' malware to attack-patterns and indicators. Args: reports: a list of reports. sub_reports: a list of sub-reports. matched_relationships (Dict): a dict of relationships in the form of `id: list(related_ids)`. id_to_object: a dict in the form of `id: stix_object`. courses_of_action_products (Dict): Returns: A list of processed reports. """""" indicators = [] for report in reports: related_ids = [] # Since main reports dont hold their own relationships theres a need to collect them. 
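# Sketch of the id convention the startswith() checks below rely on: STIX 2.x
# object ids look like '<type>--<uuid>'. The uuids here are made up:
#   'report--11111111-1111-1111-1111-111111111111'
#   'malware--22222222-2222-2222-2222-222222222222'
#   'attack-pattern--33333333-3333-3333-3333-333333333333'
#   'indicator--44444444-4444-4444-4444-444444444444'
#   'course-of-action--55555555-5555-5555-5555-555555555555'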
related_sub_reports = [object_id for object_id in report.get('rawJSON', {}).get('unit42_object_refs', []) if object_id.startswith('report')] report_malware_set = set() for sub_report in sub_reports: if sub_report.get('id') in related_sub_reports: # Indicators relationship only comes from being related to the malware objects of the report. related_ids += [id_ for id_ in matched_relationships.get(sub_report.get('id'), []) if not id_.startswith('indicator')] for object_id in sub_report.get('object_refs', []): if object_id.startswith('malware'): report_malware_set.add(object_id) elif object_id.startswith('attack-pattern'): related_ids.append(object_id) report['fields']['feedrelatedindicators'] = [] for malware_id in report_malware_set: related_ids += matched_relationships.get(malware_id, []) malware_object = id_to_object.get(malware_id) if malware_object: report['fields']['feedrelatedindicators'].extend([{ 'type': 'Malware', 'value': malware_object.get('name'), 'description': malware_object.get( 'description', ', '.join(malware_object.get('labels', ['No description provided.']))) }]) for relation in related_ids: relation_object = id_to_object.get(relation) if not relation_object: continue if relation.startswith('attack-pattern'): type_name = 'MITRE ATT&CK' relation_value_field = relation_object.get('external_references') elif relation.startswith('indicator'): # Need to create the connection only to file hashes if not relation_object.get('pattern').startswith('[file:'): continue type_name = 'Indicator' relation_value_field = relation_object.get('name') elif relation.startswith('malware'): type_name = 'Malware' relation_value_field = relation_object.get('name') else: continue if isinstance(relation_value_field, str): report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': relation_value_field, 'description': ', '.join(relation_object.get('labels', ['No description provided.'])) }]) indicator_val = relation_value_field else: all_urls = [] external_id = '' for item in relation_value_field: if 'url' in item: all_urls.append(item.get('url')) if 'external_id' in item: external_id = item.get('external_id') report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': external_id, 'description': ','.join(all_urls) }]) indicator_val = external_id # create MITRE ATT&CK indicator if indicator_val and type_name == 'MITRE ATT&CK': # populate mitre course of action data from the relevant relationships relationship = relation_object.get('id') courses_of_action: Dict[str, List] = {} if matched_relationships.get(relationship): for source in matched_relationships[relationship]: if source.startswith('course-of-action') and id_to_object.get(source): relationship_product = courses_of_action_products[source] if not courses_of_action.get(relationship_product): courses_of_action[relationship_product] = [] courses_of_action[relationship_product].append(id_to_object[source]) indicators.append({ ""value"": indicator_val, ""type"": 'MITRE ATT&CK', ""fields"": { ""firstseenbysource"": relation_object.get('created'), ""indicatoridentification"": relation_object.get('id'), ""tags"": [], ""modified"": relation_object.get('modified'), ""reportedby"": 'Unit42', ""mitrecourseofaction"": create_course_of_action_field(courses_of_action) } }) return reports, indicators ","def parse_reports_relationships(reports: List, sub_reports: List, matched_relationships: Dict, id_to_object: Dict, courses_of_action_products: Dict) -> Tuple[list, list]: """"""Parse the relationships between reports' 
malware to attack-patterns and indicators. Args: reports: a list of reports. sub_reports: a list of sub-reports. matched_relationships (Dict): a dict of relationships in the form of `id: list(related_ids)`. id_to_object: a dict in the form of `id: stix_object`. courses_of_action_products (Dict): Returns: A list of processed reports. """""" indicators = [] for report in reports: related_ids = [] # Since main reports dont hold their own relationships theres a need to collect them. related_sub_reports = [object_id for object_id in report.get('rawJSON', {}).get('unit42_object_refs', []) if object_id.startswith('report')] report_malware_set = set() for sub_report in sub_reports: if sub_report.get('id') in related_sub_reports: # Indicators relationship only comes from being related to the malware objects of the report. related_ids += [id_ for id_ in matched_relationships.get(sub_report.get('id'), []) if not id_.startswith('indicator')] for object_id in sub_report.get('object_refs', []): if object_id.startswith('malware'): report_malware_set.add(object_id) elif object_id.startswith('attack-pattern'): related_ids.append(object_id) report['fields']['feedrelatedindicators'] = [] for malware_id in report_malware_set: related_ids += matched_relationships.get(malware_id, []) malware_object = id_to_object.get(malware_id) if malware_object: report['fields']['feedrelatedindicators'].extend([{ 'type': 'Malware', 'value': malware_object.get('name'), 'description': malware_object.get( 'description', ', '.join(malware_object.get('labels', ['No description provided.']))) }]) for relation in related_ids: relation_object = id_to_object.get(relation) if not relation_object: continue if relation.startswith('attack-pattern'): type_name = 'MITRE ATT&CK' relation_value_field = relation_object.get('external_references') elif relation.startswith('indicator'): # Need to create the connection only to file hashes if not relation_object.get('pattern').startswith('[file:'): continue type_name = 'Indicator' relation_value_field = relation_object.get('name') elif relation.startswith('malware'): type_name = 'Malware' relation_value_field = relation_object.get('name') else: continue if isinstance(relation_value_field, str): report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': relation_value_field, 'description': ', '.join(relation_object.get('labels', ['No description provided.'])) }]) indicator_val = relation_value_field else: all_urls = [] external_id = '' for item in relation_value_field: if 'url' in item: all_urls.append(item.get('url')) if 'external_id' in item: external_id = item.get('external_id') report['fields']['feedrelatedindicators'].extend([{ 'type': type_name, 'value': external_id, 'description': ','.join(all_urls) }]) indicator_val = external_id # create MITRE ATT&CK indicator if indicator_val and type_name == 'MITRE ATT&CK': # populate mitre course of action data from the relevant relationships relationship = relation_object.get('id') courses_of_action: Dict[str, List] = {} if relationship in matched_relationships: for source in matched_relationships[relationship]: if source.startswith('course-of-action') and id_to_object.get(source): relationship_product = courses_of_action_products[source] if not courses_of_action.get(relationship_product): courses_of_action[relationship_product] = [] courses_of_action[relationship_product].append(id_to_object[source]) indicators.append({ ""value"": indicator_val, ""type"": 'MITRE ATT&CK', ""fields"": { ""firstseenbysource"": 
relation_object.get('created'), ""indicatoridentification"": relation_object.get('id'), ""tags"": [], ""modified"": relation_object.get('modified'), ""reportedby"": 'Unit42', ""mitrecourseofaction"": create_course_of_action_field(courses_of_action) } }) return reports, indicators " 30175,"def execute_link(link_cmd_args, record_streams, quiet): """""" Executes the passed command plus arguments in a subprocess and returns the return value of the executed command. If the specified standard output and standard error of the command are recorded and also returned to the caller. link_cmd_args: A list where the first element is a command and the remaining elements are arguments passed to that command. record_streams: A bool that specifies whether to redirect standard output and and standard error to a temporary file which is returned to the caller (True) or not (False). TBA (see https://github.com/in-toto/in-toto/issues/6) Executes passed command in a subprocess and redirects stdout and stderr if specified. - A dictionary containing standard output and standard error of the executed command, called by-products. Note: If record_streams is False, the dict values are empty strings. - The return value of the executed command. """""" if record_streams: if (quiet == False): #record_streams true, quiet false return_code, stdout_str, stderr_str = \ securesystemslib.process.run_duplicate_streams(link_cmd_args) else: #record_streams true, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.PIPE, stderr=securesystemslib.process.PIPE) stdout_str = process.stdout stderr_str = process.stderr return_code = process.returncode else: if (quiet == False): #record_streams false, quiet false process = securesystemslib.process.run(link_cmd_args, check=False, stdout=None, stderr=None) stdout_str = stderr_str = """" return_code = process.returncode else: #record_streams false, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.DEVNULL, stderr=securesystemslib.process.DEVNULL) stdout_str = stderr_str = """" return_code = process.returncode return { ""stdout"": stdout_str, ""stderr"": stderr_str, ""return-value"": return_code } ","def execute_link(link_cmd_args, record_streams, quiet): """""" Executes the passed command plus arguments in a subprocess and returns the return value of the executed command. If the specified standard output and standard error of the command are recorded and also returned to the caller. link_cmd_args: A list where the first element is a command and the remaining elements are arguments passed to that command. record_streams: A bool that specifies whether to redirect standard output and and standard error to a temporary file which is returned to the caller (True) or not (False). TBA (see https://github.com/in-toto/in-toto/issues/6) Executes passed command in a subprocess and redirects stdout and stderr if specified. - A dictionary containing standard output and standard error of the executed command, called by-products. Note: If record_streams is False, the dict values are empty strings. - The return value of the executed command. 
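Example (illustrative only; the exact by-products depend on the host system):
  execute_link(['echo', 'hello'], record_streams=True, quiet=False)
  might return something like
  {'stdout': 'hello\n', 'stderr': '', 'return-value': 0}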
"""""" if record_streams: if quiet is False: #record_streams true, quiet false return_code, stdout_str, stderr_str = \ securesystemslib.process.run_duplicate_streams(link_cmd_args) else: #record_streams true, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.PIPE, stderr=securesystemslib.process.PIPE) stdout_str = process.stdout stderr_str = process.stderr return_code = process.returncode else: if (quiet == False): #record_streams false, quiet false process = securesystemslib.process.run(link_cmd_args, check=False, stdout=None, stderr=None) stdout_str = stderr_str = """" return_code = process.returncode else: #record_streams false, quiet true process = securesystemslib.process.run(link_cmd_args, check=False, stdout=securesystemslib.process.DEVNULL, stderr=securesystemslib.process.DEVNULL) stdout_str = stderr_str = """" return_code = process.returncode return { ""stdout"": stdout_str, ""stderr"": stderr_str, ""return-value"": return_code } " 41527,"def load_h5_cache_file( cachefile, use_cached, metadata, current_version, last_compatible_version=OLDEST_COMPATIBLE_VERSION, verbose=True, wavenum_min=0.0, wavenum_max=Inf, ): """"""Function to load a h5 cache file Parameters ---------- cachefile: str cache file path use_cached: str use cache file if value is not ``False``: - if ``True``, use (and generate if doesnt exist) cache file. - if ``'regen'``, delete cache file (if exists) so it is regenerated - if ``'force'``, use cache file and raises an error if it doesnt exist if using the cache file, check if the file is deprecated. If it is deprecated, regenerate the file unless ``'force'`` was used (in that case, raise an error) metadata: dict values are compared to cache file attributes. If they dont match, the file is considered deprecated. See ``use_cached`` to know how to handle deprecated files current_version: str version is compared to cache file version (part of attributes). If current version is superior, a simple warning is triggered. last_compatible_version: str if file version is inferior to this, file is considered deprecated. See ``use_cached`` to know how to handle deprecated files. Default :data:`~radis.OLDEST_COMPATIBLE_VERSION`. Returns ------- df: pandas DataFrame, or None None if no cache file was found, or if it was deleted See Also -------- :data:`~radis.OLDEST_COMPATIBLE_VERSION` """""" # 1. know if we have to load the file if not use_cached: return None elif use_cached == ""regen"" and exists(cachefile): os.remove(cachefile) if verbose: printm(""Deleted h5 cache file : {0}"".format(cachefile)) return None # 2. check the file is here if not exists(cachefile): if use_cached == ""force"": raise ValueError(""Cache file {0} doesnt exist"".format(cachefile)) else: return None # File doesn't exist. It's okay. # 3. read file attributes to know if it's deprecated try: check_not_deprecated( cachefile, metadata, current_version=current_version, last_compatible_version=last_compatible_version, ) # ... if deprecated, raise an error only if 'force' except DeprecatedFileError as err: if use_cached == ""force"": raise err else: if verbose: printr( ""File {0} deprecated:\n{1}\nDeleting it!"".format( cachefile, str(err) ) ) os.remove(cachefile) return None # 4. File is not not deprecated: read the the extremum wavenumbers. try: check_relevancy( cachefile, wavenum_min, wavenum_max, ) # ... 
if irrelevant, raise an error only if 'force' except IrrelevantFileError as err: if use_cached == ""force"": raise err else: if True: printr(""Database file {0} irrelevant and not loaded"".format(cachefile)) return ""IrrelevantFile"" # 5. File is relevant: read the content. else: df = None if verbose >= 2: printm(""Reading cache file ({0})"".format(cachefile)) try: df = pd.read_hdf(cachefile, ""df"") except KeyError as err: # An error happened during file reading. # Fail safe by deleting cache file (unless we explicitely wanted it # with 'force') if use_cached == ""force"": raise else: if verbose: printr( ""An error happened during cache file reading "" + ""{0}:\n{1}\n"".format(cachefile, str(err)) + ""Deleting cache file to regenerate it"" ) os.remove(cachefile) df = None return df ","def load_h5_cache_file( cachefile, use_cached, metadata, current_version, last_compatible_version=OLDEST_COMPATIBLE_VERSION, verbose=True, wavenum_min=0.0, wavenum_max=Inf, ): """"""Function to load a h5 cache file Parameters ---------- cachefile: str cache file path use_cached: str use cache file if value is not ``False``: - if ``True``, use (and generate if doesnt exist) cache file. - if ``'regen'``, delete cache file (if exists) so it is regenerated - if ``'force'``, use cache file and raises an error if it doesnt exist if using the cache file, check if the file is deprecated. If it is deprecated, regenerate the file unless ``'force'`` was used (in that case, raise an error) metadata: dict values are compared to cache file attributes. If they dont match, the file is considered deprecated. See ``use_cached`` to know how to handle deprecated files current_version: str version is compared to cache file version (part of attributes). If current version is superior, a simple warning is triggered. last_compatible_version: str if file version is inferior to this, file is considered deprecated. See ``use_cached`` to know how to handle deprecated files. Default :data:`~radis.OLDEST_COMPATIBLE_VERSION`. Returns ------- df: pandas DataFrame, or None None if no cache file was found, or if it was deleted See Also -------- :data:`~radis.OLDEST_COMPATIBLE_VERSION` """""" # 1. know if we have to load the file if not use_cached: return None elif use_cached == ""regen"" and exists(cachefile): os.remove(cachefile) if verbose: printm(""Deleted h5 cache file : {0}"".format(cachefile)) return None # 2. check the file is here if not exists(cachefile): if use_cached == ""force"": raise ValueError(""Cache file {0} doesnt exist"".format(cachefile)) else: return None # File doesn't exist. It's okay. # 3. read file attributes to know if it's deprecated try: check_not_deprecated( cachefile, metadata, current_version=current_version, last_compatible_version=last_compatible_version, ) # ... if deprecated, raise an error only if 'force' except DeprecatedFileError as err: if use_cached == ""force"": raise err else: if verbose: printr( ""File {0} deprecated:\n{1}\nDeleting it!"".format( cachefile, str(err) ) ) os.remove(cachefile) return None # 4. File is not not deprecated: read the the extremum wavenumbers. try: check_relevancy( cachefile, wavenum_min, wavenum_max, ) # ... if irrelevant, raise an error only if 'force' except IrrelevantFileError as err: if use_cached == ""force"": raise err else: if True: printg(""Database file {0} irrelevant and not loaded"".format(cachefile)) return ""IrrelevantFile"" # 5. File is relevant: read the content. 
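# Illustrative note on the assumed cache layout: the file is expected to hold
# a single pandas DataFrame stored under the key 'df', e.g. written with
#   df.to_hdf(cachefile, key='df')
# which is what the pd.read_hdf(cachefile, 'df') call below relies on.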
else: df = None if verbose >= 2: printm(""Reading cache file ({0})"".format(cachefile)) try: df = pd.read_hdf(cachefile, ""df"") except KeyError as err: # An error happened during file reading. # Fail safe by deleting cache file (unless we explicitely wanted it # with 'force') if use_cached == ""force"": raise else: if verbose: printr( ""An error happened during cache file reading "" + ""{0}:\n{1}\n"".format(cachefile, str(err)) + ""Deleting cache file to regenerate it"" ) os.remove(cachefile) df = None return df " 3871,"def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): r""""""Compute the group betweenness centrality for a group of nodes. Group betweenness centrality of a group of nodes $C$ is the sum of the fraction of all-pairs shortest paths that pass through any vertex in $C$ .. math:: c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} where $V$ is the set of nodes, $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of those paths passing through some node in group $C$. Note that $(s, t)$ are not members of the group ($V-C$ is the set of nodes in $V$ that are not in $C$). Parameters ---------- G : graph A NetworkX graph. C : list or set or list of lists or list of sets A group or a list of groups containing nodes which belong to G, for which group betweenness centrality is to be calculated. normalized : bool, optional (default=True) If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. weight : None or string, optional (default=None) If None, all edge weights are considered equal. Otherwise holds the name of the edge attribute used as weight. The weight of an edge is considered as the length or distances between the to sides. endpoints : bool, optional (default=False) If True include the endpoints in the shortest path counts. Raises ------ NodeNotFound If node(s) in C are not present in G. Returns ------- betweenness : list of floats or float If C is a single group then return a float. If C is a list with several groups then return a list of group betweenness centralities. See Also -------- betweenness_centrality Notes ----- Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. The initial implementation of the algorithm is mentioned in [2]_. This function uses an improved algorithm presented in [4]_. The number of nodes in the group must be a maximum of n - 2 where `n` is the total number of nodes in the graph. For weighted graphs the edge weights must be greater than zero. Zero edge weights can produce an infinite number of equal length paths between pairs of nodes. The total number of paths between source and target is counted differently for directed and undirected graphs. Directed paths between ""u"" and ""v"" are counted as two possible paths (one each direction) while undirected paths between ""u"" and ""v"" are counted as one path. Said another way, the sum in the expression above is over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. References ---------- .. [1] M G Everett and S P Borgatti: The Centrality of Groups and Classes. Journal of Mathematical Sociology. 23(3): 181-201. 1999. http://www.analytictech.com/borgatti/group_centrality.htm .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness Centrality and their Generic Computation. Social Networks 30(2):136-145, 2008. 
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf .. [3] Sourav Medya et. al.: Group Centrality Maximization via Network Design. SIAM International Conference on Data Mining, SDM 2018, 126–134. https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. ""Fast algorithm for successive computation of group betweenness centrality."" https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 """""" GBC = [] # initialize betweenness list_of_groups = True # check weather C contains one or many groups if any(el in G for el in C): C = [C] list_of_groups = False set_v = {node for group in C for node in group} if set_v - G.nodes: # element(s) of C not in G raise nx.NodeNotFound(f""The node(s) {set_v - G.nodes} are in C but not in G."") # pre-processing PB, sigma, D = _group_preprocessing(G, set_v, weight) # the algorithm for each group for group in C: group = set(group) # set of nodes in group # initialize the matrices of the sigma and the PB GBC_group = 0 sigma_m = deepcopy(sigma) PB_m = deepcopy(PB) sigma_m_v = deepcopy(sigma_m) PB_m_v = deepcopy(PB_m) for v in group: GBC_group += PB_m[v][v] for x in group: for y in group: dxvy = 0 dxyv = 0 dvxy = 0 if not ( sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0 ): if D[x][v] == D[x][y] + D[y][v]: dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v] if D[x][y] == D[x][v] + D[v][y]: dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y] if D[v][y] == D[v][x] + D[x][y]: dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y] sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy) PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy if y != v: PB_m_v[x][y] -= PB_m[x][v] * dxyv if x != v: PB_m_v[x][y] -= PB_m[v][y] * dvxy sigma_m, sigma_m_v = sigma_m_v, sigma_m PB_m, PB_m_v = PB_m_v, PB_m # endpoints v, c = len(G), len(group) if not endpoints: scale = 0 # if the graph is connected then subtract the endpoints from # the count for all the nodes in the graph. else count how many # nodes are connected to the group's nodes and subtract that. if nx.is_directed(G): if nx.is_strongly_connected(G): scale = c * (2 * v - c - 1) elif nx.is_connected(G): scale = c * (2 * v - c - 1) if scale == 0: for group_node1 in group: for node in D[group_node1]: if node != group_node1: if node in group: scale += 1 else: scale += 2 GBC_group -= scale # normalized if normalized: scale = 1 / ((v - c) * (v - c - 1)) GBC_group *= scale # If undirected than count only the undirected edges elif not G.is_directed(): GBC_group /= 2 GBC.append(GBC_group) if list_of_groups: return GBC else: return GBC[0] ","def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): r""""""Compute the group betweenness centrality for a group of nodes. Group betweenness centrality of a group of nodes $C$ is the sum of the fraction of all-pairs shortest paths that pass through any vertex in $C$ .. math:: c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} where $V$ is the set of nodes, $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of those paths passing through some node in group $C$. Note that $(s, t)$ are not members of the group ($V-C$ is the set of nodes in $V$ that are not in $C$). Parameters ---------- G : graph A NetworkX graph. C : list or set or list of lists or list of sets A group or a list of groups containing nodes which belong to G, for which group betweenness centrality is to be calculated. 
normalized : bool, optional (default=True) If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. weight : None or string, optional (default=None) If None, all edge weights are considered equal. Otherwise holds the name of the edge attribute used as weight. The weight of an edge is treated as the length or distance between the two sides. endpoints : bool, optional (default=False) If True include the endpoints in the shortest path counts. Raises ------ NodeNotFound If node(s) in C are not present in G. Returns ------- betweenness : list of floats or float If C is a single group then return a float. If C is a list with several groups then return a list of group betweenness centralities. See Also -------- betweenness_centrality Notes ----- Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. The initial implementation of the algorithm is mentioned in [2]_. This function uses an improved algorithm presented in [4]_. The number of nodes in the group must be a maximum of n - 2 where `n` is the total number of nodes in the graph. For weighted graphs the edge weights must be greater than zero. Zero edge weights can produce an infinite number of equal length paths between pairs of nodes. The total number of paths between source and target is counted differently for directed and undirected graphs. Directed paths between ""u"" and ""v"" are counted as two possible paths (one each direction) while undirected paths between ""u"" and ""v"" are counted as one path. Said another way, the sum in the expression above is over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. References ---------- .. [1] M G Everett and S P Borgatti: The Centrality of Groups and Classes. Journal of Mathematical Sociology. 23(3): 181-201. 1999. http://www.analytictech.com/borgatti/group_centrality.htm .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness Centrality and their Generic Computation. Social Networks 30(2):136-145, 2008. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf .. [3] Sourav Medya et. al.: Group Centrality Maximization via Network Design. SIAM International Conference on Data Mining, SDM 2018, 126–134. https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. 
""Fast algorithm for successive computation of group betweenness centrality."" https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 """""" GBC = [] # initialize betweenness list_of_groups = True # check weather C contains one or many groups if any(el in G for el in C): C = [C] list_of_groups = False set_v = {node for group in C for node in group} if set_v - G.nodes: # element(s) of C not in G raise nx.NodeNotFound(f""The node(s) {set_v - G.nodes} are in C but not in G."") # pre-processing PB, sigma, D = _group_preprocessing(G, set_v, weight) # the algorithm for each group for group in C: group = set(group) # set of nodes in group # initialize the matrices of the sigma and the PB GBC_group = 0 sigma_m = deepcopy(sigma) PB_m = deepcopy(PB) sigma_m_v = deepcopy(sigma_m) PB_m_v = deepcopy(PB_m) for v in group: GBC_group += PB_m[v][v] for x in group: for y in group: dxvy = 0 dxyv = 0 dvxy = 0 if not ( sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0 ): if D[x][v] == D[x][y] + D[y][v]: dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v] if D[x][y] == D[x][v] + D[v][y]: dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y] if D[v][y] == D[v][x] + D[x][y]: dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y] sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy) PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy if y != v: PB_m_v[x][y] -= PB_m[x][v] * dxyv if x != v: PB_m_v[x][y] -= PB_m[v][y] * dvxy sigma_m, sigma_m_v = sigma_m_v, sigma_m PB_m, PB_m_v = PB_m_v, PB_m # endpoints v, c = len(G), len(group) if not endpoints: scale = 0 # if the graph is connected then subtract the endpoints from # the count for all the nodes in the graph. else count how many # nodes are connected to the group's nodes and subtract that. if nx.is_directed(G): if nx.is_strongly_connected(G): scale = c * (2 * v - c - 1) elif nx.is_connected(G): scale = c * (2 * v - c - 1) if scale == 0: for group_node1 in group: for node in D[group_node1]: if node != group_node1: if node in group: scale += 1 else: scale += 2 GBC_group -= scale # normalized if normalized: scale = 1 / ((v - c) * (v - c - 1)) GBC_group *= scale # If undirected than count only the undirected edges elif not G.is_directed(): GBC_group /= 2 GBC.append(GBC_group) if list_of_groups: return GBC else: return GBC[0] " 26474,"def _get_raw_command(known_args): if known_args.force_command: return known_args.force_command.split(' ') elif not os.environ.get('TF_HISTORY'): return known_args.command else: history = os.environ['TF_HISTORY'].split('\n')[::-1] alias = get_alias() executables = get_all_executables() for command in history: diff = SequenceMatcher(a=alias, b=command).ratio() if diff < const.DIFF_WITH_ALIAS or command in executables: return [command] return [] ","def _get_raw_command(known_args): if known_args.force_command: return [known_args.force_command] elif not os.environ.get('TF_HISTORY'): return known_args.command else: history = os.environ['TF_HISTORY'].split('\n')[::-1] alias = get_alias() executables = get_all_executables() for command in history: diff = SequenceMatcher(a=alias, b=command).ratio() if diff < const.DIFF_WITH_ALIAS or command in executables: return [command] return [] " 25064,"def _config_initialization( linter: PyLinter, args_list: list[str], reporter: reporters.BaseReporter | reporters.MultiReporter | None = None, config_file: None | str | Path = None, verbose_mode: bool = False, ) -> list[str]: """"""Parse all available options, read config files and command line arguments and set options accordingly. 
"""""" config_file = Path(config_file) if config_file else None # Set the current module to the configuration file # to allow raising messages on the configuration file. linter.set_current_module(str(config_file) if config_file else """") # Read the configuration file config_file_parser = _ConfigurationFileParser(verbose_mode, linter) try: config_data, config_args = config_file_parser.parse_config_file( file_path=config_file ) except OSError as ex: print(ex, file=sys.stderr) sys.exit(32) # Run init hook, if present, before loading plugins if ""init-hook"" in config_data: exec(utils._unquote(config_data[""init-hook""])) # pylint: disable=exec-used # Load plugins if specified in the config file if ""load-plugins"" in config_data: linter.load_plugin_modules(utils._splitstrip(config_data[""load-plugins""])) unrecognized_options_message = None # First we parse any options from a configuration file try: linter._parse_configuration_file(config_args) except _UnrecognizedOptionError as exc: unrecognized_options_message = "", "".join(exc.options) # Then, if a custom reporter is provided as argument, it may be overridden # by file parameters, so we re-set it here. We do this before command line # parsing, so it's still overridable by command line options if reporter: linter.set_reporter(reporter) # Set the current module to the command line # to allow raising messages on it linter.set_current_module(""Command line"") # Now we parse any options from the command line, so they can override # the configuration file parsed_args_list = linter._parse_command_line_configuration(args_list) # Remove the positional arguments separator from the list of arguments try: parsed_args_list.remove(""--"") except ValueError: pass # Check if there are any options that we do not recognize unrecognized_options: list[str] = [] for opt in parsed_args_list: if opt.startswith(""--""): unrecognized_options.append(opt[2:]) elif opt.startswith(""-""): unrecognized_options.append(opt[1:]) if unrecognized_options: msg = "", "".join(unrecognized_options) linter._arg_parser.error(f""Unrecognized option found: {msg}"") # Now that config file and command line options have been loaded # with all disables, it is safe to emit messages if unrecognized_options_message is not None: linter.set_current_module(str(config_file) if config_file else """") linter.add_message( ""unrecognized-option"", args=unrecognized_options_message, line=0 ) linter._emit_stashed_messages() # Set the current module to configuration as we don't know where # the --load-plugins key is coming from linter.set_current_module(""Command line or configuration file"") # We have loaded configuration from config file and command line. Now, we can # load plugin specific configuration. linter.load_plugin_configuration() # Now that plugins are loaded, get list of all fail_on messages, and enable them linter.enable_fail_on_messages() linter._parse_error_mode() # Link the base Namespace object on the current directory linter._directory_namespaces[Path(""."").resolve()] = (linter.config, {}) # parsed_args_list should now only be a list of files/directories to lint. # All other options have been removed from the list. return parsed_args_list ","def _config_initialization( linter: PyLinter, args_list: list[str], reporter: reporters.BaseReporter | reporters.MultiReporter | None = None, config_file: None | str | Path = None, verbose_mode: bool = False, ) -> list[str]: """"""Parse all available options, read config files and command line arguments and set options accordingly. 
"""""" config_file = Path(config_file) if config_file else None # Set the current module to the configuration file # to allow raising messages on the configuration file. linter.set_current_module(str(config_file) if config_file else """") # Read the configuration file config_file_parser = _ConfigurationFileParser(verbose_mode, linter) try: config_data, config_args = config_file_parser.parse_config_file( file_path=config_file ) except OSError as ex: print(ex, file=sys.stderr) sys.exit(32) # Run init hook, if present, before loading plugins if ""init-hook"" in config_data: exec(utils._unquote(config_data[""init-hook""])) # pylint: disable=exec-used # Load plugins if specified in the config file if ""load-plugins"" in config_data: linter.load_plugin_modules(utils._splitstrip(config_data[""load-plugins""])) unrecognized_options_message = None # First we parse any options from a configuration file try: linter._parse_configuration_file(config_args) except _UnrecognizedOptionError as exc: unrecognized_options_message = "", "".join(exc.options) # Then, if a custom reporter is provided as argument, it may be overridden # by file parameters, so we re-set it here. We do this before command line # parsing, so it's still overridable by command line options if reporter: linter.set_reporter(reporter) # Set the current module to the command line # to allow raising messages on it linter.set_current_module(""Command line"") # Now we parse any options from the command line, so they can override # the configuration file parsed_args_list = linter._parse_command_line_configuration(args_list) # Remove the positional arguments separator from the list of arguments if it exists try: parsed_args_list.remove(""--"") except ValueError: pass # Check if there are any options that we do not recognize unrecognized_options: list[str] = [] for opt in parsed_args_list: if opt.startswith(""--""): unrecognized_options.append(opt[2:]) elif opt.startswith(""-""): unrecognized_options.append(opt[1:]) if unrecognized_options: msg = "", "".join(unrecognized_options) linter._arg_parser.error(f""Unrecognized option found: {msg}"") # Now that config file and command line options have been loaded # with all disables, it is safe to emit messages if unrecognized_options_message is not None: linter.set_current_module(str(config_file) if config_file else """") linter.add_message( ""unrecognized-option"", args=unrecognized_options_message, line=0 ) linter._emit_stashed_messages() # Set the current module to configuration as we don't know where # the --load-plugins key is coming from linter.set_current_module(""Command line or configuration file"") # We have loaded configuration from config file and command line. Now, we can # load plugin specific configuration. linter.load_plugin_configuration() # Now that plugins are loaded, get list of all fail_on messages, and enable them linter.enable_fail_on_messages() linter._parse_error_mode() # Link the base Namespace object on the current directory linter._directory_namespaces[Path(""."").resolve()] = (linter.config, {}) # parsed_args_list should now only be a list of files/directories to lint. # All other options have been removed from the list. 
return parsed_args_list " 50141,"def load_config(cfg): # return an updated config with entries of the correct type # support converting the old top level format into new format mycfg = cfg.get(""rsyslog"", {}) if isinstance(cfg.get(""rsyslog""), list): LOG.warning( ""This rsyslog config format is deprecated and will be removed "" ""in a future version of cloud-init"" ) mycfg = {KEYNAME_CONFIGS: cfg.get(""rsyslog"")} if KEYNAME_LEGACY_FILENAME in cfg: mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME] if KEYNAME_LEGACY_DIR in cfg: mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR] fillup = ( (KEYNAME_CONFIGS, [], list), (KEYNAME_DIR, DEF_DIR, str), (KEYNAME_FILENAME, DEF_FILENAME, str), (KEYNAME_RELOAD, DEF_RELOAD, (str, list)), (KEYNAME_REMOTES, DEF_REMOTES, dict), ) for key, default, vtypes in fillup: if key not in mycfg or not isinstance(mycfg[key], vtypes): mycfg[key] = default return mycfg ","def load_config(cfg): # return an updated config with entries of the correct type # support converting the old top level format into new format mycfg = cfg.get(""rsyslog"", {}) if isinstance(cfg.get(""rsyslog""), list): LOG.warning( ""DEPRECATION: This rsyslog list format is deprecated and will be "" ""removed in a future version of cloud-init. Use documented keys."" ) mycfg = {KEYNAME_CONFIGS: cfg.get(""rsyslog"")} if KEYNAME_LEGACY_FILENAME in cfg: mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME] if KEYNAME_LEGACY_DIR in cfg: mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR] fillup = ( (KEYNAME_CONFIGS, [], list), (KEYNAME_DIR, DEF_DIR, str), (KEYNAME_FILENAME, DEF_FILENAME, str), (KEYNAME_RELOAD, DEF_RELOAD, (str, list)), (KEYNAME_REMOTES, DEF_REMOTES, dict), ) for key, default, vtypes in fillup: if key not in mycfg or not isinstance(mycfg[key], vtypes): mycfg[key] = default return mycfg " 2698,"def test_pairwise_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) S = pairwise_distances(X, metric=""euclidean"") S2 = euclidean_distances(X) assert_array_almost_equal(S, S2) # Euclidean distance, with Y != X. Y = rng.random_sample((2, 4)) S = pairwise_distances(X, Y, metric=""euclidean"") S2 = euclidean_distances(X, Y) assert_array_almost_equal(S, S2) # Check to ensure NaNs work with pairwise_distances. X_masked = rng.random_sample((5, 4)) Y_masked = rng.random_sample((2, 4)) X_masked[0, 0] = np.nan Y_masked[0, 0] = np.nan S_masked = pairwise_distances(X_masked, Y_masked, metric=""nan_euclidean"") S2_masked = nan_euclidean_distances(X_masked, Y_masked) assert_array_almost_equal(S_masked, S2_masked) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) S2 = pairwise_distances(X_tuples, Y_tuples, metric=""euclidean"") assert_array_almost_equal(S, S2) # Test haversine distance # The data should be valid latitude and longitude X = rng.random_sample((5, 2)) X[:, 0] = (X[:, 0] - 0.5) * 2 * np.pi / 2 X[:, 1] = (X[:, 1] - 0.5) * 2 * np.pi S = pairwise_distances(X, metric=""haversine"") S2 = haversine_distances(X) assert_array_almost_equal(S, S2) # Test haversine distance, with Y != X Y = rng.random_sample((2, 2)) Y[:, 0] = (Y[:, 0] - 0.5) * 2 * np.pi / 2 Y[:, 1] = (Y[:, 1] - 0.5) * 2 * np.pi S = pairwise_distances(X, Y, metric=""haversine"") S2 = haversine_distances(X, Y) assert_array_almost_equal(S, S2) # ""cityblock"" uses scikit-learn metric, cityblock (function) is # scipy.spatial. 
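# Illustrative aside with hypothetical 2-D points: the string metric and the
# SciPy callable should agree on plain city-block distance, e.g. for (1, 2)
# and (4, 6):
#   |1 - 4| + |2 - 6| = 3 + 4 = 7
# and that is the value both metric='cityblock' and metric=cityblock are
# expected to produce for that pair.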
S = pairwise_distances(X, metric=""cityblock"") S2 = pairwise_distances(X, metric=cityblock) assert S.shape[0] == S.shape[1] assert S.shape[0] == X.shape[0] assert_array_almost_equal(S, S2) # The manhattan metric should be equivalent to cityblock. S = pairwise_distances(X, Y, metric=""manhattan"") S2 = pairwise_distances(X, Y, metric=cityblock) assert S.shape[0] == X.shape[0] assert S.shape[1] == Y.shape[0] assert_array_almost_equal(S, S2) # Test manhattan distance works with NaN S_masked = pairwise_distances(X_masked, Y_masked, metric=""nan_manhattan"") S2_masked = nan_manhattan_distances(X_masked, Y_masked) assert_array_almost_equal(S_masked, S2_masked) # Test cosine as a string metric versus cosine callable # The string ""cosine"" uses sklearn.metric, # while the function cosine is scipy.spatial S = pairwise_distances(X, Y, metric=""cosine"") S2 = pairwise_distances(X, Y, metric=cosine) assert S.shape[0] == X.shape[0] assert S.shape[1] == Y.shape[0] assert_array_almost_equal(S, S2) # Test with sparse X and Y, # currently only supported for Euclidean, L1 and cosine. X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) S = pairwise_distances(X_sparse, Y_sparse, metric=""euclidean"") S2 = euclidean_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse, metric=""cosine"") S2 = cosine_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric=""manhattan"") S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo()) assert_array_almost_equal(S, S2) S2 = manhattan_distances(X, Y) assert_array_almost_equal(S, S2) # Test with scipy.spatial.distance metric, with a kwd kwds = {""p"": 2.0} S = pairwise_distances(X, Y, metric=""minkowski"", **kwds) S2 = pairwise_distances(X, Y, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # same with Y = None kwds = {""p"": 2.0} S = pairwise_distances(X, metric=""minkowski"", **kwds) S2 = pairwise_distances(X, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # Test that scipy distance metrics throw an error if sparse matrix given with pytest.raises(TypeError): pairwise_distances(X_sparse, metric=""minkowski"") with pytest.raises(TypeError): pairwise_distances(X, Y_sparse, metric=""minkowski"") # Test that a value error is raised if the metric is unknown with pytest.raises(ValueError): pairwise_distances(X, Y, metric=""blah"") ","def test_pairwise_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) S = pairwise_distances(X, metric=""euclidean"") S2 = euclidean_distances(X) assert_array_almost_equal(S, S2) # Euclidean distance, with Y != X. Y = rng.random_sample((2, 4)) S = pairwise_distances(X, Y, metric=""euclidean"") S2 = euclidean_distances(X, Y) assert_array_almost_equal(S, S2) # Check to ensure NaNs work with pairwise_distances. 
X_masked = rng.random_sample((5, 4)) Y_masked = rng.random_sample((2, 4)) X_masked[0, 0] = np.nan Y_masked[0, 0] = np.nan S_masked = pairwise_distances(X_masked, Y_masked, metric=""nan_euclidean"") S2_masked = nan_euclidean_distances(X_masked, Y_masked) assert_array_almost_equal(S_masked, S2_masked) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) S2 = pairwise_distances(X_tuples, Y_tuples, metric=""euclidean"") assert_array_almost_equal(S, S2) # Test haversine distance # The data should be valid latitude and longitude X = rng.random_sample((5, 2)) X[:, 0] = (X[:, 0] - 0.5) * 2 * np.pi / 2 X[:, 1] = (X[:, 1] - 0.5) * 2 * np.pi S = pairwise_distances(X, metric=""haversine"") S2 = haversine_distances(X) assert_array_almost_equal(S, S2) # Test haversine distance, with Y != X Y = rng.random_sample((2, 2)) Y[:, 0] = (Y[:, 0] - 0.5) * 2 * np.pi / 2 Y[:, 1] = (Y[:, 1] - 0.5) * 2 * np.pi S = pairwise_distances(X, Y, metric=""haversine"") S2 = haversine_distances(X, Y) assert_array_almost_equal(S, S2) # ""cityblock"" uses scikit-learn metric, cityblock (function) is # scipy.spatial. S = pairwise_distances(X, metric=""cityblock"") S2 = pairwise_distances(X, metric=cityblock) assert S.shape[0] == S.shape[1] assert S.shape[0] == X.shape[0] assert_array_almost_equal(S, S2) # The manhattan metric should be equivalent to cityblock. S = pairwise_distances(X, Y, metric=""manhattan"") S2 = pairwise_distances(X, Y, metric=cityblock) assert S.shape[0] == X.shape[0] assert S.shape[1] == Y.shape[0] assert_array_almost_equal(S, S2) # Test manhattan distance works with NaN S_masked = pairwise_distances(X_masked, Y_masked, metric=""nan_manhattan"") S2_masked = nan_manhattan_distances(X_masked, Y_masked) assert_allclose(S_masked, S2_masked) # Test cosine as a string metric versus cosine callable # The string ""cosine"" uses sklearn.metric, # while the function cosine is scipy.spatial S = pairwise_distances(X, Y, metric=""cosine"") S2 = pairwise_distances(X, Y, metric=cosine) assert S.shape[0] == X.shape[0] assert S.shape[1] == Y.shape[0] assert_array_almost_equal(S, S2) # Test with sparse X and Y, # currently only supported for Euclidean, L1 and cosine. 
X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) S = pairwise_distances(X_sparse, Y_sparse, metric=""euclidean"") S2 = euclidean_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse, metric=""cosine"") S2 = cosine_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric=""manhattan"") S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo()) assert_array_almost_equal(S, S2) S2 = manhattan_distances(X, Y) assert_array_almost_equal(S, S2) # Test with scipy.spatial.distance metric, with a kwd kwds = {""p"": 2.0} S = pairwise_distances(X, Y, metric=""minkowski"", **kwds) S2 = pairwise_distances(X, Y, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # same with Y = None kwds = {""p"": 2.0} S = pairwise_distances(X, metric=""minkowski"", **kwds) S2 = pairwise_distances(X, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # Test that scipy distance metrics throw an error if sparse matrix given with pytest.raises(TypeError): pairwise_distances(X_sparse, metric=""minkowski"") with pytest.raises(TypeError): pairwise_distances(X, Y_sparse, metric=""minkowski"") # Test that a value error is raised if the metric is unknown with pytest.raises(ValueError): pairwise_distances(X, Y, metric=""blah"") " 30972,"def main(): username = demisto.params().get('credentials').get('identifier') password = demisto.params().get('credentials').get('password') # get the service API url url = demisto.params()['url'] max_fetch = demisto.params()['max_fetch'] LOG(f'Command being called is {demisto.command()}') try: client = Client(server_url=url, username=username, password=password, max_fetch=int(max_fetch)) if demisto.command() == 'thycotic-authenticate-token': return_results(authenticate_token_command(client)) elif demisto.command() == 'thycotic-secret-password-get': return_results(secret_password_get_command(client, **demisto.args())) elif demisto.command() == 'thycotic-secret-username-get': return_results(secret_username_get_command(client, **demisto.args())) elif demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. result = test_module(client) demisto.results(result) except Exception as e: return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}') ","def main(): username = demisto.params().get('credentials').get('identifier') password = demisto.params().get('credentials').get('password') # get the service API url url = demisto.params().get('url') max_fetch = demisto.params()['max_fetch'] LOG(f'Command being called is {demisto.command()}') try: client = Client(server_url=url, username=username, password=password, max_fetch=int(max_fetch)) if demisto.command() == 'thycotic-authenticate-token': return_results(authenticate_token_command(client)) elif demisto.command() == 'thycotic-secret-password-get': return_results(secret_password_get_command(client, **demisto.args())) elif demisto.command() == 'thycotic-secret-username-get': return_results(secret_username_get_command(client, **demisto.args())) elif demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. result = test_module(client) demisto.results(result) except Exception as e: return_error(f'Failed to execute {demisto.command()} command. 
Error: {str(e)}') " 8387,"def fit_lines(spectrum, model, fitter=fitting.LevMarLSQFitter(), exclude_regions=None, weights=None, window=None, **kwargs): """""" Fit the input models to the spectrum. The parameter values of the input models will be used as the initial conditions for the fit. Parameters ---------- spectrum : Spectrum1D The spectrum object over which the equivalent width will be calculated. model: `~astropy.modeling.Model` or list of `~astropy.modeling.Model` The model or list of models that contain the initial guess. fitter : `~astropy.modeling.fitting.Fitter`, optional Fitter instance to be used when fitting model to spectrum. exclude_regions : list of `~specutils.SpectralRegion` List of regions to exclude in the fitting. weights : array-like or 'unc', optional If 'unc', the unceratinties from the spectrum object are used to to calculate the weights. If array-like, represents the weights to use in the fitting. Not that if a mask is present on the spectrum, it will be applied to the ``weights`` as it would be to the spectrum itself. window : `~specutils.SpectralRegion` or list of `~specutils.SpectralRegion` Regions of the spectrum to use in the fitting. If None, then the whole spectrum will be used in the fitting. Additional keyword arguments are passed directly into the call to the ``fitter``. Returns ------- models : Compound model of `~astropy.modeling.Model` A compound model of models with fitted parameters. Notes ----- * Could add functionality to set the bounds in ``model`` if they are not set. * The models in the list of ``model`` are added together and passed as a compound model to the `~astropy.modeling.fitting.Fitter` class instance. """""" # # If we are to exclude certain regions, then remove them. # if exclude_regions is not None: spectrum = excise_regions(spectrum, exclude_regions) # # Make the model a list if not already # single_model_in = not isinstance(model, list) if single_model_in: model = [model] # # If a single model is passed in then just do that. # fitted_models = [] for modeli, model_guess in enumerate(model): # # Determine the window if it is not None. There # are several options here: # window = 4 * u.Angstrom -> Quantity # window = (4*u.Angstrom, 6*u.Angstrom) -> tuple # window = (4, 6)*u.Angstrom -> Quantity # # # Determine the window if there is one # if window is not None and isinstance(window, list): model_window = window[modeli] elif window is not None: model_window = window else: model_window = None # # Check to see if the model has units. If it does not # have units then we are going to ignore them. # ignore_units = getattr(model_guess, model_guess.param_names[0]).unit is None fit_model = _fit_lines(spectrum, model_guess, fitter, exclude_regions, weights, model_window, ignore_units, **kwargs) fitted_models.append(fit_model) if single_model_in: fitted_models = fitted_models[0] return fitted_models ","def fit_lines(spectrum, model, fitter=fitting.LevMarLSQFitter(), exclude_regions=None, weights=None, window=None, **kwargs): """""" Fit the input models to the spectrum. The parameter values of the input models will be used as the initial conditions for the fit. Parameters ---------- spectrum : Spectrum1D The spectrum object over which the equivalent width will be calculated. model: `~astropy.modeling.Model` or list of `~astropy.modeling.Model` The model or list of models that contain the initial guess. fitter : `~astropy.modeling.fitting.Fitter`, optional Fitter instance to be used when fitting model to spectrum. 
exclude_regions : list of `~specutils.SpectralRegion` List of regions to exclude in the fitting. weights : array-like or 'unc', optional If 'unc', the unceratinties from the spectrum object are used to to calculate the weights. If array-like, represents the weights to use in the fitting. Note that if a mask is present on the spectrum, it will be applied to the ``weights`` as it would be to the spectrum itself. window : `~specutils.SpectralRegion` or list of `~specutils.SpectralRegion` Regions of the spectrum to use in the fitting. If None, then the whole spectrum will be used in the fitting. Additional keyword arguments are passed directly into the call to the ``fitter``. Returns ------- models : Compound model of `~astropy.modeling.Model` A compound model of models with fitted parameters. Notes ----- * Could add functionality to set the bounds in ``model`` if they are not set. * The models in the list of ``model`` are added together and passed as a compound model to the `~astropy.modeling.fitting.Fitter` class instance. """""" # # If we are to exclude certain regions, then remove them. # if exclude_regions is not None: spectrum = excise_regions(spectrum, exclude_regions) # # Make the model a list if not already # single_model_in = not isinstance(model, list) if single_model_in: model = [model] # # If a single model is passed in then just do that. # fitted_models = [] for modeli, model_guess in enumerate(model): # # Determine the window if it is not None. There # are several options here: # window = 4 * u.Angstrom -> Quantity # window = (4*u.Angstrom, 6*u.Angstrom) -> tuple # window = (4, 6)*u.Angstrom -> Quantity # # # Determine the window if there is one # if window is not None and isinstance(window, list): model_window = window[modeli] elif window is not None: model_window = window else: model_window = None # # Check to see if the model has units. If it does not # have units then we are going to ignore them. 
# ignore_units = getattr(model_guess, model_guess.param_names[0]).unit is None fit_model = _fit_lines(spectrum, model_guess, fitter, exclude_regions, weights, model_window, ignore_units, **kwargs) fitted_models.append(fit_model) if single_model_in: fitted_models = fitted_models[0] return fitted_models " 35709,"def _resnet_fpn_extractor( backbone: resnet.ResNet, trainable_layers: int, returned_layers: Optional[List[int]] = None, extra_blocks: Optional[ExtraFPNBlock] = None, ) -> BackboneWithFPN: # select layers that wont be frozen if trainable_layers not in range(0, 6): raise ValueError(f"" trainable_layers expected to be in [0,5], got {trainable_layers}"") layers_to_train = [""layer4"", ""layer3"", ""layer2"", ""layer1"", ""conv1""][:trainable_layers] if trainable_layers == 5: layers_to_train.append(""bn1"") for name, parameter in backbone.named_parameters(): if all([not name.startswith(layer) for layer in layers_to_train]): parameter.requires_grad_(False) if extra_blocks is None: extra_blocks = LastLevelMaxPool() if returned_layers is None: returned_layers = [1, 2, 3, 4] if min(returned_layers) <= 0 or max(returned_layers) >= 5: raise ValueError(f"" `returned_layers` object should contain integers between [1,4], got {returned_layers} "") return_layers = {f""layer{k}"": str(v) for v, k in enumerate(returned_layers)} in_channels_stage2 = backbone.inplanes // 8 in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers] out_channels = 256 return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks) ","def _resnet_fpn_extractor( backbone: resnet.ResNet, trainable_layers: int, returned_layers: Optional[List[int]] = None, extra_blocks: Optional[ExtraFPNBlock] = None, ) -> BackboneWithFPN: # select layers that wont be frozen if trainable_layers not in range(0, 6): raise ValueError(f"" trainable_layers expected to be in [0,5], got {trainable_layers}"") layers_to_train = [""layer4"", ""layer3"", ""layer2"", ""layer1"", ""conv1""][:trainable_layers] if trainable_layers == 5: layers_to_train.append(""bn1"") for name, parameter in backbone.named_parameters(): if all([not name.startswith(layer) for layer in layers_to_train]): parameter.requires_grad_(False) if extra_blocks is None: extra_blocks = LastLevelMaxPool() if returned_layers is None: returned_layers = [1, 2, 3, 4] if min(returned_layers) <= 0 or max(returned_layers) >= 5: raise ValueError(f""Each returned layer must be in the range [1,4]. 
Got {returned_layers}"") return_layers = {f""layer{k}"": str(v) for v, k in enumerate(returned_layers)} in_channels_stage2 = backbone.inplanes // 8 in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers] out_channels = 256 return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks) " 15571,"def get_accessory(hass, driver, state, aid, config): """"""Take state and return an accessory object if supported."""""" if not aid: _LOGGER.warning( 'The entity ""%s"" is not supported, since it ' ""generates an invalid aid, please change it"", state.entity_id, ) return None a_type = None name = config.get(CONF_NAME, state.name) features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if state.domain == ""alarm_control_panel"": a_type = ""SecuritySystem"" elif state.domain in (""binary_sensor"", ""device_tracker"", ""person""): a_type = ""BinarySensor"" elif state.domain == ""climate"": a_type = ""Thermostat"" elif state.domain == ""cover"": device_class = state.attributes.get(ATTR_DEVICE_CLASS) if device_class in (DEVICE_CLASS_GARAGE, DEVICE_CLASS_GATE) and features & ( cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE ): a_type = ""GarageDoorOpener"" elif ( device_class == DEVICE_CLASS_WINDOW and features & cover.SUPPORT_SET_POSITION ): a_type = ""Window"" elif features & cover.SUPPORT_SET_POSITION: a_type = ""WindowCovering"" elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE): a_type = ""WindowCoveringBasic"" elif state.domain == ""fan"": a_type = ""Fan"" elif state.domain == ""humidifier"": a_type = ""HumidifierDehumidifier"" elif state.domain == ""light"": a_type = ""Light"" elif state.domain == ""lock"": a_type = ""Lock"" elif state.domain == ""media_player"": device_class = state.attributes.get(ATTR_DEVICE_CLASS) feature_list = config.get(CONF_FEATURE_LIST, []) if device_class == DEVICE_CLASS_TV: a_type = ""TelevisionMediaPlayer"" elif validate_media_player_features(state, feature_list): a_type = ""MediaPlayer"" elif state.domain == ""sensor"": device_class = state.attributes.get(ATTR_DEVICE_CLASS) unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if device_class == DEVICE_CLASS_TEMPERATURE or unit in ( TEMP_CELSIUS, TEMP_FAHRENHEIT, ): a_type = ""TemperatureSensor"" elif device_class == DEVICE_CLASS_HUMIDITY and unit == PERCENTAGE: a_type = ""HumiditySensor"" elif device_class == DEVICE_CLASS_PM25 or DEVICE_CLASS_PM25 in state.entity_id: a_type = ""AirQualitySensor"" elif device_class == DEVICE_CLASS_CO: a_type = ""CarbonMonoxideSensor"" elif device_class == DEVICE_CLASS_CO2 or DEVICE_CLASS_CO2 in state.entity_id: a_type = ""CarbonDioxideSensor"" elif device_class == DEVICE_CLASS_ILLUMINANCE or unit in (""lm"", LIGHT_LUX): a_type = ""LightSensor"" elif state.domain == ""switch"": switch_type = config.get(CONF_TYPE, TYPE_SWITCH) a_type = SWITCH_TYPES[switch_type] elif state.domain == ""vacuum"": a_type = ""Vacuum"" elif state.domain == ""remote"" and features & SUPPORT_ACTIVITY: a_type = ""TelevisionRemote"" elif state.domain in (""automation"", ""input_boolean"", ""remote"", ""scene"", ""script""): a_type = ""Switch"" elif state.domain == ""water_heater"": a_type = ""WaterHeater"" elif state.domain == ""camera"": a_type = ""Camera"" if a_type is None: return None _LOGGER.debug('Add ""%s"" as ""%s""', state.entity_id, a_type) return TYPES[a_type](hass, driver, name, state.entity_id, aid, config) ","def get_accessory(hass, driver, state, aid, config): """"""Take state and return an accessory object if supported."""""" if 
not aid: _LOGGER.warning( 'The entity ""%s"" is not supported, since it ' ""generates an invalid aid, please change it"", state.entity_id, ) return None a_type = None name = config.get(CONF_NAME, state.name) features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if state.domain == ""alarm_control_panel"": a_type = ""SecuritySystem"" elif state.domain in (""binary_sensor"", ""device_tracker"", ""person""): a_type = ""BinarySensor"" elif state.domain == ""climate"": a_type = ""Thermostat"" elif state.domain == ""cover"": device_class = state.attributes.get(ATTR_DEVICE_CLASS) if device_class in (DEVICE_CLASS_GARAGE, DEVICE_CLASS_GATE) and features & ( cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE ): a_type = ""GarageDoorOpener"" elif ( device_class == DEVICE_CLASS_WINDOW and features & cover.SUPPORT_SET_POSITION ): a_type = ""Window"" elif features & cover.SUPPORT_SET_POSITION: a_type = ""WindowCovering"" elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE): a_type = ""WindowCoveringBasic"" elif state.domain == ""fan"": a_type = ""Fan"" elif state.domain == ""humidifier"": a_type = ""HumidifierDehumidifier"" elif state.domain == ""light"": a_type = ""Light"" elif state.domain == ""lock"": a_type = ""Lock"" elif state.domain == ""media_player"": device_class = state.attributes.get(ATTR_DEVICE_CLASS) feature_list = config.get(CONF_FEATURE_LIST, []) if device_class == DEVICE_CLASS_TV: a_type = ""TelevisionMediaPlayer"" elif validate_media_player_features(state, feature_list): a_type = ""MediaPlayer"" elif state.domain == ""sensor"": device_class = state.attributes.get(ATTR_DEVICE_CLASS) unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if device_class == DEVICE_CLASS_TEMPERATURE or unit in ( TEMP_CELSIUS, TEMP_FAHRENHEIT, ): a_type = ""TemperatureSensor"" elif device_class == DEVICE_CLASS_HUMIDITY and unit == PERCENTAGE: a_type = ""HumiditySensor"" elif device_class == DEVICE_CLASS_PM25 or DEVICE_CLASS_PM25 in state.entity_id: a_type = ""AirQualitySensor"" elif device_class == DEVICE_CLASS_CO: a_type = ""CarbonMonoxideSensor"" elif device_class == DEVICE_CLASS_CO2 or DEVICE_CLASS_CO2 in state.entity_id: a_type = ""CarbonDioxideSensor"" elif device_class == DEVICE_CLASS_ILLUMINANCE or unit in (""lm"", LIGHT_LUX): a_type = ""LightSensor"" elif state.domain == ""switch"": switch_type = config.get(CONF_TYPE, TYPE_SWITCH) a_type = SWITCH_TYPES[switch_type] elif state.domain == ""vacuum"": a_type = ""Vacuum"" elif state.domain == ""remote"" and features & SUPPORT_ACTIVITY: a_type = ""ActivityRemote"" elif state.domain in (""automation"", ""input_boolean"", ""remote"", ""scene"", ""script""): a_type = ""Switch"" elif state.domain == ""water_heater"": a_type = ""WaterHeater"" elif state.domain == ""camera"": a_type = ""Camera"" if a_type is None: return None _LOGGER.debug('Add ""%s"" as ""%s""', state.entity_id, a_type) return TYPES[a_type](hass, driver, name, state.entity_id, aid, config) " 17693,"def load_extensions(): """"""Load entrypoint for any configured extension package Log a warning in case a requested extension is not available, or if a requested extension fails on load. Extensions to load are taken from the 'datalad.extensions.load' configuration item. 
"""""" from datalad import cfg load_extensions = cfg.get('datalad.extensions.load', get_all=True) if load_extensions: from datalad.utils import ensure_list exts = { ename: eload for ename, _, eload in iter_entrypoints('datalad.extensions') } for el in ensure_list(load_extensions): if el not in exts: lgr.warning('Requested extension %r is not available', el) continue try: exts[el]() except Exception as e: ce = CapturedException(e) lgr.warning('Could not load extension %r: %s', ce) ","def load_extensions(): """"""Load entrypoint for any configured extension package Log a warning in case a requested extension is not available, or if a requested extension fails on load. Extensions to load are taken from the 'datalad.extensions.load' configuration item. """""" from datalad import cfg load_extensions = cfg.get('datalad.extensions.load', get_all=True) if load_extensions: from datalad.utils import ensure_list exts = { ename: eload for ename, _, eload in iter_entrypoints('datalad.extensions') } for el in ensure_list(load_extensions): if el not in exts: lgr.warning('Requested extension %r is not available', el) continue try: exts[el]() except Exception as e: ce = CapturedException(e) lgr.warning('Could not load extension %r: %s',el, ce) " 24647,"def get_file(basename, base_url=_BASE_URL): r"""""" Downloads a file from a URL (if the file does not already exist) and returns the full local path to the file. Parameters ---------- basename : str Name of the file to be downloaded (extension included). base_url : str, optional The base URL of the file to be downloaded. Defaults to the main directory of the PlasmaPy data repository. Returns ------- path : str The full local path to the downloaded file. """""" if not ""."" in str(basename): raise ValueError(f""'filename' ({basename}) must include an extension."") path = os.path.join(_DOWNLOADS_PATH, basename) # If file doesn't exist, download it if not os.path.exists(path): url = urljoin(base_url, basename) # Get the requested content r = requests.get(url) # Validate that the content type matches one of the content types # the module knows how to download. # # Missing files on GitHub will resolve to a 404 html page, so we use # this as an indicator that the file may not exist. allowed_types = [""text/plain; charset=utf-8"", ""image/png""] if not r.headers[""Content-Type""] in allowed_types: raise OSError( f""The requested file is not an allowed"" f""Content-Type: {r.headers['Content-Type']}."" ""This may indicate that the file does not exist at "" ""the URL provided."" ) # Write the content to disk with open(path, ""wb"") as f: f.write(r.content) return path ","def get_file(basename, base_url=_BASE_URL): r"""""" Downloads a file from a URL (if the file does not already exist) and returns the full local path to the file. Parameters ---------- basename : str Name of the file to be downloaded (extension included). base_url : str, optional The base URL of the file to be downloaded. Defaults to the main directory of the PlasmaPy data repository. Returns ------- path : str The full local path to the downloaded file. """""" if not ""."" in str(basename): raise ValueError(f""'filename' ({basename}) must include an extension."") path = os.path.join(_DOWNLOADS_PATH, basename) # If file doesn't exist, download it if not os.path.exists(path): url = urljoin(base_url, basename) requested_content = requests.get(url) # Validate that the content type matches one of the content types # the module knows how to download. 
# # Missing files on GitHub will resolve to a 404 html page, so we use # this as an indicator that the file may not exist. allowed_types = [""text/plain; charset=utf-8"", ""image/png""] if not r.headers[""Content-Type""] in allowed_types: raise OSError( f""The requested file is not an allowed"" f""Content-Type: {r.headers['Content-Type']}."" ""This may indicate that the file does not exist at "" ""the URL provided."" ) # Write the content to disk with open(path, ""wb"") as f: f.write(r.content) return path " 38452,"def purge0d_faces_and_nodes(gb): for g, d in gb: if g.dim != 0: continue _purge_face_and_nodes_from_grid(g) for e, d in gb.edges(): mg = d[""mortar_grid""] if mg.dim != 0: continue for sg in mg.side_grids.values(): _purge_face_and_nodes_from_grid(sg) ","def purge_0d_faces_and_nodes(gb): for g, d in gb: if g.dim != 0: continue _purge_face_and_nodes_from_grid(g) for e, d in gb.edges(): mg = d[""mortar_grid""] if mg.dim != 0: continue for sg in mg.side_grids.values(): _purge_face_and_nodes_from_grid(sg) " 58911,"def save_ansys_path(exe_loc=None): # pragma: no cover """"""Find ANSYS path or query user. If no ``exe_loc`` argument is supplied, this function attempt to obtain the MAPDL executable from (and in order): - The default ansys paths (i.e. 'C:/Program Files/Ansys Inc/vXXX/ansys/bin/ansysXXX') - The configuration file - User input If ``exe_loc`` is supplied, this function does some checks. If successful, it will write that ``exe_loc`` into the config file. Parameters ---------- exe_loc : str, optional Path of the MAPDL executable ('ansysXXX'), by default None Returns ------- str Path of the MAPDL executable. Notes ----- The configuration file location (``config.txt``) can be found in ``appdirs.user_data_dir(""ansys_mapdl_core"")``. For example: >>> import appdirs >>> import os >>> print(os.path.join(appdirs.user_data_dir(""ansys_mapdl_core""), ""config.txt"")) C:/Users/gayuso/AppData/Local/ansys_mapdl_core/ansys_mapdl_core/config.txt You can change the default ``exe_loc`` either by modifying the mentioned ``config.txt`` file or by executing this function: >>> from ansys.mapdl.core.launcher import save_ansys_path >>> save_ansys_path('/new/path/to/executable') """""" if exe_loc is None: exe_loc, _ = find_ansys() if is_valid_executable_path(exe_loc): # pragma: not cover if not is_common_executable_path(exe_loc): warn_uncommon_executable_path(exe_loc) change_default_ansys_path(exe_loc) return exe_loc if exe_loc is not None: if is_valid_executable_path(exe_loc): return exe_loc # pragma: no cover # otherwise, query user for the location print(""Cached ANSYS executable not found"") print( ""You are about to enter manually the path of the ANSYS MAPDL executable (ansysXXX, where XXX is the version"" ""This file is very likely to contained in path ending in 'vXXX/ansys/bin/ansysXXX', but it is not required.\n"" ""\nIf you experience problems with the input path you can overwrite the configuration file by typing:\n"" "">>> from ansys.mapdl.core.launcher import save_ansys_path\n"" "">>> save_ansys_path('/new/path/to/executable/')\n"" ) need_path = True while need_path: # pragma: no cover exe_loc = input(""Enter the location of an ANSYS executable (ansysXXX):"") if is_valid_executable_path(exe_loc): if not is_common_executable_path(exe_loc): warn_uncommon_executable_path(exe_loc) with open(CONFIG_FILE, ""w"") as f: f.write(exe_loc) need_path = False else: print( ""The supplied path is either: not a valid file path, or does not match 'ansysXXX' name."" ) return exe_loc ","def 
save_ansys_path(exe_loc=None): # pragma: no cover """"""Find ANSYS path or query user. If no ``exe_loc`` argument is supplied, this function attempt to obtain the MAPDL executable from (and in order): - The default ansys paths (i.e. 'C:/Program Files/Ansys Inc/vXXX/ansys/bin/ansysXXX') - The configuration file - User input If ``exe_loc`` is supplied, this function does some checks. If successful, it will write that ``exe_loc`` into the config file. Parameters ---------- exe_loc : str, optional Path of the MAPDL executable ('ansysXXX'), by default None Returns ------- str Path of the MAPDL executable. Notes ----- The configuration file location (``config.txt``) can be found in ``appdirs.user_data_dir(""ansys_mapdl_core"")``. For example: .. code:: python >>> import appdirs >>> import os >>> print(os.path.join(appdirs.user_data_dir(""ansys_mapdl_core""), ""config.txt"")) C:/Users/gayuso/AppData/Local/ansys_mapdl_core/ansys_mapdl_core/config.txt You can change the default ``exe_loc`` either by modifying the mentioned ``config.txt`` file or by executing this function: .. code:: python >>> from ansys.mapdl.core.launcher import save_ansys_path >>> save_ansys_path('/new/path/to/executable') """""" if exe_loc is None: exe_loc, _ = find_ansys() if is_valid_executable_path(exe_loc): # pragma: not cover if not is_common_executable_path(exe_loc): warn_uncommon_executable_path(exe_loc) change_default_ansys_path(exe_loc) return exe_loc if exe_loc is not None: if is_valid_executable_path(exe_loc): return exe_loc # pragma: no cover # otherwise, query user for the location print(""Cached ANSYS executable not found"") print( ""You are about to enter manually the path of the ANSYS MAPDL executable (ansysXXX, where XXX is the version"" ""This file is very likely to contained in path ending in 'vXXX/ansys/bin/ansysXXX', but it is not required.\n"" ""\nIf you experience problems with the input path you can overwrite the configuration file by typing:\n"" "">>> from ansys.mapdl.core.launcher import save_ansys_path\n"" "">>> save_ansys_path('/new/path/to/executable/')\n"" ) need_path = True while need_path: # pragma: no cover exe_loc = input(""Enter the location of an ANSYS executable (ansysXXX):"") if is_valid_executable_path(exe_loc): if not is_common_executable_path(exe_loc): warn_uncommon_executable_path(exe_loc) with open(CONFIG_FILE, ""w"") as f: f.write(exe_loc) need_path = False else: print( ""The supplied path is either: not a valid file path, or does not match 'ansysXXX' name."" ) return exe_loc " 14301,"def maxCtxFont(font): """"""Calculate the usMaxContext value for an entire font."""""" maxCtx = 0 for tag in ('GSUB', 'GPOS'): if tag not in font: continue table = font[tag].table if getattr(table, 'LookupList', None) is None: continue for lookup in table.LookupList.Lookup: for st in lookup.SubTable: maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) return maxCtx ","def maxCtxFont(font): """"""Calculate the usMaxContext value for an entire font."""""" maxCtx = 0 for tag in ('GSUB', 'GPOS'): if tag not in font: continue table = font[tag].table if not table.LookupList: continue for lookup in table.LookupList.Lookup: for st in lookup.SubTable: maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) return maxCtx " 57807,"def ip_command(ips): try: from urllib2 import build_opener, ProxyHandler from ipwhois import IPWhois except ImportError as e: return_error(""The Docker needs to be updated to use an IP command"") results = [] for ip in argToList(ips): if demisto.params().get('proxy'): proxies 
= assign_params(http=handle_proxy().get('http'), https=handle_proxy().get('https')) handler = ProxyHandler(proxies) opener = build_opener(handler) ip_obj = IPWhois(ip, proxy_opener=opener) else: ip_obj = IPWhois(ip) response = ip_obj.lookup_rdap(depth=1) dbot_score = Common.DBotScore( indicator=ip, indicator_type=DBotScoreType.IP, integration_name='Whois', score=Common.DBotScore.NONE ) related_feed = Common.FeedRelatedIndicators( value=response.get('network', {}).get('cidr'), indicator_type='IP' ) ip_output = Common.IP( ip=ip, asn=response.get('asn'), geo_country=response.get('asn_country_code'), organization_name=response.get('asn_description'), dbot_score=dbot_score, feed_related_indicators=related_feed ) result = CommandResults( outputs_prefix='Whois.IP', outputs_key_field='query', outputs=response, readable_output=tableToMarkdown('Whois results:', response, ['query', 'asn', 'asn_cidr', 'asn_country_code', 'asn_date', 'asn_description']), raw_response=response, indicator=ip_output ) results.append(result) return results ","def ip_command(ips): try: from urllib2 import build_opener, ProxyHandler from ipwhois import IPWhois except ImportError as e: return_error(""The Docker needs to be updated to use an IP command"") results = [] for ip in argToList(ips): proxy_opener = None if demisto.params().get('proxy'): proxies = assign_params(http=handle_proxy().get('http'), https=handle_proxy().get('https')) handler = ProxyHandler(proxies) opener = build_opener(handler) proxy_opener=opener ip_obj = IPWhois(ip, proxy_opener) response = ip_obj.lookup_rdap(depth=1) dbot_score = Common.DBotScore( indicator=ip, indicator_type=DBotScoreType.IP, integration_name='Whois', score=Common.DBotScore.NONE ) related_feed = Common.FeedRelatedIndicators( value=response.get('network', {}).get('cidr'), indicator_type='IP' ) ip_output = Common.IP( ip=ip, asn=response.get('asn'), geo_country=response.get('asn_country_code'), organization_name=response.get('asn_description'), dbot_score=dbot_score, feed_related_indicators=related_feed ) result = CommandResults( outputs_prefix='Whois.IP', outputs_key_field='query', outputs=response, readable_output=tableToMarkdown('Whois results:', response, ['query', 'asn', 'asn_cidr', 'asn_country_code', 'asn_date', 'asn_description']), raw_response=response, indicator=ip_output ) results.append(result) return results " 22744,"def _extract_plugin_name(dns_snap_path): return re.match(r'^.*certbot-(dns-\w+)_.*\.snap$', dns_snap_path).group(1) ","def _extract_plugin_name(dns_snap_path): return re.match(r'^certbot-(dns-\w+)_.*\.snap$', dns_snap_path).group(1) " 6501,"def validate_for_tax_category(doc): if frappe.db.exists(doc.doctype, {""company"": doc.company, ""tax_category"": doc.tax_category, ""disabled"": 0}): frappe.throw(_(""A template with tax category {0} already exist. Only one template is allowed with each tax category"").format(frappe.bold(doc.tax_category))) ","def validate_for_tax_category(doc): if frappe.db.exists(doc.doctype, {""company"": doc.company, ""tax_category"": doc.tax_category, ""disabled"": 0}): frappe.throw(_(""A template with tax category {0} already exists. Only one template is allowed with each tax category"").format(frappe.bold(doc.tax_category))) " 12782,"def generate_targets_metadata(targets_directory, target_files, version, expiration_date, delegations=None, write_consistent_targets=False, use_existing_fileinfo=False, storage_backend=None, repository_name='default'): """""" Generate the targets metadata object. 
The targets in 'target_files' must exist at the same path they should on the repo. 'target_files' is a list of targets. The 'custom' field of the targets metadata is not currently supported. targets_directory: The absolute path to a directory containing the target files and directories of the repository. target_files: The target files tracked by 'targets.json'. 'target_files' is a dictionary mapping target paths (relative to the targets directory) to a dict matching tuf.formats.LOOSE_FILEINFO_SCHEMA. LOOSE_FILEINFO_SCHEMA can support multiple different value patterns: 1) an empty dictionary - for when fileinfo should be generated 2) a dictionary matching tuf.formats.CUSTOM_SCHEMA - for when fileinfo should be generated, with the supplied custom metadata attached 3) a dictionary matching tuf.formats.FILEINFO_SCHEMA - for when full fileinfo is provided in conjunction with use_existing_fileinfo version: The metadata version number. Clients use the version number to determine if the downloaded version is newer than the one currently trusted. expiration_date: The expiration date of the metadata file. Conformant to 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'. delegations: The delegations made by the targets role to be generated. 'delegations' must match 'tuf.formats.DELEGATIONS_SCHEMA'. write_consistent_targets: Boolean that indicates whether file digests should be prepended to the target files. NOTE: it is an error for write_consistent_targets to be True when use_existing_fileinfo is also True. We can not create consistent targets for a target file where the fileinfo isn't generated by tuf. use_existing_fileinfo: Boolean that indicates whether to use the complete fileinfo, including hashes, as already exists in the roledb (True) or whether to generate hashes (False). storage_backend: An object which implements securesystemslib.storage.StorageBackendInterface. repository_name: The name of the repository. If not supplied, 'default' repository is used. securesystemslib.exceptions.FormatError, if an error occurred trying to generate the targets metadata object. securesystemslib.exceptions.Error, if use_existing_fileinfo is False and any of the target files cannot be read. securesystemslib.exceptions.Error, if use_existing_fileinfo is True and some of the target files do not have corresponding hashes in the roledb. securesystemslib.exceptions.Error, if both of use_existing_fileinfo and write_consistent_targets are True. If use_existing_fileinfo is False, the target files are read from storage and file information about them is generated. If 'write_consistent_targets' is True, each target in 'target_files' will be copied to a file with a digest prepended to its filename. For example, if 'some_file.txt' is one of the targets of 'target_files', consistent targets .some_file.txt, .some_file.txt, etc., are created and the content of 'some_file.txt' will be copied into them. A targets metadata object, conformant to 'tuf.formats.TARGETS_SCHEMA'. delegation_update: Boolean set to True if delegations are updated. """""" # Do the arguments have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. 
securesystemslib.formats.PATH_SCHEMA.check_match(targets_directory) tuf.formats.PATH_FILEINFO_SCHEMA.check_match(target_files) tuf.formats.METADATAVERSION_SCHEMA.check_match(version) securesystemslib.formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date) securesystemslib.formats.BOOLEAN_SCHEMA.check_match(write_consistent_targets) securesystemslib.formats.BOOLEAN_SCHEMA.check_match(use_existing_fileinfo) if write_consistent_targets and use_existing_fileinfo: raise securesystemslib.exceptions.Error('Cannot support writing consistent' ' targets and using existing fileinfo.') if delegations: tuf.formats.DELEGATIONS_SCHEMA.check_match(delegations) # Check if delegated role keys or threshold are changed and update # delegations accordingly. Collect a new delegations key dictionary. keydict = {} delegation_update = False for role in delegations['roles']: rolename = role['name'] role_keyids = tuf.roledb.get_role_keyids(rolename, repository_name) role_threshold= tuf.roledb.get_role_threshold(rolename, repository_name) if role_keyids != role['keyids'] or \ role_threshold != role['threshold']: role['keyids'] = role_keyids role['threshold'] = role_threshold delegation_update = True _add_keys_to_keydict(keydict, role_keyids, repository_name) if delegation_update: delegations['keys'] = keydict # Store the file attributes of targets in 'target_files'. 'filedict', # conformant to 'tuf.formats.FILEDICT_SCHEMA', is added to the # targets metadata object returned. filedict = {} if use_existing_fileinfo: # Use the provided fileinfo dicts, conforming to FILEINFO_SCHEMA, rather than # generating fileinfo for target, fileinfo in six.iteritems(target_files): # Ensure all fileinfo entries in target_files have a non-empty hashes dict if not fileinfo.get('hashes', None): raise securesystemslib.exceptions.Error('use_existing_hashes option set' ' but no hashes exist in roledb for ' + repr(target)) # and a non-empty length if fileinfo.get('length', -1) < 0: raise securesystemslib.exceptions.Error('use_existing_hashes option set' ' but fileinfo\'s length is not set') filedict[target] = fileinfo else: # Generate the fileinfo dicts by accessing the target files on storage. # Default to accessing files on local storage. if storage_backend is None: storage_backend = securesystemslib.storage.FilesystemBackend() filedict = _generate_targets_fileinfo(target_files, targets_directory, write_consistent_targets, storage_backend) # Generate the targets metadata object. # Use generalized build_dict_conforming_to_schema func to produce a dict that # contains all the appropriate information for targets metadata, # checking that the result conforms to the appropriate schema. # TODO: Later, probably after the rewrite for TUF Issue #660, generalize # further, upward, by replacing generate_targets_metadata, # generate_root_metadata, etc. with one function that generates # metadata, possibly rolling that upwards into the calling function. # There are very few things that really need to be done differently. 
if delegations is not None: return tuf.formats.build_dict_conforming_to_schema( tuf.formats.TARGETS_SCHEMA, version=version, expires=expiration_date, targets=filedict, delegations=delegations), delegation_update else: return tuf.formats.build_dict_conforming_to_schema( tuf.formats.TARGETS_SCHEMA, version=version, expires=expiration_date, targets=filedict), False # TODO: As an alternative to the odd if/else above where we decide whether or ","def generate_targets_metadata(targets_directory, target_files, version, expiration_date, delegations=None, write_consistent_targets=False, use_existing_fileinfo=False, storage_backend=None, repository_name='default'): """""" Generate the targets metadata object. The targets in 'target_files' must exist at the same path they should on the repo. 'target_files' is a list of targets. The 'custom' field of the targets metadata is not currently supported. targets_directory: The absolute path to a directory containing the target files and directories of the repository. target_files: The target files tracked by 'targets.json'. 'target_files' is a dictionary mapping target paths (relative to the targets directory) to a dict matching tuf.formats.LOOSE_FILEINFO_SCHEMA. LOOSE_FILEINFO_SCHEMA can support multiple different value patterns: 1) an empty dictionary - for when fileinfo should be generated 2) a dictionary matching tuf.formats.CUSTOM_SCHEMA - for when fileinfo should be generated, with the supplied custom metadata attached 3) a dictionary matching tuf.formats.FILEINFO_SCHEMA - for when full fileinfo is provided in conjunction with use_existing_fileinfo version: The metadata version number. Clients use the version number to determine if the downloaded version is newer than the one currently trusted. expiration_date: The expiration date of the metadata file. Conformant to 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'. delegations: The delegations made by the targets role to be generated. 'delegations' must match 'tuf.formats.DELEGATIONS_SCHEMA'. write_consistent_targets: Boolean that indicates whether file digests should be prepended to the target files. NOTE: it is an error for write_consistent_targets to be True when use_existing_fileinfo is also True. We can not create consistent targets for a target file where the fileinfo isn't generated by tuf. use_existing_fileinfo: Boolean that indicates whether to use the complete fileinfo, including hashes, as already exists in the roledb (True) or whether to generate hashes (False). storage_backend: An object which implements securesystemslib.storage.StorageBackendInterface. repository_name: The name of the repository. If not supplied, 'default' repository is used. securesystemslib.exceptions.FormatError, if an error occurred trying to generate the targets metadata object. securesystemslib.exceptions.Error, if use_existing_fileinfo is False and any of the target files cannot be read. securesystemslib.exceptions.Error, if use_existing_fileinfo is True and some of the target files do not have corresponding hashes in the roledb. securesystemslib.exceptions.Error, if both of use_existing_fileinfo and write_consistent_targets are True. If use_existing_fileinfo is False, the target files are read from storage and file information about them is generated. If 'write_consistent_targets' is True, each target in 'target_files' will be copied to a file with a digest prepended to its filename. 
For example, if 'some_file.txt' is one of the targets of 'target_files', consistent targets .some_file.txt, .some_file.txt, etc., are created and the content of 'some_file.txt' will be copied into them. A targets metadata object, conformant to 'tuf.formats.TARGETS_SCHEMA'. delegation_update: Boolean set to True if delegations are updated. """""" # Do the arguments have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. securesystemslib.formats.PATH_SCHEMA.check_match(targets_directory) tuf.formats.PATH_FILEINFO_SCHEMA.check_match(target_files) tuf.formats.METADATAVERSION_SCHEMA.check_match(version) securesystemslib.formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date) securesystemslib.formats.BOOLEAN_SCHEMA.check_match(write_consistent_targets) securesystemslib.formats.BOOLEAN_SCHEMA.check_match(use_existing_fileinfo) if write_consistent_targets and use_existing_fileinfo: raise securesystemslib.exceptions.Error('Cannot support writing consistent' ' targets and using existing fileinfo.') if delegations: tuf.formats.DELEGATIONS_SCHEMA.check_match(delegations) # Check if delegated role keys or threshold are changed and update # delegations accordingly. Collect a new delegations key dictionary. keydict = {} delegation_update = False for role in delegations['roles']: rolename = role['name'] role_keyids = tuf.roledb.get_role_keyids(rolename, repository_name) role_threshold = tuf.roledb.get_role_threshold(rolename, repository_name) if role_keyids != role['keyids'] or \ role_threshold != role['threshold']: role['keyids'] = role_keyids role['threshold'] = role_threshold delegation_update = True _add_keys_to_keydict(keydict, role_keyids, repository_name) if delegation_update: delegations['keys'] = keydict # Store the file attributes of targets in 'target_files'. 'filedict', # conformant to 'tuf.formats.FILEDICT_SCHEMA', is added to the # targets metadata object returned. filedict = {} if use_existing_fileinfo: # Use the provided fileinfo dicts, conforming to FILEINFO_SCHEMA, rather than # generating fileinfo for target, fileinfo in six.iteritems(target_files): # Ensure all fileinfo entries in target_files have a non-empty hashes dict if not fileinfo.get('hashes', None): raise securesystemslib.exceptions.Error('use_existing_hashes option set' ' but no hashes exist in roledb for ' + repr(target)) # and a non-empty length if fileinfo.get('length', -1) < 0: raise securesystemslib.exceptions.Error('use_existing_hashes option set' ' but fileinfo\'s length is not set') filedict[target] = fileinfo else: # Generate the fileinfo dicts by accessing the target files on storage. # Default to accessing files on local storage. if storage_backend is None: storage_backend = securesystemslib.storage.FilesystemBackend() filedict = _generate_targets_fileinfo(target_files, targets_directory, write_consistent_targets, storage_backend) # Generate the targets metadata object. # Use generalized build_dict_conforming_to_schema func to produce a dict that # contains all the appropriate information for targets metadata, # checking that the result conforms to the appropriate schema. # TODO: Later, probably after the rewrite for TUF Issue #660, generalize # further, upward, by replacing generate_targets_metadata, # generate_root_metadata, etc. with one function that generates # metadata, possibly rolling that upwards into the calling function. 
# There are very few things that really need to be done differently. if delegations is not None: return tuf.formats.build_dict_conforming_to_schema( tuf.formats.TARGETS_SCHEMA, version=version, expires=expiration_date, targets=filedict, delegations=delegations), delegation_update else: return tuf.formats.build_dict_conforming_to_schema( tuf.formats.TARGETS_SCHEMA, version=version, expires=expiration_date, targets=filedict), False # TODO: As an alternative to the odd if/else above where we decide whether or " 35665,"def main(args): if args.weights and PM is None: raise ImportError(""The prototype module couldn't be found. Please install the latest torchvision nightly."") if args.output_dir: utils.mkdir(args.output_dir) utils.init_distributed_mode(args) print(args) device = torch.device(args.device) # Data loading code print(""Loading data"") dataset, num_classes = get_dataset(args.dataset, ""train"", get_transform(True, args), args.data_path) dataset_test, _ = get_dataset(args.dataset, ""val"", get_transform(False, args), args.data_path) print(""Creating data loaders"") if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test) else: train_sampler = torch.utils.data.RandomSampler(dataset) test_sampler = torch.utils.data.SequentialSampler(dataset_test) if args.aspect_ratio_group_factor >= 0: group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor) train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size) else: train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, args.batch_size, drop_last=True) data_loader = torch.utils.data.DataLoader( dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, collate_fn=utils.collate_fn ) data_loader_test = torch.utils.data.DataLoader( dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn ) print(""Creating model"") kwargs = {""trainable_backbone_layers"": args.trainable_backbone_layers} if ""rcnn"" in args.model: if args.rpn_score_thresh is not None: kwargs[""rpn_score_thresh""] = args.rpn_score_thresh if not args.weights: model = torchvision.models.detection.__dict__[args.model]( pretrained=args.pretrained, num_classes=num_classes, **kwargs ) else: model = PM.detection.__dict__[args.model](weights=args.weights, num_classes=num_classes, **kwargs) model.to(device) if args.distributed and args.sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model_without_ddp = model if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) model_without_ddp = model.module params = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) scaler = torch.cuda.amp.GradScaler() if args.amp else None args.lr_scheduler = args.lr_scheduler.lower() if args.lr_scheduler == ""multisteplr"": lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma) elif args.lr_scheduler == ""cosineannealinglr"": lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) else: raise RuntimeError( f""Invalid lr scheduler '{args.lr_scheduler}'. 
Only MultiStepLR and CosineAnnealingLR are supported."" ) if args.resume: checkpoint = torch.load(args.resume, map_location=""cpu"") model_without_ddp.load_state_dict(checkpoint[""model""]) optimizer.load_state_dict(checkpoint[""optimizer""]) lr_scheduler.load_state_dict(checkpoint[""lr_scheduler""]) args.start_epoch = checkpoint[""epoch""] + 1 if args.amp: scaler.load_state_dict(checkpoint[""scaler""]) if args.test_only: evaluate(model, data_loader_test, device=device) return print(""Start training"") start_time = time.time() for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq, scaler) lr_scheduler.step() if args.output_dir: checkpoint = { ""model"": model_without_ddp.state_dict(), ""optimizer"": optimizer.state_dict(), ""lr_scheduler"": lr_scheduler.state_dict(), ""args"": args, ""epoch"": epoch, } if args.amp: checkpoint['scaler'] = scaler.state_dict() utils.save_on_master(checkpoint, os.path.join(args.output_dir, f""model_{epoch}.pth"")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, ""checkpoint.pth"")) # evaluate after every epoch evaluate(model, data_loader_test, device=device) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print(f""Training time {total_time_str}"") ","def main(args): if args.weights and PM is None: raise ImportError(""The prototype module couldn't be found. Please install the latest torchvision nightly."") if args.output_dir: utils.mkdir(args.output_dir) utils.init_distributed_mode(args) print(args) device = torch.device(args.device) # Data loading code print(""Loading data"") dataset, num_classes = get_dataset(args.dataset, ""train"", get_transform(True, args), args.data_path) dataset_test, _ = get_dataset(args.dataset, ""val"", get_transform(False, args), args.data_path) print(""Creating data loaders"") if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test) else: train_sampler = torch.utils.data.RandomSampler(dataset) test_sampler = torch.utils.data.SequentialSampler(dataset_test) if args.aspect_ratio_group_factor >= 0: group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor) train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size) else: train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, args.batch_size, drop_last=True) data_loader = torch.utils.data.DataLoader( dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, collate_fn=utils.collate_fn ) data_loader_test = torch.utils.data.DataLoader( dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn ) print(""Creating model"") kwargs = {""trainable_backbone_layers"": args.trainable_backbone_layers} if ""rcnn"" in args.model: if args.rpn_score_thresh is not None: kwargs[""rpn_score_thresh""] = args.rpn_score_thresh if not args.weights: model = torchvision.models.detection.__dict__[args.model]( pretrained=args.pretrained, num_classes=num_classes, **kwargs ) else: model = PM.detection.__dict__[args.model](weights=args.weights, num_classes=num_classes, **kwargs) model.to(device) if args.distributed and args.sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model_without_ddp = model if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, 
device_ids=[args.gpu]) model_without_ddp = model.module params = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) scaler = torch.cuda.amp.GradScaler() if args.amp else None args.lr_scheduler = args.lr_scheduler.lower() if args.lr_scheduler == ""multisteplr"": lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma) elif args.lr_scheduler == ""cosineannealinglr"": lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) else: raise RuntimeError( f""Invalid lr scheduler '{args.lr_scheduler}'. Only MultiStepLR and CosineAnnealingLR are supported."" ) if args.resume: checkpoint = torch.load(args.resume, map_location=""cpu"") model_without_ddp.load_state_dict(checkpoint[""model""]) optimizer.load_state_dict(checkpoint[""optimizer""]) lr_scheduler.load_state_dict(checkpoint[""lr_scheduler""]) args.start_epoch = checkpoint[""epoch""] + 1 if args.amp: scaler.load_state_dict(checkpoint[""scaler""]) if args.test_only: evaluate(model, data_loader_test, device=device) return print(""Start training"") start_time = time.time() for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq, scaler) lr_scheduler.step() if args.output_dir: checkpoint = { ""model"": model_without_ddp.state_dict(), ""optimizer"": optimizer.state_dict(), ""lr_scheduler"": lr_scheduler.state_dict(), ""args"": args, ""epoch"": epoch, } if args.amp: checkpoint[""scaler""] = scaler.state_dict() utils.save_on_master(checkpoint, os.path.join(args.output_dir, f""model_{epoch}.pth"")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, ""checkpoint.pth"")) # evaluate after every epoch evaluate(model, data_loader_test, device=device) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print(f""Training time {total_time_str}"") " 30819,"def enable_user_command(client, args): return enablee_disable_user_command(client, args, True) ","def enable_user_command(client, args): return enable_disable_user_command(client, args, True) " 33887,"def _workflow_wait_executor( func: Callable, context: ""WorkflowStepContext"", step_id: ""StepID"", baked_inputs: ""_BakedWorkflowInputs"", runtime_options: ""WorkflowStepRuntimeOptions"") -> Any: """"""Executor of 'workflow.wait' steps."""""" # Part 1: update the context for the step workflow_context.update_workflow_step_context(context, step_id) context = workflow_context.get_workflow_step_context() step_type = runtime_options.step_type assert step_type == StepType.WAIT wait_options = runtime_options.ray_options.get(""wait_options"", {}) # Part 2: wait inputs ready_workflows, remaining_workflows = baked_inputs.wait(**wait_options) ready_objects = [] for w in ready_workflows: obj, _, = _resolve_object_ref(w.ref.ref) ready_objects.append(obj) persisted_output = (ready_objects, remaining_workflows) volatile_output = None # Part 3: execute the step store = workflow_storage.get_workflow_storage() commit_step(store, step_id, persisted_output, exception=None) if context.last_step_of_workflow: # advance the progress of the workflow store.advance_progress(step_id) _record_step_status(step_id, WorkflowStatus.SUCCESSFUL) logger.info(get_step_status_info(WorkflowStatus.SUCCESSFUL)) return persisted_output, volatile_output ","def 
_workflow_wait_executor( func: Callable, context: ""WorkflowStepContext"", step_id: ""StepID"", baked_inputs: ""_BakedWorkflowInputs"", runtime_options: ""WorkflowStepRuntimeOptions"") -> Any: """"""Executor of 'workflow.wait' steps."""""" # Part 1: update the context for the step workflow_context.update_workflow_step_context(context, step_id) context = workflow_context.get_workflow_step_context() step_type = runtime_options.step_type assert step_type == StepType.WAIT wait_options = runtime_options.ray_options.get(""wait_options"", {}) # Part 2: Resolve any ready workflows. ready_workflows, remaining_workflows = baked_inputs.wait(**wait_options) ready_objects = [] for w in ready_workflows: obj, _, = _resolve_object_ref(w.ref.ref) ready_objects.append(obj) persisted_output = (ready_objects, remaining_workflows) volatile_output = None # Part 3: execute the step store = workflow_storage.get_workflow_storage() commit_step(store, step_id, persisted_output, exception=None) if context.last_step_of_workflow: # advance the progress of the workflow store.advance_progress(step_id) _record_step_status(step_id, WorkflowStatus.SUCCESSFUL) logger.info(get_step_status_info(WorkflowStatus.SUCCESSFUL)) return persisted_output, volatile_output " 31579,"def test_module(client): results = client._http_request('GET', 'ping', ok_codes=(200, 403)) if ""success"" in results: return ""ok"" else: return results['message'] ","def test_module(client): results = client._http_request('GET', 'ping', ok_codes=(200, 403)) if ""success"" in results: return ""ok"" else: return results.get('message') " 5160,"def test_countourf_legend_elements(): from matplotlib.patches import Rectangle x = np.arange(1, 10) y = x.reshape(-1, 1) h = x * y cs = plt.contourf(h, levels=[10, 30, 50], colors=['#FFFF00', '#FF00FF', '#00FFFF'], extend='both') cs.cmap.set_over('red') cs.cmap.set_under('blue') cs.changed() artists, labels = cs.legend_elements() assert labels == ['$x \\leq -1e+250s$', '$10.0 < x \\leq 30.0$', '$30.0 < x \\leq 50.0$', '$x > 1e+250s$'] expected_colors = ((0.0, 0.0, 1.0, 1.0), (1.0, 1.0, 0.0, 1.0), (1.0, 0.0, 1.0, 1.0), (1.0, 0.0, 0.0, 1.0)) assert all(isinstance(a, Rectangle) for a in artists) assert all(a.get_facecolor() == c for a, c in zip(artists, expected_colors)) ","def test_contourf_legend_elements(): from matplotlib.patches import Rectangle x = np.arange(1, 10) y = x.reshape(-1, 1) h = x * y cs = plt.contourf(h, levels=[10, 30, 50], colors=['#FFFF00', '#FF00FF', '#00FFFF'], extend='both') cs.cmap.set_over('red') cs.cmap.set_under('blue') cs.changed() artists, labels = cs.legend_elements() assert labels == ['$x \\leq -1e+250s$', '$10.0 < x \\leq 30.0$', '$30.0 < x \\leq 50.0$', '$x > 1e+250s$'] expected_colors = ((0.0, 0.0, 1.0, 1.0), (1.0, 1.0, 0.0, 1.0), (1.0, 0.0, 1.0, 1.0), (1.0, 0.0, 0.0, 1.0)) assert all(isinstance(a, Rectangle) for a in artists) assert all(a.get_facecolor() == c for a, c in zip(artists, expected_colors)) " 30493,"def get_indicator_type(indicator_type, item): """"""Checks the indicator type Args: indicator_type: IP, URL, domain or hash item: the indicator row from the csv response Returns: The indicator type per the indicators defined in Demisto """""" if indicator_type == 'ip': return get_ip_type(item.get('Name')) elif indicator_type == 'hash': return 'File ' + item.get('Algorithm') elif indicator_type == 'domain': return 'Domain' elif indicator_type == 'url': return 'URL' ","def get_indicator_type(indicator_type, item): """"""Checks the indicator type Args: indicator_type: IP, URL, domain or 
hash item: the indicator row from the csv response Returns: The indicator type per the indicators defined in Demisto """""" if indicator_type == 'ip': return get_ip_type(item.get('Name')) elif indicator_type == 'hash': return FeedIndicatorType.File elif indicator_type == 'domain': return 'Domain' elif indicator_type == 'url': return 'URL' " 21559,"def create_requester( user_id, access_token_id=None, is_guest=False, shadow_banned=False, device_id=None, app_service=None, authenticated_entity=None, ): """""" Create a new ``Requester`` object Args: user_id (str|UserID): id of the user making the request access_token_id (int|None): *ID* of the access token used for this request, or None if it came via the appservice API or similar is_guest (bool): True if the user making this request is a guest user shadow_banned (bool): True if the user making this request is shadow-banned. device_id (str|None): device_id which was set at authentication time app_service (ApplicationService|None): the AS requesting on behalf of the user authenticated_entity: The entity that authenticatd when making the request, this is different than the user_id when an admin user or the server is ""puppeting"" the user. Returns: Requester """""" if not isinstance(user_id, UserID): user_id = UserID.from_string(user_id) if authenticated_entity is None: authenticated_entity = user_id.to_string() return Requester( user_id, access_token_id, is_guest, shadow_banned, device_id, app_service, authenticated_entity, ) ","def create_requester( user_id, access_token_id=None, is_guest=False, shadow_banned=False, device_id=None, app_service=None, authenticated_entity=None, ): """""" Create a new ``Requester`` object Args: user_id (str|UserID): id of the user making the request access_token_id (int|None): *ID* of the access token used for this request, or None if it came via the appservice API or similar is_guest (bool): True if the user making this request is a guest user shadow_banned (bool): True if the user making this request is shadow-banned. device_id (str|None): device_id which was set at authentication time app_service (ApplicationService|None): the AS requesting on behalf of the user authenticated_entity: The entity that authenticated when making the request, this is different than the user_id when an admin user or the server is ""puppeting"" the user. Returns: Requester """""" if not isinstance(user_id, UserID): user_id = UserID.from_string(user_id) if authenticated_entity is None: authenticated_entity = user_id.to_string() return Requester( user_id, access_token_id, is_guest, shadow_banned, device_id, app_service, authenticated_entity, ) " 36696,"def _determine_linux_fastcopy_blocksize(infd): """"""Determine blocksize for fastcopying on Linux. Hopefully the whole file will be copied in a single call. The copying itself should be performed in a loop 'till EOF is reached (0 return) so a blocksize smaller or bigger than the actual file size should not make any difference, also in case the file content changes while being copied. """""" try: blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB except OSError: blocksize = 2 ** 27 # 128MiB # On 32-bit architectures truncate to 1GiB to avoid OverflowError, # see bpo-38319. if sys.maxsize < 2 ** 32: blocksize = min(blocksize, 2 ** 30) return blocksize ","def _determine_linux_fastcopy_blocksize(infd): """"""Determine blocksize for fastcopying on Linux. Hopefully the whole file will be copied in a single call. 
The copying itself should be performed in a loop 'till EOF is reached (0 return) so a blocksize smaller or bigger than the actual file size should not make any difference, also in case the file content changes while being copied. """""" try: blocksize = max(os.fstat(infd).st_size, 2 ** 23) # min 8MiB except OSError: blocksize = 2 ** 27 # 128MiB # On 32-bit architectures truncate to 1GiB to avoid OverflowError, # see gh-82500. if sys.maxsize < 2 ** 32: blocksize = min(blocksize, 2 ** 30) return blocksize " 5383,"def test_present(): """""" Test to ensure that the named user is present with the specified properties. """""" dbname = ""my_test"" charset = ""utf8"" collate = ""utf8_unicode_ci"" ret = {""name"": dbname, ""result"": False, ""comment"": """", ""changes"": {}} mock_result = { ""character_set"": charset, ""collate"": collate, ""name"": dbname, } mock_result_alter_db = {True} mock = MagicMock(return_value=mock_result) mock_a = MagicMock(return_value=mock_result_alter_db) mock_failed = MagicMock(return_value=False) mock_err = MagicMock(return_value=""salt"") mock_no_err = MagicMock(return_value=None) mock_create = MagicMock(return_value=True) mock_create_failed = MagicMock(return_value=False) with patch.dict( mysql_database.__salt__, {""mysql.db_get"": mock, ""mysql.alter_db"": mock_a} ): mod_charset = ""ascii"" mod_collate = ""ascii_general_ci"" with patch.dict(mysql_database.__opts__, {""test"": True}): comt = [ ""Database character set {} != {} needs to be updated"".format( mod_charset, charset ), ""Database {} is going to be updated"".format(dbname), ] ret.update({""comment"": ""\n"".join(comt)}) ret.update({""result"": None}) assert mysql_database.present(dbname, character_set=mod_charset) == ret with patch.dict(mysql_database.__opts__, {""test"": True}): comt = [ ""Database {} is already present"".format(dbname), ""Database collate {} != {} needs to be updated"".format( mod_collate, collate ), ] ret.update({""comment"": ""\n"".join(comt)}) ret.update({""result"": None}) assert ( mysql_database.present( dbname, character_set=charset, collate=mod_collate ) == ret ) with patch.dict(mysql_database.__opts__, {}): comt = [ ""Database character set {} != {} needs to be updated"".format( mod_charset, charset ), ""Database collate {} != {} needs to be updated"".format( mod_collate, collate ), ] ret.update({""comment"": ""\n"".join(comt)}) ret.update({""result"": True}) assert ( mysql_database.present( dbname, character_set=mod_charset, collate=mod_collate ) == ret ) with patch.dict(mysql_database.__opts__, {""test"": False}): comt = ""Database {} is already present"".format(dbname) ret.update({""comment"": comt}) ret.update({""result"": True}) assert ( mysql_database.present(dbname, character_set=charset, collate=collate) == ret ) with patch.dict(mysql_database.__salt__, {""mysql.db_get"": mock_failed}): with patch.dict(mysql_database.__salt__, {""mysql.db_create"": mock_create}): with patch.object(mysql_database, ""_get_mysql_error"", mock_err): ret.update({""comment"": ""salt"", ""result"": False}) assert mysql_database.present(dbname) == ret with patch.object(mysql_database, ""_get_mysql_error"", mock_no_err): comt = ""The database {} has been created"".format(dbname) ret.update({""comment"": comt, ""result"": True}) ret.update({""changes"": {dbname: ""Present""}}) assert mysql_database.present(dbname) == ret with patch.dict( mysql_database.__salt__, {""mysql.db_create"": mock_create_failed} ): ret[""comment""] = """" with patch.object(mysql_database, ""_get_mysql_error"", 
mock_no_err): ret.update({""changes"": {}}) comt = ""Failed to create database {}"".format(dbname) ret.update({""comment"": comt, ""result"": False}) assert mysql_database.present(dbname) == ret ","def test_present(): """""" Test to ensure that the named database is present with the specified properties. """""" dbname = ""my_test"" charset = ""utf8"" collate = ""utf8_unicode_ci"" ret = {""name"": dbname, ""result"": False, ""comment"": """", ""changes"": {}} mock_result = { ""character_set"": charset, ""collate"": collate, ""name"": dbname, } mock_result_alter_db = {True} mock = MagicMock(return_value=mock_result) mock_a = MagicMock(return_value=mock_result_alter_db) mock_failed = MagicMock(return_value=False) mock_err = MagicMock(return_value=""salt"") mock_no_err = MagicMock(return_value=None) mock_create = MagicMock(return_value=True) mock_create_failed = MagicMock(return_value=False) with patch.dict( mysql_database.__salt__, {""mysql.db_get"": mock, ""mysql.alter_db"": mock_a} ): mod_charset = ""ascii"" mod_collate = ""ascii_general_ci"" with patch.dict(mysql_database.__opts__, {""test"": True}): comt = [ ""Database character set {} != {} needs to be updated"".format( mod_charset, charset ), ""Database {} is going to be updated"".format(dbname), ] ret.update({""comment"": ""\n"".join(comt)}) ret.update({""result"": None}) assert mysql_database.present(dbname, character_set=mod_charset) == ret with patch.dict(mysql_database.__opts__, {""test"": True}): comt = [ ""Database {} is already present"".format(dbname), ""Database collate {} != {} needs to be updated"".format( mod_collate, collate ), ] ret.update({""comment"": ""\n"".join(comt)}) ret.update({""result"": None}) assert ( mysql_database.present( dbname, character_set=charset, collate=mod_collate ) == ret ) with patch.dict(mysql_database.__opts__, {}): comt = [ ""Database character set {} != {} needs to be updated"".format( mod_charset, charset ), ""Database collate {} != {} needs to be updated"".format( mod_collate, collate ), ] ret.update({""comment"": ""\n"".join(comt)}) ret.update({""result"": True}) assert ( mysql_database.present( dbname, character_set=mod_charset, collate=mod_collate ) == ret ) with patch.dict(mysql_database.__opts__, {""test"": False}): comt = ""Database {} is already present"".format(dbname) ret.update({""comment"": comt}) ret.update({""result"": True}) assert ( mysql_database.present(dbname, character_set=charset, collate=collate) == ret ) with patch.dict(mysql_database.__salt__, {""mysql.db_get"": mock_failed}): with patch.dict(mysql_database.__salt__, {""mysql.db_create"": mock_create}): with patch.object(mysql_database, ""_get_mysql_error"", mock_err): ret.update({""comment"": ""salt"", ""result"": False}) assert mysql_database.present(dbname) == ret with patch.object(mysql_database, ""_get_mysql_error"", mock_no_err): comt = ""The database {} has been created"".format(dbname) ret.update({""comment"": comt, ""result"": True}) ret.update({""changes"": {dbname: ""Present""}}) assert mysql_database.present(dbname) == ret with patch.dict( mysql_database.__salt__, {""mysql.db_create"": mock_create_failed} ): ret[""comment""] = """" with patch.object(mysql_database, ""_get_mysql_error"", mock_no_err): ret.update({""changes"": {}}) comt = ""Failed to create database {}"".format(dbname) ret.update({""comment"": comt, ""result"": False}) assert mysql_database.present(dbname) == ret " 33524,"def process_apigateway_invocation( func_arn, path, payload, stage, api_id, headers=None, is_base64_encoded=False, 
resource_path=None, method=None, path_params=None, query_string_params=None, stage_variables=None, request_context=None, ): if path_params is None: path_params = {} if request_context is None: request_context = {} try: resource_path = resource_path or path event = construct_invocation_event( method, path, headers, payload, query_string_params, is_base64_encoded ) path_params = dict(path_params) fix_proxy_path_params(path_params) event[""pathParameters""] = path_params event[""resource""] = resource_path event[""requestContext""] = request_context event[""stageVariables""] = stage_variables LOG.debug( ""Running Lambda function %s from API Gateway invocation: %s %s"", func_arn, method or ""GET"", path, ) asynchronous = not config.SYNCHRONOUS_API_GATEWAY_EVENTS inv_result = run_lambda( func_arn=func_arn, event=event, context=request_context, asynchronous=asynchronous, ) return inv_result.result except Exception as e: LOG.warning( ""Unable to run Lambda function on API Gateway message: %s %s"", e, traceback.format_exc() ) ","def process_apigateway_invocation( func_arn, path, payload, stage, api_id, headers=None, is_base64_encoded=False, resource_path=None, method=None, path_params=None, query_string_params=None, stage_variables=None, request_context=None, ): if path_params is None: path_params = {} if request_context is None: request_context = {} try: resource_path = resource_path or path event = construct_invocation_event( method, path, headers, payload, query_string_params, is_base64_encoded ) path_params = dict(path_params) fix_proxy_path_params(path_params) event[""pathParameters""] = path_params event[""resource""] = resource_path event[""requestContext""] = request_context event[""stageVariables""] = stage_variables or {} LOG.debug( ""Running Lambda function %s from API Gateway invocation: %s %s"", func_arn, method or ""GET"", path, ) asynchronous = not config.SYNCHRONOUS_API_GATEWAY_EVENTS inv_result = run_lambda( func_arn=func_arn, event=event, context=request_context, asynchronous=asynchronous, ) return inv_result.result except Exception as e: LOG.warning( ""Unable to run Lambda function on API Gateway message: %s %s"", e, traceback.format_exc() ) " 6784,"def parse_email(communication, email_strings): """""" Parse email to add timeline links. When automatic email linking is enabled, an email from email_strings can contain a doctype and docname ie in the format `admin+doctype+docname@example.com`, the email is parsed and doctype and docname is extracted and timeline link is added. """""" delimiter = ""+"" for email_string in email_strings: if email_string: for email in email_string.split("",""): if delimiter in email: email_local_part = email.split(""@"")[0] local_parts = email_local_part.split(delimiter) if len(local_parts) == 3: doctype = unquote(local_parts[1]) docname = unquote(local_parts[2]) if doctype and docname and frappe.db.exists(doctype, docname): communication.add_link(doctype, docname) ","def parse_email(communication, email_strings): """""" Parse email to add timeline links. When automatic email linking is enabled, an email from email_strings can contain a doctype and docname ie in the format `admin+doctype+docname@example.com`, the email is parsed and doctype and docname is extracted and timeline link is added. 
"""""" delimiter = ""+"" for email_string in email_strings: if email_string: for email in email_string.split("",""): if delimiter in email: email_username = email.split(""@"")[0] local_parts = email_local_part.split(delimiter) if len(local_parts) == 3: doctype = unquote(local_parts[1]) docname = unquote(local_parts[2]) if doctype and docname and frappe.db.exists(doctype, docname): communication.add_link(doctype, docname) " 31140,"def main(): params = demisto.params() client = Client(params.get('apikey'), params.get('url'), params.get('insecure', False), params.get('proxy', False)) LOG('RST: Client initialised...') commands = { 'rst-threat-feed-ip': ip_command, 'rst-threat-feed-domain': domain_command, 'rst-threat-feed-url': url_command, 'rst-threat-feed-submit': submit_command, 'rst-threat-feed-submit-fp': submitfp_command, } command = demisto.command() LOG(f'RST: Command being called is {command}') try: if command == 'test-module': demisto.results(test_module(client)) elif command in commands: return_outputs(*commands[command](client, demisto.args())) else: raise Exception('Command not found.') except Exception as e: return_error(f'Failed to execute {command} command. Error: {e}') ","def main(): params = demisto.params() client = Client(params.get('apikey'), params.get('url'), params.get('insecure', False), params.get('proxy', False)) LOG('RST: Client initialised...') commands = { 'rst-threat-feed-ip': ip_command, 'rst-threat-feed-domain': domain_command, 'rst-threat-feed-url': url_command, 'rst-threat-feed-submit': submit_command, 'rst-threat-feed-submit-fp': submitfp_command, } command = demisto.command() LOG(f'RST: Command being called is {command}') try: if command == 'test-module': return_results(test_module(client)) elif command in commands: return_outputs(*commands[command](client, demisto.args())) else: raise Exception('Command not found.') except Exception as e: return_error(f'Failed to execute {command} command. Error: {e}') " 22612,"def convert_rnn_outputs(model: Model, inputs_outputs: Tuple, is_train): Xp, (Ymxnet, _) = inputs_outputs def convert_for_mxnet_backward(dYp: Padded) -> ArgsKwargs: dYmxnet = xp2mxnet(dYp.data, requires_grad=True) return ArgsKwargs(args=(Ymxnet,), kwargs={""head_grads"": dYmxnet}) Y = cast(Array3d, mxnet2xp(Ymxnet)) Yp = Padded(Y, Xp.size_at_t, Xp.lengths, Xp.indices) return Yp, convert_for_mxnet_backward ","def convert_rnn_outputs(model: Model, inputs_outputs: Tuple, is_train): Xp, (Ymxnet, _) = inputs_outputs def convert_for_mxnet_backward(dYp: Padded) -> ArgsKwargs: dYmxnet = xp2mxnet(dYp.data, requires_grad=True) return ArgsKwargs(args=(Ymxnet,), kwargs={""head_grads"": dYmxnet}) Y = cast(Floats3d, mxnet2xp(Ymxnet)) Yp = Padded(Y, Xp.size_at_t, Xp.lengths, Xp.indices) return Yp, convert_for_mxnet_backward " 758,"def kurtosis_fractional_anisotropy(dki_params): r"""""" Computes the anisotropy of the kurtosis tensor (KFA) [1]_. Parameters ---------- dki_params : ndarray (x, y, z, 27) or (n, 27) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor Returns ------- kfa : array Calculated mean kurtosis tensor. Notes -------- The KFA is defined as [1]_: .. 
math:: KFA \equiv \frac{||\mathbf{W} - MKT \mathbf{I}^{(4)}||_F}{||\mathbf{W}||_F} where $W$ is the kurtosis tensor, MKT the kurtosis tensor mean, $I^(4)$ is the fully symmetric rank 2 isotropic tensor and $||...||_F$ is the tensor's Frobenius norm [1]_. References ---------- .. [1] Glenn, G. R., Helpern, J. A., Tabesh, A., and Jensen, J. H. (2015). Quantitative assessment of diffusional387kurtosis anisotropy. NMR in Biomedicine28, 448–459. doi:10.1002/nbm.3271 """""" Wxxxx = dki_params[..., 12] Wyyyy = dki_params[..., 13] Wzzzz = dki_params[..., 14] Wxxxy = dki_params[..., 15] Wxxxz = dki_params[..., 16] Wxyyy = dki_params[..., 17] Wyyyz = dki_params[..., 18] Wxzzz = dki_params[..., 19] Wyzzz = dki_params[..., 20] Wxxyy = dki_params[..., 21] Wxxzz = dki_params[..., 22] Wyyzz = dki_params[..., 23] Wxxyz = dki_params[..., 24] Wxyyz = dki_params[..., 25] Wxyzz = dki_params[..., 26] W = 1.0/5.0 * (Wxxxx + Wyyyy + Wzzzz + 2*Wxxyy + 2*Wxxzz + 2*Wyyzz) # Compute's equation numerator A = (Wxxxx - W) ** 2 + (Wyyyy - W) ** 2 + (Wzzzz - W) ** 2 + \ 4 * Wxxxy ** 2 + 4 * Wxxxz ** 2 + 4 * Wxyyy ** 2 + 4 * Wyyyz ** 2 + \ 4 * Wxzzz ** 2 + 4 * Wyzzz ** 2 + \ 6 * (Wxxyy - W/3) ** 2 + 6 * (Wxxzz - W/3) ** 2 + \ 6 * (Wyyzz - W/3) ** 2 + \ 12 * Wxxyz ** 2 + 12 * Wxyyz ** 2 + 12 * Wxyzz ** 2 # Compute's equation denominator B = Wxxxx ** 2 + Wyyyy ** 2 + Wzzzz ** 2 + 4 * Wxxxy ** 2 + \ 4 * Wxxxz ** 2 + 4 * Wxyyy ** 2 + 4 * Wyyyz ** 2 + 4 * Wxzzz ** 2 + \ 4 * Wyzzz ** 2 + 6 * Wxxyy ** 2 + 6 * Wxxzz ** 2 + 6 * Wyyzz ** 2 + \ 12 * Wxxyz ** 2 + 12 * Wxyyz ** 2 + 12 * Wxyzz ** 2 # Compute KFA KFA = np.zeros(A.shape) cond = B > 0 # Avoiding Singularity (if B = 0, KFA = 0) KFA[cond] = np.sqrt(A[cond]/B[cond]) return KFA ","def kurtosis_fractional_anisotropy(dki_params): r"""""" Computes the anisotropy of the kurtosis tensor (KFA) [1]_. Parameters ---------- dki_params : ndarray (x, y, z, 27) or (n, 27) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor Returns ------- kfa : array Calculated mean kurtosis tensor. Notes -------- The KFA is defined as [1]_: .. math:: KFA \equiv \frac{||\mathbf{W} - MKT \mathbf{I}^{(4)}||_F}{||\mathbf{W}||_F} where $W$ is the kurtosis tensor, MKT the kurtosis tensor mean, $I^(4)$ is the fully symmetric rank 2 isotropic tensor and $||...||_F$ is the tensor's Frobenius norm [1]_. References ---------- .. [1] Glenn, G. R., Helpern, J. A., Tabesh, A., and Jensen, J. H. (2015). Quantitative assessment of diffusional kurtosis anisotropy. NMR in Biomedicine28, 448–459. 
doi:10.1002/nbm.3271 """""" Wxxxx = dki_params[..., 12] Wyyyy = dki_params[..., 13] Wzzzz = dki_params[..., 14] Wxxxy = dki_params[..., 15] Wxxxz = dki_params[..., 16] Wxyyy = dki_params[..., 17] Wyyyz = dki_params[..., 18] Wxzzz = dki_params[..., 19] Wyzzz = dki_params[..., 20] Wxxyy = dki_params[..., 21] Wxxzz = dki_params[..., 22] Wyyzz = dki_params[..., 23] Wxxyz = dki_params[..., 24] Wxyyz = dki_params[..., 25] Wxyzz = dki_params[..., 26] W = 1.0/5.0 * (Wxxxx + Wyyyy + Wzzzz + 2*Wxxyy + 2*Wxxzz + 2*Wyyzz) # Compute's equation numerator A = (Wxxxx - W) ** 2 + (Wyyyy - W) ** 2 + (Wzzzz - W) ** 2 + \ 4 * Wxxxy ** 2 + 4 * Wxxxz ** 2 + 4 * Wxyyy ** 2 + 4 * Wyyyz ** 2 + \ 4 * Wxzzz ** 2 + 4 * Wyzzz ** 2 + \ 6 * (Wxxyy - W/3) ** 2 + 6 * (Wxxzz - W/3) ** 2 + \ 6 * (Wyyzz - W/3) ** 2 + \ 12 * Wxxyz ** 2 + 12 * Wxyyz ** 2 + 12 * Wxyzz ** 2 # Compute's equation denominator B = Wxxxx ** 2 + Wyyyy ** 2 + Wzzzz ** 2 + 4 * Wxxxy ** 2 + \ 4 * Wxxxz ** 2 + 4 * Wxyyy ** 2 + 4 * Wyyyz ** 2 + 4 * Wxzzz ** 2 + \ 4 * Wyzzz ** 2 + 6 * Wxxyy ** 2 + 6 * Wxxzz ** 2 + 6 * Wyyzz ** 2 + \ 12 * Wxxyz ** 2 + 12 * Wxyyz ** 2 + 12 * Wxyzz ** 2 # Compute KFA KFA = np.zeros(A.shape) cond = B > 0 # Avoiding Singularity (if B = 0, KFA = 0) KFA[cond] = np.sqrt(A[cond]/B[cond]) return KFA " 45983,"def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: r""""""Criterion that computes a surrogate binary intersection-over-union (IoU) loss. According to [2], we compute the IoU as follows: .. math:: \text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|} [1] approximates this fomular with a surrogate, which is fully differentable. Where: - :math:`X` expects to be the scores of each class. - :math:`Y` expects to be the binary tensor with the class labels. the loss, is finally computed as: .. math:: \text{loss}(x, class) = 1 - \text{IoU}(x, class) Reference: [1] http://proceedings.mlr.press/v37/yub15.pdf [2] https://arxiv.org/pdf/1705.08790.pdf . note:: This loss function only supports binary labels. For multi-class labels please use the Lovasz-Softmax loss. Args: input: logits tensor with shape :math:`(N, 1, H, W)`. labels: labels tensor with shape :math:`(N, H, W)` with binary values. Return: a scalar with the computed loss. Example: >>> N = 1 # num_classes >>> input = torch.randn(1, N, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) >>> output = lovasz_hinge_loss(input, target) >>> output.backward() """""" if not isinstance(input, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not isinstance(target, torch.Tensor): raise TypeError(f""Target type is not a torch.Tensor. Got {type(target)}"") if not len(input.shape) == 4: raise ValueError(f""Invalid input shape, we expect Bx1xHxW. Got: {input.shape}"") if not len(target.shape) == 3: raise ValueError(f""Invalid target shape, we expect BxHxW. Got: {target.shape}"") if not input.shape[1] == 1: raise ValueError(f""Invalid input shape, we expect Bx1xHxW. Got: {input.shape}"") if not input.shape[-2:] == target.shape[-2:]: raise ValueError(f""input and target shapes must be the same. Got: {input.shape} and {target.shape}"") if not input.device == target.device: raise ValueError(f""input and target must be in the same device. 
Got: {input.device} and {target.device}"") # flatten input and target [B, -1] and to float input_flatten: torch.Tensor = input.flatten(start_dim=1) target_flatten: torch.Tensor = target.flatten(start_dim=1).float() # get shapes B, N = input_flatten.shape # compute probabilities input_prob: torch.Tensor = torch.sigmoid(input_flatten) # compute actual loss signs = 2. * target_flatten - 1. errors = 1. - input_prob * signs errors_sorted, permutation = torch.sort(errors, dim=1, descending=True) batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0) target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)] target_sorted: torch.Tensor = target_sorted.view(B, N) target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True) intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1) union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1) gradient: torch.Tensor = 1. - intersection / union if N > 1: gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1] loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean() return loss ","def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: r""""""Criterion that computes a surrogate binary intersection-over-union (IoU) loss. According to [2], we compute the IoU as follows: .. math:: \text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|} [1] approximates this fomular with a surrogate, which is fully differentable. Where: - :math:`X` expects to be the scores of each class. - :math:`Y` expects to be the binary tensor with the class labels. the loss, is finally computed as: .. math:: \text{loss}(x, class) = 1 - \text{IoU}(x, class) Reference: [1] http://proceedings.mlr.press/v37/yub15.pdf [2] https://arxiv.org/pdf/1705.08790.pdf . note:: This loss function only supports binary labels. For multi-class labels please use the Lovasz-Softmax loss. Args: input: logits tensor with shape :math:`(N, 1, H, W)`. labels: labels tensor with shape :math:`(N, H, W)` with binary values. Return: a scalar with the computed loss. Example: >>> N = 1 # num_classes >>> input = torch.randn(1, N, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) >>> output = lovasz_hinge_loss(input, target) >>> output.backward() """""" if not isinstance(input, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(input)}"") if not isinstance(target, torch.Tensor): raise TypeError(f""Target type is not a torch.Tensor. Got {type(target)}"") if not len(input.shape) == 4: raise ValueError(f""Invalid input shape, we expect Bx1xHxW. Got: {input.shape}"") if not len(target.shape) == 3: raise ValueError(f""Invalid target shape, we expect BxHxW. Got: {target.shape}"") if not input.shape[1] == 1: raise ValueError(f""Invalid input shape, we expect Bx1xHxW. Got: {input.shape}"") if not input.shape[-2:] == target.shape[-2:]: raise ValueError(f""input and target shapes must be the same. Got: {input.shape} and {target.shape}"") if not input.device == target.device: raise ValueError(f""input and target must be in the same device. Got: {input.device} and {target.device}"") # flatten input and target [B, -1] and to float input_flatten: Tensor = input.reshape(B, -1) target_flatten: torch.Tensor = target.flatten(start_dim=1).float() # get shapes B, N = input_flatten.shape # compute probabilities input_prob: torch.Tensor = torch.sigmoid(input_flatten) # compute actual loss signs = 2. * target_flatten - 1. 
errors = 1. - input_prob * signs errors_sorted, permutation = torch.sort(errors, dim=1, descending=True) batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0) target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)] target_sorted: torch.Tensor = target_sorted.view(B, N) target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True) intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1) union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1) gradient: torch.Tensor = 1. - intersection / union if N > 1: gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1] loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean() return loss " 35824,"def mark_framework_limitation(test_id, reason): # The purpose of this function is to have a single entry point for skip marks that are only there, because the test # framework cannot handle the kernel in general or a specific parameter combination. # As development progresses, we can change the `mark.skip` to `mark.xfail` from time to time to see if the skip is # still justified. # We don't want to use `mark.xfail` all the time, because that actually runs the test until an error happens. Thus, # we are wasting CI resources for no reason for most of the time return TestMark(test_id, pytest.mark.skip(reason=reason)) ","def mark_framework_limitation(test_id, reason): # The purpose of this function is to have a single entry point for skip marks that are only there, because the test # framework cannot handle the kernel in general or a specific parameter combination. # As development progresses, we can change the `mark.skip` to `mark.xfail` from time to time to see if the skip is # still justified. # We don't want to use `mark.xfail` all the time, because that actually runs the test until an error happens. 
Thus, # we are wasting CI resources for no reason for most of the time return TestMark(test_id, pytest.mark.xfail(reason=reason)) " 4276,"def _add_interpolator(sp): """"""Compute a sparse matrix to interpolate the data into an MRI volume."""""" # extract transformation information from mri s = sp[0] mri_width, mri_height, mri_depth, nvox = _src_vol_dims(s) # # Convert MRI voxels from destination (MRI volume) to source (volume # source space subset) coordinates # combo_trans = combine_transforms(s['vox_mri_t'], invert_transform(s['src_mri_t']), 'mri_voxel', 'mri_voxel') logger.info('Setting up volume interpolation ...') interp = _grid_interp(s['vol_dims'], (mri_width, mri_height, mri_depth), combo_trans['trans']) # Compose the sparse matrices for si, s in enumerate(sp): # limit it columns that have any contribution from inuse any_ = sparse.diags( np.asarray( interp[:, s['inuse'].astype(bool)].sum(1) )[:, 0].astype(bool).astype(float) ) s['interpolator'] = any_ * interp logger.info(' %d/%d nonzero values for %s' % (len(s['interpolator'].data), nvox, s['seg_name'])) logger.info('[done]') ","def _add_interpolator(sp): """"""Compute a sparse matrix to interpolate the data into an MRI volume."""""" # extract transformation information from mri s = sp[0] mri_width, mri_height, mri_depth, nvox = _src_vol_dims(s) # # Convert MRI voxels from destination (MRI volume) to source (volume # source space subset) coordinates # combo_trans = combine_transforms(s['vox_mri_t'], invert_transform(s['src_mri_t']), 'mri_voxel', 'mri_voxel') logger.info('Setting up volume interpolation ...') interp = _grid_interp(s['vol_dims'], (mri_width, mri_height, mri_depth), combo_trans['trans']) # Compose the sparse matrices for si, s in enumerate(sp): # limit it columns that have any contribution from inuse any_ = sparse.diags( np.asarray( interp[:, s['inuse'].astype(bool)].sum(1) )[:, 0].astype(bool).astype(float) ) s['interpolator'] = any_ @ interp logger.info(' %d/%d nonzero values for %s' % (len(s['interpolator'].data), nvox, s['seg_name'])) logger.info('[done]') " 31944,"def censys_search_command(): args = demisto.args() query = args.get('query') index = args.get('index') page = int(args.get('page')) url_suffix = 'search/{0}'.format(index) data = { ""query"": query, ""page"": page } raw = send_request('POST', url_suffix, json.dumps(data)) readable = tableToMarkdown(""Search results for {0} in {1} - page {2}"".format(query, index, page), raw[""results""]) results = {'Censys': { 'Search': { 'metadata': raw['metadata'], 'response': raw['results'] } } } return_outputs(readable, results) ","def censys_search_command(): args = demisto.args() query = args.get('query') index = args.get('index') page = int(args.get('page')) url_suffix = 'search/{0}'.format(index) data = { ""query"": query, ""page"": page } raw = send_request('POST', url_suffix, json.dumps(data)) readable = tableToMarkdown(""Search results for {0} in {1} - page {2}"".format(query, index, page), raw[""results""]) return_outputs(readable, raw) " 1330,"def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('linear_model', parent_package, top_path) libraries = [] if os.name == 'posix': libraries.append('m') config.add_extension('cd_fast', sources=['cd_fast.pyx'], include_dirs=numpy.get_include(), libraries=libraries) config.add_extension('sgd_fast', sources=['sgd_fast.pyx'], include_dirs=numpy.get_include(), libraries=libraries) # generate sag_fast from template sag_cython_file = 
'sklearn/linear_model/sag_fast.pyx.tp' sag_file = sag_cython_file.replace('.tp', '') if not (os.path.exists(sag_file) and os.stat(sag_cython_file).st_mtime < os.stat(sag_file).st_mtime): with open(sag_cython_file, ""r"") as f: tmpl = f.read() from Cython import Tempita tmpl_ = Tempita.sub(tmpl) with open(sag_file, ""w"") as f: f.write(tmpl_) config.add_extension('sag_fast', sources=['sag_fast.pyx'], include_dirs=numpy.get_include()) # add other directories config.add_subpackage('tests') return config ","def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('linear_model', parent_package, top_path) libraries = [] if os.name == 'posix': libraries.append('m') config.add_extension('cd_fast', sources=['cd_fast.pyx'], include_dirs=numpy.get_include(), libraries=libraries) config.add_extension('sgd_fast', sources=['sgd_fast.pyx'], include_dirs=numpy.get_include(), libraries=libraries) # generate sag_fast from template sag_cython_file = 'sklearn/linear_model/sag_fast.pyx.tp' sag_file = sag_cython_file.replace('.tp', '') if not (os.path.exists(sag_file) and os.stat(sag_cython_file).st_mtime < os.stat(sag_file).st_mtime): with open(sag_cython_file, ""r"") as f: tmpl = f.read() from Cython import Tempita # noqa tmpl_ = Tempita.sub(tmpl) with open(sag_file, ""w"") as f: f.write(tmpl_) config.add_extension('sag_fast', sources=['sag_fast.pyx'], include_dirs=numpy.get_include()) # add other directories config.add_subpackage('tests') return config " 20287,"def add_arguments(parser: 'argparse.ArgumentParser') -> None: """"""Add compile specific arguments."""""" parser.add_argument( '-j', '--jobs', action='store', default=0, type=int, help='The number of worker jobs to run (if supported). If the value is less than 1 the build program will guess.' ) parser.add_argument( '-l', '--load-average', action='store', default=0, type=int, help='The system load average to try to maintain (if supported)' ) parser.add_argument( '--clean', action='store_true', help='Clean the build directory.' ) parser.add_argument( '-C', action='store', dest='builddir', type=Path, default='.', help='The directory containing build files to be built.' ) parser.add_argument( '-v', '--verbose', action='store_true', help='Show more verbose output.' ) ","def add_arguments(parser: 'argparse.ArgumentParser') -> None: """"""Add compile specific arguments."""""" parser.add_argument( '-j', '--jobs', action='store', default=0, type=int, help='The number of worker jobs to run (if supported). If the value is less than 1 the build program will guess.' ) parser.add_argument( '-l', '--load-average', action='store', default=0, type=int, help='The system load average to try to maintain (if supported)' ) parser.add_argument( '--clean', action='store_true', help='Clean the build directory.' ) parser.add_argument( '-C', action='store', dest='builddir', type=Path, default='.', help='The directory containing build files to be built.' ) parser.add_argument( '--verbose', action='store_true', help='Show more verbose output.' 
) " 28368,"def annotate_documents( corpus: Corpus, embedding: np.ndarray, clustering_method: int, n_components: Optional[int] = None, epsilon: Optional[float] = None, cluster_labels: Optional[np.ndarray] = None, fdr_threshold: float = 0.05, n_words_in_cluster: int = 10, progress_callback: Optional[Callable] = None ) -> Tuple[np.ndarray, Dict[int, ClusterType], ScoresType]: """""" Annotate documents in corpus, by performing clustering on the corpus and assigning characteristic terms to each cluster using Hypergeometric distribution. Return annotated clusters - for each cluster return a list of keywords with scores, cluster center coordinates and concave_hulls coordinates. Also return optimal values for n_components/epsilon if calculated and scores data (p-values and counts for all keywords). Parameters ---------- corpus : Corpus Corpus to be annotated. embedding : np.ndarray of size len(corpus) × 2 Usually tSNE projection of BoW of corpus. clustering_method : int 0 for DBSCAN 1 for Gaussian mixture models 2 for custom clustering where cluster_labels are used n_components: int, optional, default = None Number of clusters for Gaussian mixture models. If None, set to the number of clusters with maximal silhouette. epsilon : float, optional, default = None epsilon for DBSCAN. If None, optimal value is computed. cluster_labels : np.ndarray, optional Custom cluster labels. Usually included in corpus. fdr_threshold : float, optional, default = 0.05 hypergeom_p_values threshold n_words_in_cluster : int, optional, default = 10 Number of characteristic terms in each cluster. progress_callback : callable, optional Progress callback. Returns ------- cluster_labels : np.ndarray of size len(corpus) An array of floats (i.e. 0, 1, np.nan) that represent cluster labels for all documents in the corpus. clusters : dict Dictionary of keywords with scores, centroids and concave hulls for each cluster. n_components : int Optimal number of clusters for Gaussian mixture models, if the n_components is None, and clustering_method is ClusterDocuments.GAUSSIAN_MIXTURE. n_components otherwise. epsilon : float Optimal value for epsilon for DBSCAN, if the epsilon is None, and clustering_method is ClusterDocuments.DBSCAN. epsilon otherwise. scores : tuple Tuple of all keywords with p-values and counts. Raises ------ ValueError when there are no clusters in the embedding. 
"""""" if progress_callback is None: progress_callback = dummy_callback if clustering_method == ClusterDocuments.GAUSSIAN_MIXTURE: if n_components is None: n_components = ClusterDocuments.gmm_compute_n_components( embedding, wrap_callback(progress_callback, end=0.3) ) n_components = min([n_components, len(embedding)]) cluster_labels = ClusterDocuments.gmm( embedding, n_components=n_components, threshold=0.6 ) elif clustering_method == ClusterDocuments.DBSCAN: if epsilon is None: epsilon = ClusterDocuments.dbscan_compute_epsilon(embedding) cluster_labels = ClusterDocuments.dbscan( embedding, eps=epsilon ) else: assert cluster_labels is not None cluster_labels[np.isnan(cluster_labels)] = -1 if len(set(cluster_labels) - {-1}) == 0: raise ValueError(""There are no clusters using current settings."") keywords = _get_characteristic_terms( corpus, n_keywords=20, progress_callback=wrap_callback(progress_callback, start=0.5) ) clusters_keywords, all_keywords, scores, p_values = \ _hypergeom_clusters(cluster_labels, keywords, fdr_threshold, n_words_in_cluster) concave_hulls = compute_concave_hulls(embedding, cluster_labels, epsilon) centroids = {c: tuple(np.mean(concave_hulls[c], axis=0)) for c in set(cluster_labels) - {-1}} clusters = {int(key): ( clusters_keywords[key], centroids[key], concave_hulls[key] ) for key in clusters_keywords} cluster_labels = cluster_labels.astype(float) cluster_labels[cluster_labels == -1] = np.nan scores = (all_keywords, scores, p_values) return cluster_labels, clusters, n_components, epsilon, scores ","def annotate_documents( corpus: Corpus, embedding: np.ndarray, clustering_method: int, n_components: Optional[int] = None, epsilon: Optional[float] = None, cluster_labels: Optional[np.ndarray] = None, fdr_threshold: float = 0.05, n_words_in_cluster: int = 10, progress_callback: Optional[Callable] = None ) -> Tuple[np.ndarray, Dict[int, ClusterType], int, float, ScoresType]: """""" Annotate documents in corpus, by performing clustering on the corpus and assigning characteristic terms to each cluster using Hypergeometric distribution. Return annotated clusters - for each cluster return a list of keywords with scores, cluster center coordinates and concave_hulls coordinates. Also return optimal values for n_components/epsilon if calculated and scores data (p-values and counts for all keywords). Parameters ---------- corpus : Corpus Corpus to be annotated. embedding : np.ndarray of size len(corpus) × 2 Usually tSNE projection of BoW of corpus. clustering_method : int 0 for DBSCAN 1 for Gaussian mixture models 2 for custom clustering where cluster_labels are used n_components: int, optional, default = None Number of clusters for Gaussian mixture models. If None, set to the number of clusters with maximal silhouette. epsilon : float, optional, default = None epsilon for DBSCAN. If None, optimal value is computed. cluster_labels : np.ndarray, optional Custom cluster labels. Usually included in corpus. fdr_threshold : float, optional, default = 0.05 hypergeom_p_values threshold n_words_in_cluster : int, optional, default = 10 Number of characteristic terms in each cluster. progress_callback : callable, optional Progress callback. Returns ------- cluster_labels : np.ndarray of size len(corpus) An array of floats (i.e. 0, 1, np.nan) that represent cluster labels for all documents in the corpus. clusters : dict Dictionary of keywords with scores, centroids and concave hulls for each cluster. 
n_components : int Optimal number of clusters for Gaussian mixture models, if the n_components is None, and clustering_method is ClusterDocuments.GAUSSIAN_MIXTURE. n_components otherwise. epsilon : float Optimal value for epsilon for DBSCAN, if the epsilon is None, and clustering_method is ClusterDocuments.DBSCAN. epsilon otherwise. scores : tuple Tuple of all keywords with p-values and counts. Raises ------ ValueError when there are no clusters in the embedding. """""" if progress_callback is None: progress_callback = dummy_callback if clustering_method == ClusterDocuments.GAUSSIAN_MIXTURE: if n_components is None: n_components = ClusterDocuments.gmm_compute_n_components( embedding, wrap_callback(progress_callback, end=0.3) ) n_components = min([n_components, len(embedding)]) cluster_labels = ClusterDocuments.gmm( embedding, n_components=n_components, threshold=0.6 ) elif clustering_method == ClusterDocuments.DBSCAN: if epsilon is None: epsilon = ClusterDocuments.dbscan_compute_epsilon(embedding) cluster_labels = ClusterDocuments.dbscan( embedding, eps=epsilon ) else: assert cluster_labels is not None cluster_labels[np.isnan(cluster_labels)] = -1 if len(set(cluster_labels) - {-1}) == 0: raise ValueError(""There are no clusters using current settings."") keywords = _get_characteristic_terms( corpus, n_keywords=20, progress_callback=wrap_callback(progress_callback, start=0.5) ) clusters_keywords, all_keywords, scores, p_values = \ _hypergeom_clusters(cluster_labels, keywords, fdr_threshold, n_words_in_cluster) concave_hulls = compute_concave_hulls(embedding, cluster_labels, epsilon) centroids = {c: tuple(np.mean(concave_hulls[c], axis=0)) for c in set(cluster_labels) - {-1}} clusters = {int(key): ( clusters_keywords[key], centroids[key], concave_hulls[key] ) for key in clusters_keywords} cluster_labels = cluster_labels.astype(float) cluster_labels[cluster_labels == -1] = np.nan scores = (all_keywords, scores, p_values) return cluster_labels, clusters, n_components, epsilon, scores " 30610,"def main(): circle_aritfact = sys.argv[1] env_file = sys.argv[2] with open(env_file, 'r') as json_file: env_results = json.load(json_file) for env in env_results: print(f'Downloading server log from {env[""Role""]}') ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \ '""sudo chmod -R 755 /var/log/demisto""' scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ '{}@{}:/var/log/demisto/server.log {} || echo ""WARN: Failed downloading server.log""' try: subprocess.check_output( ssh_string.format(env[""SSHuser""], env[""InstanceDNS""]), shell=True) except subprocess.CalledProcessError as exc: print(exc.output) try: subprocess.check_output( scp_string.format( env[""SSHuser""], env[""InstanceDNS""], ""{}/server_{}.log"".format(circle_aritfact, env[""Role""].replace(' ', ''))), shell=True) except subprocess.CalledProcessError as exc: print(exc.output) if os.path.isfile(""./Tests/is_build_passed_{}.txt"".format(env[""Role""].replace(' ', ''))): print(f'Destroying instance {env[""Role""]}') rminstance = aws_functions.destroy_instance(env[""Region""], env[""InstanceID""]) if aws_functions.isError(rminstance): print_error(rminstance) else: print_warning(f'Tests failed on {env[""Role""]} ,keeping instance alive') ","def main(): circle_aritfact = sys.argv[1] env_file = sys.argv[2] with open(env_file, 'r') as json_file: env_results = json.load(json_file) for env in env_results: print(f'Downloading server log from {env.get(""Role"", ""Unknown role"")}') 
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \ '""sudo chmod -R 755 /var/log/demisto""' scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ '{}@{}:/var/log/demisto/server.log {} || echo ""WARN: Failed downloading server.log""' try: subprocess.check_output( ssh_string.format(env[""SSHuser""], env[""InstanceDNS""]), shell=True) except subprocess.CalledProcessError as exc: print(exc.output) try: subprocess.check_output( scp_string.format( env[""SSHuser""], env[""InstanceDNS""], ""{}/server_{}.log"".format(circle_aritfact, env[""Role""].replace(' ', ''))), shell=True) except subprocess.CalledProcessError as exc: print(exc.output) if os.path.isfile(""./Tests/is_build_passed_{}.txt"".format(env[""Role""].replace(' ', ''))): print(f'Destroying instance {env[""Role""]}') rminstance = aws_functions.destroy_instance(env[""Region""], env[""InstanceID""]) if aws_functions.isError(rminstance): print_error(rminstance) else: print_warning(f'Tests failed on {env[""Role""]} ,keeping instance alive') " 56290,"def brodcast_data(rank, data, rank_src=0): if rank<0: return data device = torch.device(""cuda:{}"".format(rank)) # send starts to other processes if rank == rank_src: data_buf = pickle.dumps(data) data_storage = torch.ByteStorage.from_buffer(data_buf) data_tensor = torch.ByteTensor(data_storage).to(device) size_tensor = torch.tensor([data_tensor.numel()], dtype=torch.long, device=device) printlog(""Pack data by {} process and sent to other processes {} bytes"".format(rank_src, data_tensor.numel())) else: size_tensor = torch.tensor([0], dtype=torch.long, device=device) torch.distributed.broadcast(size_tensor, rank_src) if rank != rank_src: data_tensor = torch.empty((size_tensor.item(),), dtype=torch.uint8, device=device) torch.distributed.broadcast(data_tensor, rank_src) if rank != rank_src: printlog( ""Receive data and unpack {} bytes by {} process"".format(size_tensor.item(), rank)) data_buf = data_tensor.cpu().numpy().tobytes() data = pickle.loads(data_buf) torch.distributed.barrier() return data","def brodcast_data(rank, data, rank_src=0): if rank<0: return data device = torch.device(""cuda:{}"".format(rank)) # send starts to other processes if rank == rank_src: data_buf = pickle.dumps(data) data_storage = torch.ByteStorage.from_buffer(data_buf) data_tensor = torch.ByteTensor(data_storage).to(device) size_tensor = torch.tensor([data_tensor.numel()], dtype=torch.long, device=device) printlog(""Pack data by {} process and sent to other processes {} bytes"".format(rank_src, data_tensor.numel())) else: size_tensor = torch.tensor([0], dtype=torch.long, device=device) torch.distributed.broadcast(size_tensor, rank_src) if rank != rank_src: data_tensor = torch.empty((size_tensor.item(),), dtype=torch.uint8, device=device) torch.distributed.broadcast(data_tensor, rank_src) if rank != rank_src: printlog( ""Receive data and unpack {} bytes by {} process"".format(size_tensor.item(), rank)) data_buf = data_tensor.cpu().numpy().tobytes() data = pickle.loads(data_buf) torch.distributed.barrier() return data " 32631,"def install_custom_pack(pack_id: str, skip_verify: bool, skip_validation: bool, instance_name: str = '') -> Tuple[bool, str]: """"""Installs a custom pack in the machine. Args: pack_id (str): The ID of the pack to install. skip_verify (bool): If true will skip pack signature validation. skip_validation (bool) if true will skip all pack validations. instance_name (str) Demisto REST API instance name. Returns: - bool. 
Whether the installation of the pack was successful or not. - str. In case of failure, the error message. Notes: Assumptions: The zipped file is in the war-room, and the context includes the data related to it. """""" pack_file_entry_id = '' instance_context = demisto.context() context_files = instance_context.get('File', []) if not isinstance(context_files, list): context_files = [context_files] for file_in_context in context_files: if file_in_context['Name'].split('/')[-1] == f'{pack_id}.zip': pack_file_entry_id = file_in_context['EntryID'] break uri = build_url_parameters(skip_verify=skip_verify, skip_validation=skip_validation) if pack_file_entry_id: args = {'uri': uri, 'entryID': pack_file_entry_id} if instance_name: args['using'] = instance_name status, res = execute_command( 'demisto-api-multipart', args, fail_on_error=False, ) if not status: error_message = f'{SCRIPT_NAME} - {res}' demisto.debug(error_message) return False, f'Issue occurred while installing the pack on the machine.\n{res}' else: error_message = 'Could not find file entry ID.' demisto.debug(f'{SCRIPT_NAME}, ""{pack_id}"" - {error_message}.') return False, error_message return True, '' ","def install_custom_pack(pack_id: str, skip_verify: bool, skip_validation: bool, instance_name: str = '') -> Tuple[bool, str]: """"""Installs a custom pack in the machine. Args: pack_id (str): The ID of the pack to install. skip_verify (bool): If true will skip pack signature validation. skip_validation (bool) if true will skip all pack validations. instance_name (str) Demisto REST API instance name. Returns: - bool. Whether the installation of the pack was successful or not. - str. In case of failure, the error message. Notes: Assumptions: The zipped file is in the war-room, and the context includes the data related to it. """""" pack_file_entry_id = '' instance_context = demisto.context() context_files = instance_context.get('File', []) if not isinstance(context_files, list): context_files = [context_files] for file_in_context in context_files: if file_in_context['Name'].split('/')[-1] == f'{pack_id}.zip': pack_file_entry_id = file_in_context.get('EntryID') break uri = build_url_parameters(skip_verify=skip_verify, skip_validation=skip_validation) if pack_file_entry_id: args = {'uri': uri, 'entryID': pack_file_entry_id} if instance_name: args['using'] = instance_name status, res = execute_command( 'demisto-api-multipart', args, fail_on_error=False, ) if not status: error_message = f'{SCRIPT_NAME} - {res}' demisto.debug(error_message) return False, f'Issue occurred while installing the pack on the machine.\n{res}' else: error_message = 'Could not find file entry ID.' 
demisto.debug(f'{SCRIPT_NAME}, ""{pack_id}"" - {error_message}.') return False, error_message return True, '' " 34438,"def _log_evaluation_table( golds: List[Any], name: Text, report: Dict[Text, Any], precision: float, f1: float, accuracy: float, in_training_data_fraction: float, include_report: bool = True, ): # pragma: no cover """"""Log the sklearn evaluation metrics."""""" logger.info(f""Evaluation Results on {name} level:"") logger.info(f""\tCorrect: {int(len(golds) * accuracy)} / {len(golds)}"") logger.info(f""\tF1-Score: {f1:.3f}"") logger.info(f""\tPrecision: {precision:.3f}"") logger.info(f""\tAccuracy: {accuracy:.3f}"") logger.info(f""\tIn-data fraction: {in_training_data_fraction:.3g}"") if include_report: logger.info(f""\tClassification report: \n{report}"") ","def _log_evaluation_table( golds: List[Any], name: Text, report: Dict[Text, Any], precision: float, f1: float, accuracy: float, in_training_data_fraction: float, include_report: bool = True, ) -> None: # pragma: no cover """"""Log the sklearn evaluation metrics."""""" logger.info(f""Evaluation Results on {name} level:"") logger.info(f""\tCorrect: {int(len(golds) * accuracy)} / {len(golds)}"") logger.info(f""\tF1-Score: {f1:.3f}"") logger.info(f""\tPrecision: {precision:.3f}"") logger.info(f""\tAccuracy: {accuracy:.3f}"") logger.info(f""\tIn-data fraction: {in_training_data_fraction:.3g}"") if include_report: logger.info(f""\tClassification report: \n{report}"") " 49247,"def _get_timezone_name(timezone): """""" Return the offset for fixed offset timezones or else the name of ``timezone``. """""" return timezone.tzname(None) or str(timezone) ","def _get_timezone_name(timezone): """""" Return the offset for fixed offset timezones, or the name of timezone if not set. """""" return timezone.tzname(None) or str(timezone) " 53,"def run_solr_query(param=None, rows=100, page=1, sort=None, spellcheck_count=None, offset=None, fields=None, facet=True): if param is None: param = {} # use page when offset is not specified if offset is None: offset = rows * (page - 1) (q_list, use_dismax) = build_q_list(param) params = [ ('fl', ','.join(fields or [ 'key', 'author_name', 'author_key', 'title', 'subtitle', 'edition_count', 'ia', 'has_fulltext', 'first_publish_year', 'cover_i', 'cover_edition_key', 'public_scan_b', 'lending_edition_s', 'lending_identifier_s', 'language', 'ia_collection_s'])), ('fq', 'type:work'), ('q.op', 'AND'), ('start', offset), ('rows', rows), ] if spellcheck_count is None: spellcheck_count = default_spellcheck_count if spellcheck_count: params.append(('spellcheck', 'true')) params.append(('spellcheck.count', spellcheck_count)) if facet: params.append(('facet', 'true')) for facet in FACET_FIELDS: params.append(('facet.field', facet)) if q_list: if use_dismax: params.append(('q', ' '.join(q_list))) params.append(('defType', 'dismax')) params.append(('qf', 'text title^5 author_name^5')) params.append(('bf', 'sqrt(edition_count)^10')) else: params.append(('q', ' '.join(q_list + ['_val_:""sqrt(edition_count)""^10']))) if 'public_scan' in param: v = param.pop('public_scan').lower() if v in ('true', 'false'): if v == 'false': # also constrain on print disabled since the index may not be in sync param.setdefault('print_disabled', 'false') params.append(('fq', 'public_scan_b:%s' % v)) if 'print_disabled' in param: v = param.pop('print_disabled').lower() if v in ('true', 'false'): minus = '-' if v == 'false' else '' params.append(('fq', '%ssubject_key:protected_daisy' % minus)) if 'has_fulltext' in param: v = 
param['has_fulltext'].lower() if v not in ('true', 'false'): del param['has_fulltext'] params.append(('fq', 'has_fulltext:%s' % v)) for field in FACET_FIELDS: if field == 'has_fulltext': continue if field == 'author_facet': field = 'author_key' if field not in param: continue values = param[field] params += [('fq', '%s:""%s""' % (field, val)) for val in values if val] if sort: params.append(('sort', sort)) params.append(('wt', param.get('wt', 'standard'))) url = solr_select_url + '?' + urlencode(params) solr_result = execute_solr_query(url) if solr_result is None: return (None, url, q_list) reply = solr_result.read() return (reply, url, q_list) ","def run_solr_query(param=None, rows=100, page=1, sort=None, spellcheck_count=None, offset=None, fields=None, facet=True): param = param or {} # use page when offset is not specified if offset is None: offset = rows * (page - 1) (q_list, use_dismax) = build_q_list(param) params = [ ('fl', ','.join(fields or [ 'key', 'author_name', 'author_key', 'title', 'subtitle', 'edition_count', 'ia', 'has_fulltext', 'first_publish_year', 'cover_i', 'cover_edition_key', 'public_scan_b', 'lending_edition_s', 'lending_identifier_s', 'language', 'ia_collection_s'])), ('fq', 'type:work'), ('q.op', 'AND'), ('start', offset), ('rows', rows), ] if spellcheck_count is None: spellcheck_count = default_spellcheck_count if spellcheck_count: params.append(('spellcheck', 'true')) params.append(('spellcheck.count', spellcheck_count)) if facet: params.append(('facet', 'true')) for facet in FACET_FIELDS: params.append(('facet.field', facet)) if q_list: if use_dismax: params.append(('q', ' '.join(q_list))) params.append(('defType', 'dismax')) params.append(('qf', 'text title^5 author_name^5')) params.append(('bf', 'sqrt(edition_count)^10')) else: params.append(('q', ' '.join(q_list + ['_val_:""sqrt(edition_count)""^10']))) if 'public_scan' in param: v = param.pop('public_scan').lower() if v in ('true', 'false'): if v == 'false': # also constrain on print disabled since the index may not be in sync param.setdefault('print_disabled', 'false') params.append(('fq', 'public_scan_b:%s' % v)) if 'print_disabled' in param: v = param.pop('print_disabled').lower() if v in ('true', 'false'): minus = '-' if v == 'false' else '' params.append(('fq', '%ssubject_key:protected_daisy' % minus)) if 'has_fulltext' in param: v = param['has_fulltext'].lower() if v not in ('true', 'false'): del param['has_fulltext'] params.append(('fq', 'has_fulltext:%s' % v)) for field in FACET_FIELDS: if field == 'has_fulltext': continue if field == 'author_facet': field = 'author_key' if field not in param: continue values = param[field] params += [('fq', '%s:""%s""' % (field, val)) for val in values if val] if sort: params.append(('sort', sort)) params.append(('wt', param.get('wt', 'standard'))) url = solr_select_url + '?' + urlencode(params) solr_result = execute_solr_query(url) if solr_result is None: return (None, url, q_list) reply = solr_result.read() return (reply, url, q_list) " 5721,"def cont2discrete(system, dt, method=""zoh"", alpha=None): """""" Transform a continuous to a discrete state-space system. Parameters ---------- system : a tuple describing the system or an instance of `lti` The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) dt : float The discretization time step. 
method : str, optional Which method to use: * gbt: generalized bilinear transformation * bilinear: Tustin's approximation (""gbt"" with alpha=0.5) * euler: Euler (or forward differencing) method (""gbt"" with alpha=0) * backward_diff: Backwards differencing (""gbt"" with alpha=1.0) * zoh: zero-order hold (default) * foh: first-order hold (*versionadded: 1.3.0*) * impulse: equivalent impulse response (*versionadded: 1.3.0*) alpha : float within [0, 1], optional The generalized bilinear transformation weighting parameter, which should only be specified with method=""gbt"", and is ignored otherwise Returns ------- sysd : tuple containing the discrete system Based on the input type, the output will be of the form * (num, den, dt) for transfer function input * (zeros, poles, gain, dt) for zeros-poles-gain input * (A, B, C, D, dt) for state-space system input Notes ----- By default, the routine uses a Zero-Order Hold (zoh) method to perform the transformation. Alternatively, a generalized bilinear transformation may be used, which includes the common Tustin's bilinear approximation, an Euler's method technique, or a backwards differencing technique. The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method is based on [4]_. Examples -------- We can transform a continuous state-space system to a discrete one: >>> import matplotlib.pyplot as plt >>> from scipy.signal import cont2discrete, lti, dlti, dstep Define a continious state-space system. >>> A = np.array([[0, 1],[-10, -3]]) >>> B = np.array([[0],[10]]) >>> C = np.array([[1, 0]]) >>> D = np.array([[0]]) >>> l_system = lti(A, B, C, D) >>> t, x = l_system.step(T=np.linspace(0,5,100)) >>> plt.plot(t, x, label='Continuous') >>> plt.hlines(1, t[0], t[-1], linestyles='dotted') Transform it to a discrete state-space system using several methods. >>> dt = 0.1 >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']: ... d_system = cont2discrete((A, B, C, D), dt, method=method) ... s, x_d = dstep(d_system) ... plt.step(s, np.squeeze(x_d), label=method, where='post') >>> plt.axis([t[0], t[-1], x[0], 1.4]) >>> plt.legend(loc='best') >>> plt.tight_layout() >>> plt.show() References ---------- .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, 2009. (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley, pp. 204-206, 1998. 
"""""" if len(system) == 1: return system.to_discrete() if len(system) == 2: sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, alpha=alpha) return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) elif len(system) == 3: sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, method=method, alpha=alpha) return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) elif len(system) == 4: a, b, c, d = system else: raise ValueError(""First argument must either be a tuple of 2 (tf), "" ""3 (zpk), or 4 (ss) arrays."") if method == 'gbt': if alpha is None: raise ValueError(""Alpha parameter must be specified for the "" ""generalized bilinear transform (gbt) method"") elif alpha < 0 or alpha > 1: raise ValueError(""Alpha parameter must be within the interval "" ""[0,1] for the gbt method"") if method == 'gbt': # This parameter is used repeatedly - compute once here ima = np.eye(a.shape[0]) - alpha*dt*a ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) bd = linalg.solve(ima, dt*b) # Similarly solve for the output equation matrices cd = linalg.solve(ima.transpose(), c.transpose()) cd = cd.transpose() dd = d + alpha*np.dot(c, bd) elif method == 'bilinear' or method == 'tustin': return cont2discrete(system, dt, method=""gbt"", alpha=0.5) elif method == 'euler' or method == 'forward_diff': return cont2discrete(system, dt, method=""gbt"", alpha=0.0) elif method == 'backward_diff': return cont2discrete(system, dt, method=""gbt"", alpha=1.0) elif method == 'zoh': # Build an exponential matrix em_upper = np.hstack((a, b)) # Need to stack zeros under the a and b matrices em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), np.zeros((b.shape[1], b.shape[1])))) em = np.vstack((em_upper, em_lower)) ms = linalg.expm(dt * em) # Dispose of the lower rows ms = ms[:a.shape[0], :] ad = ms[:, 0:a.shape[1]] bd = ms[:, a.shape[1]:] cd = c dd = d elif method == 'foh': # Size parameters for convenience n = a.shape[0] m = b.shape[1] # Build an exponential matrix similar to 'zoh' method em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m)) em_lower = zeros((m, n + 2 * m)) em = np.block([[em_upper], [em_lower]]) ms = linalg.expm(em) # Get the three blocks from upper rows ms11 = ms[:n, 0:n] ms12 = ms[:n, n:n + m] ms13 = ms[:n, n + m:] ad = ms11 bd = ms12 - ms13 + ms11 @ ms13 cd = c dd = d + c @ ms13 elif method == 'impulse': if not np.allclose(d, 0): raise ValueError(""Impulse method is only applicable"" ""to strictly proper systems"") ad = linalg.expm(a * dt) bd = ad @ b * dt cd = c dd = c @ b * dt else: raise ValueError(""Unknown transformation method '%s'"" % method) return ad, bd, cd, dd, dt ","def cont2discrete(system, dt, method=""zoh"", alpha=None): """""" Transform a continuous to a discrete state-space system. Parameters ---------- system : a tuple describing the system or an instance of `lti` The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) dt : float The discretization time step. 
method : str, optional Which method to use: * gbt: generalized bilinear transformation * bilinear: Tustin's approximation (""gbt"" with alpha=0.5) * euler: Euler (or forward differencing) method (""gbt"" with alpha=0) * backward_diff: Backwards differencing (""gbt"" with alpha=1.0) * zoh: zero-order hold (default) * foh: first-order hold (*versionadded: 1.3.0*) * impulse: equivalent impulse response (*versionadded: 1.3.0*) alpha : float within [0, 1], optional The generalized bilinear transformation weighting parameter, which should only be specified with method=""gbt"", and is ignored otherwise Returns ------- sysd : tuple containing the discrete system Based on the input type, the output will be of the form * (num, den, dt) for transfer function input * (zeros, poles, gain, dt) for zeros-poles-gain input * (A, B, C, D, dt) for state-space system input Notes ----- By default, the routine uses a Zero-Order Hold (zoh) method to perform the transformation. Alternatively, a generalized bilinear transformation may be used, which includes the common Tustin's bilinear approximation, an Euler's method technique, or a backwards differencing technique. The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method is based on [4]_. Examples -------- We can transform a continuous state-space system to a discrete one: >>> import matplotlib.pyplot as plt >>> from scipy.signal import cont2discrete, lti, dlti, dstep Define a continious state-space system. >>> A = np.array([[0, 1],[-10, -3]]) >>> B = np.array([[0],[10]]) >>> C = np.array([[1, 0]]) >>> D = np.array([[0]]) >>> l_system = lti(A, B, C, D) >>> t, x = l_system.step(T=np.linspace(0, 5, 100)) >>> plt.plot(t, x, label='Continuous') >>> plt.hlines(1, t[0], t[-1], linestyles='dotted') Transform it to a discrete state-space system using several methods. >>> dt = 0.1 >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']: ... d_system = cont2discrete((A, B, C, D), dt, method=method) ... s, x_d = dstep(d_system) ... plt.step(s, np.squeeze(x_d), label=method, where='post') >>> plt.axis([t[0], t[-1], x[0], 1.4]) >>> plt.legend(loc='best') >>> plt.tight_layout() >>> plt.show() References ---------- .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, 2009. (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley, pp. 204-206, 1998. 
"""""" if len(system) == 1: return system.to_discrete() if len(system) == 2: sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, alpha=alpha) return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) elif len(system) == 3: sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, method=method, alpha=alpha) return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) elif len(system) == 4: a, b, c, d = system else: raise ValueError(""First argument must either be a tuple of 2 (tf), "" ""3 (zpk), or 4 (ss) arrays."") if method == 'gbt': if alpha is None: raise ValueError(""Alpha parameter must be specified for the "" ""generalized bilinear transform (gbt) method"") elif alpha < 0 or alpha > 1: raise ValueError(""Alpha parameter must be within the interval "" ""[0,1] for the gbt method"") if method == 'gbt': # This parameter is used repeatedly - compute once here ima = np.eye(a.shape[0]) - alpha*dt*a ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) bd = linalg.solve(ima, dt*b) # Similarly solve for the output equation matrices cd = linalg.solve(ima.transpose(), c.transpose()) cd = cd.transpose() dd = d + alpha*np.dot(c, bd) elif method == 'bilinear' or method == 'tustin': return cont2discrete(system, dt, method=""gbt"", alpha=0.5) elif method == 'euler' or method == 'forward_diff': return cont2discrete(system, dt, method=""gbt"", alpha=0.0) elif method == 'backward_diff': return cont2discrete(system, dt, method=""gbt"", alpha=1.0) elif method == 'zoh': # Build an exponential matrix em_upper = np.hstack((a, b)) # Need to stack zeros under the a and b matrices em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), np.zeros((b.shape[1], b.shape[1])))) em = np.vstack((em_upper, em_lower)) ms = linalg.expm(dt * em) # Dispose of the lower rows ms = ms[:a.shape[0], :] ad = ms[:, 0:a.shape[1]] bd = ms[:, a.shape[1]:] cd = c dd = d elif method == 'foh': # Size parameters for convenience n = a.shape[0] m = b.shape[1] # Build an exponential matrix similar to 'zoh' method em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m)) em_lower = zeros((m, n + 2 * m)) em = np.block([[em_upper], [em_lower]]) ms = linalg.expm(em) # Get the three blocks from upper rows ms11 = ms[:n, 0:n] ms12 = ms[:n, n:n + m] ms13 = ms[:n, n + m:] ad = ms11 bd = ms12 - ms13 + ms11 @ ms13 cd = c dd = d + c @ ms13 elif method == 'impulse': if not np.allclose(d, 0): raise ValueError(""Impulse method is only applicable"" ""to strictly proper systems"") ad = linalg.expm(a * dt) bd = ad @ b * dt cd = c dd = c @ b * dt else: raise ValueError(""Unknown transformation method '%s'"" % method) return ad, bd, cd, dd, dt " 44758,"def get_default_conda_env(include_cloudpickle=False, keras_module=None): """""" :return: The default Conda environment for MLflow Models produced by calls to :func:`save_model()` and :func:`log_model()`. 
"""""" import tensorflow as tf keras_dependency = [] # if we use tf.keras we only need to declare dependency on tensorflow pip_deps = None if keras_module is None: import keras keras_module = keras if keras_module.__name__ == ""keras"": # Temporary fix: the created conda environment has issues installing keras >= 2.3.1 if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'): keras_dependency = [""keras=={}"".format(keras_module.__version__)] else: pip_deps = [""keras=={}"".format(keras_module.__version__)] if include_cloudpickle: import cloudpickle if pip_deps is not None: pip_deps += [""cloudpickle=={}"".format(cloudpickle.__version__)] else: pip_deps = [""cloudpickle=={}"".format(cloudpickle.__version__)] # Temporary fix: conda-forge currently does not have tensorflow > 1.14 # The Keras pyfunc representation requires the TensorFlow # backend for Keras. Therefore, the conda environment must # include TensorFlow if LooseVersion(tf.__version__) < LooseVersion('2.0.0'): keras_dependency += [""tensorflow=={}"".format(tf.__version__)] else: if pip_deps is not None: pip_deps += [""tensorflow=={}"".format(tf.__version__)] else: pip_deps = [""tensorflow=={}"".format(tf.__version__)] return _mlflow_conda_env( additional_conda_deps=keras_dependency, additional_pip_deps=pip_deps, additional_conda_channels=None) ","def get_default_conda_env(include_cloudpickle=False, keras_module=None): """""" :return: The default Conda environment for MLflow Models produced by calls to :func:`save_model()` and :func:`log_model()`. """""" import tensorflow as tf keras_dependency = [] # if we use tf.keras we only need to declare dependency on tensorflow pip_deps = [] if keras_module is None: import keras keras_module = keras if keras_module.__name__ == ""keras"": # Temporary fix: the created conda environment has issues installing keras >= 2.3.1 if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'): keras_dependency = [""keras=={}"".format(keras_module.__version__)] else: pip_deps = [""keras=={}"".format(keras_module.__version__)] if include_cloudpickle: import cloudpickle if pip_deps is not None: pip_deps += [""cloudpickle=={}"".format(cloudpickle.__version__)] else: pip_deps = [""cloudpickle=={}"".format(cloudpickle.__version__)] # Temporary fix: conda-forge currently does not have tensorflow > 1.14 # The Keras pyfunc representation requires the TensorFlow # backend for Keras. Therefore, the conda environment must # include TensorFlow if LooseVersion(tf.__version__) < LooseVersion('2.0.0'): keras_dependency += [""tensorflow=={}"".format(tf.__version__)] else: if pip_deps is not None: pip_deps += [""tensorflow=={}"".format(tf.__version__)] else: pip_deps = [""tensorflow=={}"".format(tf.__version__)] return _mlflow_conda_env( additional_conda_deps=keras_dependency, additional_pip_deps=pip_deps, additional_conda_channels=None) " 21475,"def measure_func(name=None): """""" Used to decorator an async function with a `Measure` context manager. Usage: @measure_func async def foo(...): ... Which is analogous to: async def foo(...): with Measure(...): ... """""" def wrapper(func): block_name = func.__name__ if name is None else name @wraps(func) async def measured_func(self, *args, **kwargs): with Measure(self.clock, block_name): r = await func(self, *args, **kwargs) return r return measured_func return wrapper ","def measure_func(name=None): """""" Used to decorate an async function with a `Measure` context manager. Usage: @measure_func async def foo(...): ... 
Which is analogous to: async def foo(...): with Measure(...): ... """""" def wrapper(func): block_name = func.__name__ if name is None else name @wraps(func) async def measured_func(self, *args, **kwargs): with Measure(self.clock, block_name): r = await func(self, *args, **kwargs) return r return measured_func return wrapper " 57724,"def get_asset_group(client: Client, args: dict) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """"""Search Asset Group command. Args: client: Client which connects to api args: arguments for the request Returns: Human Readable Entry Context Raw Data """""" args_id = str(args.get('id')) url_suffix = f'/asset_groups/{args_id}' human_readable_markdown = '' response = client.http_request(message='GET', suffix=url_suffix).get('asset_group') if response: context = { 'Kenna.AssetGroup': { 'ID': int(response.get('id')), 'Name': str(response.get('name')), 'QueryString': str(response.get('querystring')), 'createdAt': str(response.get('created_at')), 'UpdatedAt': str(response.get('updated_at')), 'RiskMeterScore': int(response.get('risk_meter_score')), 'TrueRiskMeterScore': int(response.get('true_risk_meter_score')), 'AssetCount': int(response.get('asset_count')), 'VulnerabilityCount': int(response.get('vulnerability_count')), 'FixCount': int(response.get('fix_count')), 'TopPriorityCount': int(response.get('top_priority_count')), 'ActiveInternetBreachesCount': int(response.get('active_internet_breaches_count')), 'EasilyExploitableCount': int(response.get('easily_exploitable_count')), 'MalwareExploitableCount': int(response.get('malware_exploitable_count')), 'PopularTargetsCount': int(response.get('popular_targets_count')), 'UniqueOpenCVECount': int(response.get('unique_open_cve_count')), 'PredictedExploitableCount': int(response.get('predicted_exploitable_count')) } } human_readable_markdown += 'Name: ' + str(response.get('name')) + '\n' human_readable_markdown += 'ID: ' + str(response.get('id')) + '\n' human_readable_markdown += 'Asset Count: ' + str(response.get('asset_count')) + '\n' human_readable_markdown += 'Risk Meter Score: ' + str(response.get('risk_meter_score')) + '\n' human_readable_markdown += 'Vulnerability Count: ' + str(response.get('vulnerability_count')) + '\n' human_readable_markdown += 'Fix Count: ' + str(response.get('fix_count')) + '\n' human_readable_markdown += 'Active Internet Breaches Count: ' + str(response.get('active_internet_breaches_count')) + '\n' else: human_readable_markdown = ""Group not found."" return human_readable_markdown, context, response ","def get_asset_group(client: Client, args: dict) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """"""Search Asset Group command. 
Args: client: Client which connects to api args: arguments for the request Returns: Human Readable Entry Context Raw Data """""" args_id = str(args.get('id')) url_suffix = f'/asset_groups/{args_id}' human_readable_markdown = '' response = client.http_request(message='GET', suffix=url_suffix).get('asset_group') if response: wanted_keys = ['ID', 'Name', 'QueryString', 'CreatedAt', 'UpdatedAt', 'RiskMeterScore', 'TrueRiskMeterScore', 'AssetCount', 'VulnerabilityCount', 'FixCount', 'TopPriorityCount', 'ActiveInternetBreachesCount', 'EasilyExploitableCount', 'MalwareExploitableCount', 'PopularTargetsCount', 'UniqueOpenCVECount', 'PredictedExploitableCount'] actual_keys = ['id', 'name', 'querystring', 'created_at', 'updated_at', 'risk_meter_score', 'true_risk_meter_score', 'asset_count', 'vulnerability_count', 'fix_count', 'top_priority_count', 'active_internet_breaches_count', 'easily_exploitable_count', 'malware_exploitable_count', 'popular_targets_count', 'unique_open_cve_count', 'predicted_exploitable_count'] context_list = parse_response(response, wanted_keys, actual_keys) context = {'Kenna.AssetGroups(val.ID === obj.ID)': context_list} human_readable_markdown += 'Name: ' + str(response.get('name')) + '\n' human_readable_markdown += 'ID: ' + str(response.get('id')) + '\n' human_readable_markdown += 'Asset Count: ' + str(response.get('asset_count')) + '\n' human_readable_markdown += 'Risk Meter Score: ' + str(response.get('risk_meter_score')) + '\n' human_readable_markdown += 'Vulnerability Count: ' + str(response.get('vulnerability_count')) + '\n' human_readable_markdown += 'Fix Count: ' + str(response.get('fix_count')) + '\n' human_readable_markdown += 'Active Internet Breaches Count: ' + str(response.get('active_internet_breaches_count')) + '\n' else: human_readable_markdown = ""Group not found."" return human_readable_markdown, context, response " 31864,"def redlock_get_scan_results(): """""" Get DevOps Scan Results """""" scan_id = demisto.args().get('scan_id', None) response = req('GET', f'iac/v2/scans/{scan_id}/results', param_data={}, data={}) if ( not response or 'data' not in response or not isinstance(response['data'], list) ): demisto.results('No results found') else: items = response['data'] readable_output = [] for item in items: readable_output.append({ ""ID"": item.get('id'), ""Name"": item.get('attributes')['name'], ""Policy ID"": item.get('attributes')['policyId'], ""Description"": item.get('attributes')['desc'], ""Severity"": item.get('attributes')['severity'] }) results = { ""id"": scan_id, ""results"": items } md = tableToMarkdown(""Scan Results:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': results, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': results}, 'HumanReadable': md }) ","def redlock_get_scan_results(): """""" Get DevOps Scan Results """""" scan_id = demisto.args().get('scan_id', None) response = req('GET', f'iac/v2/scans/{scan_id}/results', param_data={}, data={}) if ( not response or 'data' not in response or not isinstance(response['data'], list) ): demisto.results('No results found') else: items = response.get('data', []) readable_output = [] for item in items: readable_output.append({ ""ID"": item.get('id'), ""Name"": item.get('attributes')['name'], ""Policy ID"": item.get('attributes')['policyId'], ""Description"": item.get('attributes')['desc'], ""Severity"": item.get('attributes')['severity'] }) results = { ""id"": scan_id, ""results"": items } md = tableToMarkdown(""Scan 
Results:"", readable_output) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': results, 'EntryContext': {'Redlock.Scans(val.id == obj.id)': results}, 'HumanReadable': md }) " 27135,"def _get_upstream_dataset_events(*, dag_run: DagRun, session: Session = NEW_SESSION) -> List[""DagRun""]: """"""If dag run is dataset-triggered, return the dataset events that triggered it."""""" if not dag_run.run_type == DagRunType.DATASET_TRIGGERED: return [] previous_dag_run = ( session.query(DagRun) .filter( DagRun.dag_id == dag_run.dag_id, DagRun.execution_date < dag_run.execution_date, DagRun.run_type == DagRunType.DATASET_TRIGGERED, ) .order_by(DagRun.execution_date.desc()) .first() ) dataset_event_filters = [ DatasetDagRef.dag_id == dag_run.dag_id, DatasetEvent.created_at <= dag_run.execution_date, ] if previous_dag_run: dataset_event_filters.append(DatasetEvent.created_at > previous_dag_run.execution_date) dataset_events = ( session.query(DatasetEvent) .join(DatasetDagRef, DatasetEvent.dataset_id == DatasetDagRef.dataset_id) .filter(*dataset_event_filters) .all() ) return dataset_events ","def _get_upstream_dataset_events(*, dag_run: DagRun, session: Session) -> List[""DagRun""]: """"""If dag run is dataset-triggered, return the dataset events that triggered it."""""" if not dag_run.run_type == DagRunType.DATASET_TRIGGERED: return [] previous_dag_run = ( session.query(DagRun) .filter( DagRun.dag_id == dag_run.dag_id, DagRun.execution_date < dag_run.execution_date, DagRun.run_type == DagRunType.DATASET_TRIGGERED, ) .order_by(DagRun.execution_date.desc()) .first() ) dataset_event_filters = [ DatasetDagRef.dag_id == dag_run.dag_id, DatasetEvent.created_at <= dag_run.execution_date, ] if previous_dag_run: dataset_event_filters.append(DatasetEvent.created_at > previous_dag_run.execution_date) dataset_events = ( session.query(DatasetEvent) .join(DatasetDagRef, DatasetEvent.dataset_id == DatasetDagRef.dataset_id) .filter(*dataset_event_filters) .all() ) return dataset_events " 53640,"def file_info_from_modpath( modpath: list[str], path: Sequence[str] | None = None, context_file: str | None = None, ) -> spec.ModuleSpec: """"""Given a mod path (i.e. split module / package name), return the corresponding file, giving priority to source file over precompiled. file if it exists :param modpath: split module's name (i.e name of a module or package split on '.') (this means explicit relative imports that start with dots have empty strings in this list!) :param path: optional list of path where the module or package should be searched (use sys.path if nothing or None is given) :param context_file: context file to consider, necessary if the identifier has been introduced using a relative import unresolvable in the actual context (i.e. modutils) :raise ImportError: if there is no such module in the directory :return: the path to the module's file or None if it's an integrated builtin module such as 'sys' """""" if context_file is not None: context: str | None = os.path.dirname(context_file) else: context = context_file if modpath[0] == ""xml"": # handle _xmlplus try: return _spec_from_modpath([""_xmlplus""] + modpath[1:], path, context) except ImportError: return _spec_from_modpath(modpath, path, context) elif modpath == [""os"", ""path""]: # FIXME: currently ignoring search_path... 
return spec.ModuleSpec( name=""os.path"", location=os.path.__file__, type=spec.ModuleType.PY_SOURCE, ) return _spec_from_modpath(modpath, path, context) ","def file_info_from_modpath( modpath: list[str], path: Sequence[str] | None = None, context_file: str | None = None, ) -> spec.ModuleSpec: """"""Given a mod path (i.e. split module / package name), return the corresponding file. Giving priority to source file over precompiled file if it exists. :param modpath: split module's name (i.e name of a module or package split on '.') (this means explicit relative imports that start with dots have empty strings in this list!) :param path: optional list of path where the module or package should be searched (use sys.path if nothing or None is given) :param context_file: context file to consider, necessary if the identifier has been introduced using a relative import unresolvable in the actual context (i.e. modutils) :raise ImportError: if there is no such module in the directory :return: the path to the module's file or None if it's an integrated builtin module such as 'sys' """""" if context_file is not None: context: str | None = os.path.dirname(context_file) else: context = context_file if modpath[0] == ""xml"": # handle _xmlplus try: return _spec_from_modpath([""_xmlplus""] + modpath[1:], path, context) except ImportError: return _spec_from_modpath(modpath, path, context) elif modpath == [""os"", ""path""]: # FIXME: currently ignoring search_path... return spec.ModuleSpec( name=""os.path"", location=os.path.__file__, type=spec.ModuleType.PY_SOURCE, ) return _spec_from_modpath(modpath, path, context) " 23209,"def xontribs_list(to_json=False): """"""List installed xontribs and show whether they are loaded or not Parameters ---------- to_json : -j, --json reports results as json """""" data = xontrib_data() if to_json: s = json.dumps(data) print(s) else: nname = max([6] + [len(x) for x in data]) s = """" for name, d in data.items(): s += ""{PURPLE}"" + name + ""{RESET} "" + "" "" * (nname - len(name)) if d[""loaded""]: s += ""{GREEN}loaded{RESET}"" + "" "" * 4 else: s += ""{RED}not-loaded{RESET}"" s += "" "" if d[""auto""]: s += ""{GREEN}auto{RESET}"" else: s += ""{CYAN}manual{RESET}"" s += ""\n"" print_color(s[:-1]) ","def xontribs_list(to_json=False): """"""List installed xontribs and show whether they are loaded or not Parameters ---------- to_json : -j, --json reports results as json """""" data = xontrib_data() if to_json: s = json.dumps(data) print(s) else: nname = max([6] + [len(x) for x in data]) s = """" for name, d in data.items(): s += ""{PURPLE}"" + name + ""{RESET} "" + "" "" * (nname - len(name)) if d[""loaded""]: s += ""{GREEN}loaded{RESET}"" + "" "" * 4 else: s += ""{RED}not-loaded{RESET}"" s += "" "" if d[""auto""]: s += ""{GREEN}auto{RESET}"" elif d[""loaded""]: s += ""{CYAN}manual{RESET}"" s += ""\n"" print_color(s[:-1]) " 30314,"def return_warning(message, exit=True, warning='', outputs=None, ignore_auto_extract=False): """""" Returns error entry with given message and exits the script :type message: ``str`` :param message: The message to return in the entry (required) :type exit: ``bool`` :param exit: Determines if the program will terminate after the command. Default is False. 
:type warning: ``str`` :param warning: The raw warning message to log (optional) :type outputs: ``dict or None`` :param outputs: the outputs that will be returned to playbook/investigation context (optional) :type ignore_auto_extract: ``bool`` :param ignore_auto_extract: Determines if the war-room entry will be auto enriched. Default is false. :return: Error entry object :rtype: ``dict`` """""" LOG(message) if warning: LOG(warning) LOG.print_log() demisto.results({ 'Type': entryTypes['error'], 'ContentsFormat': formats['text'], 'IgnoreAutoExtract': ignore_auto_extract, 'Contents': str(message), ""EntryContext"": outputs }) if exit: sys.exit(0) ","def return_warning(message, exit=True, warning='', outputs=None, ignore_auto_extract=False): """""" Returns error entry with given message and exits the script :type message: ``str`` :param message: The message to return in the entry (required) :type exit: ``bool`` :param exit: Determines if the program will terminate after the command. Default is False. :type warning: ``str`` :param warning: The raw warning message to log (optional) :type outputs: ``dict or None`` :param outputs: the outputs that will be returned to playbook/investigation context (optional) :type ignore_auto_extract: ``bool`` :param ignore_auto_extract: Determines if the war-room entry will be auto enriched. Default is false. :return: Error entry object :rtype: ``dict`` """""" LOG(message) if warning: LOG(warning) LOG.print_log() demisto.results({ 'Type': 11, 'ContentsFormat': formats['text'], 'IgnoreAutoExtract': ignore_auto_extract, 'Contents': str(message), ""EntryContext"": outputs }) if exit: sys.exit(0) " 25349,"def bold(text: str, escape_formatting: bool = True) -> str: """"""Get the given text in bold. Note: This escapes text prior to bolding. Parameters ---------- text : str The text to be marked up. escape_formatting : `bool`, optional Set to :code:`False` to not escape markdown formatting in the text. Returns ------- str The marked up text. """""" text = escape(text, formatting=escape_formatting) return ""**{}**"".format(text) ","def bold(text: str, escape_formatting: bool = True) -> str: """"""Get the given text in bold. Note: By default, this function will escape ``text`` prior to emboldening. Parameters ---------- text : str The text to be marked up. escape_formatting : `bool`, optional Set to :code:`False` to not escape markdown formatting in the text. Returns ------- str The marked up text. """""" text = escape(text, formatting=escape_formatting) return ""**{}**"".format(text) " 28597,"def plot_pair( data, group=""posterior"", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = ""scatter"", gridsize=""auto"", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """""" Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. 
Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. 
The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars=""regex"", ... coords=coords, ... divergences=True, ... textsize=18) """""" valid_kinds = [""scatter"", ""kde"", ""hexbin""] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f""Plot type {kind} not recognized."" ""Plot type must be in {valid_kinds}"")) if fill_last or contour: warnings.warn( ""fill_last and contour will be deprecated. Please use kde_kwargs"", UserWarning, ) if plot_kwargs: warnings.warn( ""plot_kwargs will be deprecated."" "" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs"", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == ""posterior"": divergent_group = ""sample_stats"" elif group == ""prior"": divergent_group = ""sample_stats_prior"" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), ""diverging""): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=(""diverging"",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( ""Divergences data not found, plotting without divergences. 
"" ""Make sure the sample method provides divergences data and "" ""that it is present in the `diverging` field of `sample_stats` "" ""or `sample_stats_prior` or set divergences=False"", UserWarning, ) if gridsize == ""auto"": gridsize = int(dataset.dims[""draw""] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_pair"", ""pairplot"", backend) ax = plot(**pairplot_kwargs) return ax ","def plot_pair( data, group=""posterior"", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = ""scatter"", gridsize=""auto"", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """""" Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. 
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`arviz.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars=""regex"", ... coords=coords, ... divergences=True, ... 
textsize=18) """""" valid_kinds = [""scatter"", ""kde"", ""hexbin""] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f""Plot type {kind} not recognized."" ""Plot type must be in {valid_kinds}"")) if fill_last or contour: warnings.warn( ""fill_last and contour will be deprecated. Please use kde_kwargs"", UserWarning, ) if plot_kwargs: warnings.warn( ""plot_kwargs will be deprecated."" "" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs"", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == ""posterior"": divergent_group = ""sample_stats"" elif group == ""prior"": divergent_group = ""sample_stats_prior"" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), ""diverging""): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=(""diverging"",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( ""Divergences data not found, plotting without divergences. "" ""Make sure the sample method provides divergences data and "" ""that it is present in the `diverging` field of `sample_stats` "" ""or `sample_stats_prior` or set divergences=False"", UserWarning, ) if gridsize == ""auto"": gridsize = int(dataset.dims[""draw""] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_pair"", ""pairplot"", backend) ax = plot(**pairplot_kwargs) return ax " 45138,"def list_feature_flags( batch_size: int = 10, client: FeatureFlagClient = None ) -> List[FeatureFlag]: """""" List all feature flags. This function always returns an empty list if the setting PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING is false. Args: batch_size: batch size of flags to retrieve at a time client: The FeatureFlagClient instance to use. Defaults to a client configured to look at an in-memory feature store. 
Returns: List[FeatureFlag]: list of all feature flags in the store """""" if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value(): return [] if not client: client = get_features_client() flags = [] offset = 0 while True: batch = list(client.list(limit=batch_size, offset=offset)) if not batch: break flags.extend(batch) offset += batch_size return flags ","def list_feature_flags( batch_size: int = 10, client: FeatureFlagClient = None ) -> List[FeatureFlag]: """""" List all feature flags. This function always returns an empty list if the setting `PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING` is false. Args: batch_size: batch size of flags to retrieve at a time client: The FeatureFlagClient instance to use. Defaults to a client configured to look at an in-memory feature store. Returns: List[FeatureFlag]: list of all feature flags in the store """""" if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value(): return [] if not client: client = get_features_client() flags = [] offset = 0 while True: batch = list(client.list(limit=batch_size, offset=offset)) if not batch: break flags.extend(batch) offset += batch_size return flags " 37821,"def main() -> None: platform: PlatformName parser = argparse.ArgumentParser( description=""Build wheels for all the platforms."", epilog="""""" Most options are supplied via environment variables or in --config-file (pyproject.toml usually). See https://github.com/pypa/cibuildwheel#options for info. """""", ) parser.add_argument( ""--platform"", choices=[""auto"", ""linux"", ""crosslinux"", ""macos"", ""windows""], default=os.environ.get(""CIBW_PLATFORM"", ""auto""), help="""""" Platform to build for. For ""linux"" you need docker running, on Mac or Linux. For ""macos"", you need a Mac machine, and note that this script is going to automatically install MacPython on your system, so don't run on your development machine. For ""windows"", you need to run in Windows, and it will build and test for all versions of Python. Default: auto. """""", ) arch_list_str = "", "".join(a.name for a in Architecture) parser.add_argument( ""--archs"", default=None, help=f"""""" Comma-separated list of CPU architectures to build for. When set to 'auto', builds the architectures natively supported on this machine. Set this option to build an architecture via emulation, for example, using binfmt_misc and QEMU. Default: auto. Choices: auto, auto64, auto32, native, all, {arch_list_str} """""", ) parser.add_argument( ""--output-dir"", help=""Destination folder for the wheels."", ) parser.add_argument( ""--config-file"", help="""""" TOML config file for cibuildwheel. Defaults to pyproject.toml, but can be overridden with this option. """""", ) parser.add_argument( ""package_dir"", default=""."", nargs=""?"", help="""""" Path to the package that you want wheels for. Must be a subdirectory of the working directory. When set, the working directory is still considered the 'project' and is copied into the Docker container on Linux. Default: the working directory. 
"""""", ) parser.add_argument( ""--print-build-identifiers"", action=""store_true"", help=""Print the build identifiers matched by the current invocation and exit."", ) parser.add_argument( ""--allow-empty"", action=""store_true"", help=""Do not report an error code if the build does not match any wheels."", ) parser.add_argument( ""--prerelease-pythons"", action=""store_true"", help=""Enable pre-release Python versions if available."", ) args = parser.parse_args() if args.platform != ""auto"": platform = args.platform else: ci_provider = detect_ci_provider() if ci_provider is None: print( textwrap.dedent( """""" cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server; Travis CI, AppVeyor, Azure Pipelines, GitHub Actions, CircleCI, and Gitlab are supported. You can run on your development machine or other CI providers using the --platform argument. Check --help output for more information. """""" ), file=sys.stderr, ) sys.exit(2) if sys.platform.startswith(""linux""): platform = ""linux"" elif sys.platform == ""darwin"": platform = ""macos"" elif sys.platform == ""win32"": platform = ""windows"" else: print( 'cibuildwheel: Unable to detect platform from ""sys.platform"" in a CI environment. You can run ' ""cibuildwheel using the --platform argument. Check --help output for more information."", file=sys.stderr, ) sys.exit(2) if platform not in PLATFORMS: print(f""cibuildwheel: Unsupported platform: {platform}"", file=sys.stderr) sys.exit(2) package_dir = Path(args.package_dir) manylinux_identifiers = { f""manylinux-{build_platform}-image"" for build_platform in MANYLINUX_ARCHS } disallow = { ""linux"": {""dependency-versions""}, ""macos"": manylinux_identifiers, ""windows"": manylinux_identifiers, } options = ConfigOptions(package_dir, args.config_file, platform=platform, disallow=disallow) output_dir = Path( args.output_dir if args.output_dir is not None else os.environ.get(""CIBW_OUTPUT_DIR"", ""wheelhouse"") ) build_config = options(""build"", env_plat=False, sep="" "") or ""*"" skip_config = options(""skip"", env_plat=False, sep="" "") test_skip = options(""test-skip"", env_plat=False, sep="" "") archs_config_str = args.archs or options(""archs"", sep="" "") build_frontend_str = options(""build-frontend"", env_plat=False) environment_config = options(""environment"", table={""item"": '{k}=""{v}""', ""sep"": "" ""}) before_all = options(""before-all"", sep="" && "") before_build = options(""before-build"", sep="" && "") repair_command = options(""repair-wheel-command"", sep="" && "") dependency_versions = options(""dependency-versions"") test_command = options(""test-command"", sep="" && "") before_test = options(""before-test"", sep="" && "") test_requires = options(""test-requires"", sep="" "").split() test_extras = options(""test-extras"", sep="","") build_verbosity_str = options(""build-verbosity"") prerelease_pythons = args.prerelease_pythons or cibuildwheel.util.strtobool( os.environ.get(""CIBW_PRERELEASE_PYTHONS"", ""0"") ) build_frontend: BuildFrontend if build_frontend_str == ""build"": build_frontend = ""build"" elif build_frontend_str == ""pip"": build_frontend = ""pip"" else: msg = f""cibuildwheel: Unrecognised build frontend '{build_frontend}', only 'pip' and 'build' are supported"" print(msg, file=sys.stderr) sys.exit(2) package_files = {""setup.py"", ""setup.cfg"", ""pyproject.toml""} if not any(package_dir.joinpath(name).exists() for name in package_files): names = "", "".join(sorted(package_files, reverse=True)) msg = f""cibuildwheel: Could not 
find any of {{{names}}} at root of package"" print(msg, file=sys.stderr) sys.exit(2) # This is not supported in tool.cibuildwheel, as it comes from a standard location. # Passing this in as an environment variable will override pyproject.toml, setup.cfg, or setup.py requires_python_str: Optional[str] = os.environ.get( ""CIBW_PROJECT_REQUIRES_PYTHON"" ) or get_requires_python_str(package_dir) requires_python = None if requires_python_str is None else SpecifierSet(requires_python_str) deprecated_selectors(""CIBW_BUILD"", build_config, error=True) deprecated_selectors(""CIBW_SKIP"", skip_config) deprecated_selectors(""CIBW_TEST_SKIP"", test_skip) build_selector = BuildSelector( build_config=build_config, skip_config=skip_config, requires_python=requires_python, prerelease_pythons=prerelease_pythons, ) test_selector = TestSelector(skip_config=test_skip) try: environment = parse_environment(environment_config) except (EnvironmentParseError, ValueError): print(f'cibuildwheel: Malformed environment option ""{environment_config}""', file=sys.stderr) traceback.print_exc(None, sys.stderr) sys.exit(2) if dependency_versions == ""pinned"": dependency_constraints: Optional[ DependencyConstraints ] = DependencyConstraints.with_defaults() elif dependency_versions == ""latest"": dependency_constraints = None else: dependency_versions_path = Path(dependency_versions) dependency_constraints = DependencyConstraints(dependency_versions_path) if test_extras: test_extras = f""[{test_extras}]"" try: build_verbosity = min(3, max(-3, int(build_verbosity_str))) except ValueError: build_verbosity = 0 # Add CIBUILDWHEEL environment variable # This needs to be passed on to the docker container in linux.py os.environ[""CIBUILDWHEEL""] = ""1"" archs = Architecture.parse_config(archs_config_str, platform=platform) identifiers = get_build_identifiers(platform, build_selector, archs) if args.print_build_identifiers: for identifier in identifiers: print(identifier) sys.exit(0) manylinux_images: Dict[str, str] = {} if platform == ""linux"" or platform == ""crosslinux"": pinned_docker_images_file = resources_dir / ""pinned_docker_images.cfg"" all_pinned_docker_images = ConfigParser() all_pinned_docker_images.read(pinned_docker_images_file) # all_pinned_docker_images looks like a dict of dicts, e.g. # { 'x86_64': {'manylinux1': '...', 'manylinux2010': '...', 'manylinux2014': '...'}, # 'i686': {'manylinux1': '...', 'manylinux2010': '...', 'manylinux2014': '...'}, # 'pypy_x86_64': {'manylinux2010': '...' } # ... 
} for build_platform in MANYLINUX_ARCHS: pinned_images = all_pinned_docker_images[build_platform] config_value = options(f""manylinux-{build_platform}-image"") if config_value is None: # default to manylinux2010 if it's available, otherwise manylinux2014 image = pinned_images.get(""manylinux2010"") or pinned_images.get(""manylinux2014"") elif config_value in pinned_images: image = pinned_images[config_value] else: image = config_value manylinux_images[build_platform] = image build_options = BuildOptions( architectures=archs, package_dir=package_dir, output_dir=output_dir, test_command=test_command, test_requires=test_requires, test_extras=test_extras, before_test=before_test, before_build=before_build, before_all=before_all, build_verbosity=build_verbosity, build_selector=build_selector, test_selector=test_selector, repair_command=repair_command, environment=environment, dependency_constraints=dependency_constraints, manylinux_images=manylinux_images or None, build_frontend=build_frontend, ) # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print' sys.stdout = Unbuffered(sys.stdout) # type: ignore print_preamble(platform, build_options) try: allowed_architectures_check(platform, build_options.architectures) except ValueError as err: print(""cibuildwheel:"", *err.args, file=sys.stderr) sys.exit(4) if not identifiers: print(f""cibuildwheel: No build identifiers selected: {build_selector}"", file=sys.stderr) if not args.allow_empty: sys.exit(3) if not output_dir.exists(): output_dir.mkdir(parents=True) with cibuildwheel.util.print_new_wheels( ""\n{n} wheels produced in {m:.0f} minutes:"", output_dir ): if platform == ""linux"": cibuildwheel.linux.build(build_options) elif platform == ""crosslinux"": cibuildwheel.crosslinux.build(build_options) elif platform == ""windows"": cibuildwheel.windows.build(build_options) elif platform == ""macos"": cibuildwheel.macos.build(build_options) else: assert_never(platform) ","def main() -> None: platform: PlatformName parser = argparse.ArgumentParser( description=""Build wheels for all the platforms."", epilog="""""" Most options are supplied via environment variables or in --config-file (pyproject.toml usually). See https://github.com/pypa/cibuildwheel#options for info. """""", ) parser.add_argument( ""--platform"", choices=[""auto"", ""linux"", ""crosslinux"", ""macos"", ""windows""], default=os.environ.get(""CIBW_PLATFORM"", ""auto""), help="""""" Platform to build for. For ""linux"" you need docker running, on Mac or Linux. For ""macos"", you need a Mac machine, and note that this script is going to automatically install MacPython on your system, so don't run on your development machine. For ""windows"", you need to run in Windows, and it will build and test for all versions of Python. Default: auto. """""", ) arch_list_str = "", "".join(a.name for a in Architecture) parser.add_argument( ""--archs"", default=None, help=f"""""" Comma-separated list of CPU architectures to build for. When set to 'auto', builds the architectures natively supported on this machine. Set this option to build an architecture via emulation, for example, using binfmt_misc and QEMU. Default: auto. Choices: auto, auto64, auto32, native, all, {arch_list_str} """""", ) parser.add_argument( ""--output-dir"", help=""Destination folder for the wheels."", ) parser.add_argument( ""--config-file"", help="""""" TOML config file for cibuildwheel. 
Defaults to pyproject.toml, but can be overridden with this option. """""", ) parser.add_argument( ""package_dir"", default=""."", nargs=""?"", help="""""" Path to the package that you want wheels for. Must be a subdirectory of the working directory. When set, the working directory is still considered the 'project' and is copied into the Docker container on Linux. Default: the working directory. """""", ) parser.add_argument( ""--print-build-identifiers"", action=""store_true"", help=""Print the build identifiers matched by the current invocation and exit."", ) parser.add_argument( ""--allow-empty"", action=""store_true"", help=""Do not report an error code if the build does not match any wheels."", ) parser.add_argument( ""--prerelease-pythons"", action=""store_true"", help=""Enable pre-release Python versions if available."", ) args = parser.parse_args() if args.platform != ""auto"": platform = args.platform else: ci_provider = detect_ci_provider() if ci_provider is None: print( textwrap.dedent( """""" cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server; Travis CI, AppVeyor, Azure Pipelines, GitHub Actions, CircleCI, and Gitlab are supported. You can run on your development machine or other CI providers using the --platform argument. Check --help output for more information. """""" ), file=sys.stderr, ) sys.exit(2) if sys.platform.startswith(""linux""): platform = ""linux"" elif sys.platform == ""darwin"": platform = ""macos"" elif sys.platform == ""win32"": platform = ""windows"" else: print( 'cibuildwheel: Unable to detect platform from ""sys.platform"" in a CI environment. You can run ' ""cibuildwheel using the --platform argument. Check --help output for more information."", file=sys.stderr, ) sys.exit(2) if platform not in PLATFORMS: print(f""cibuildwheel: Unsupported platform: {platform}"", file=sys.stderr) sys.exit(2) package_dir = Path(args.package_dir) manylinux_identifiers = { f""manylinux-{build_platform}-image"" for build_platform in MANYLINUX_ARCHS } disallow = { ""linux"": {""dependency-versions""}, ""macos"": manylinux_identifiers, ""windows"": manylinux_identifiers, } options = ConfigOptions(package_dir, args.config_file, platform=platform, disallow=disallow) output_dir = Path( args.output_dir if args.output_dir is not None else os.environ.get(""CIBW_OUTPUT_DIR"", ""wheelhouse"") ) build_config = options(""build"", env_plat=False, sep="" "") or ""*"" skip_config = options(""skip"", env_plat=False, sep="" "") test_skip = options(""test-skip"", env_plat=False, sep="" "") archs_config_str = args.archs or options(""archs"", sep="" "") build_frontend_str = options(""build-frontend"", env_plat=False) environment_config = options(""environment"", table={""item"": '{k}=""{v}""', ""sep"": "" ""}) before_all = options(""before-all"", sep="" && "") before_build = options(""before-build"", sep="" && "") repair_command = options(""repair-wheel-command"", sep="" && "") dependency_versions = options(""dependency-versions"") test_command = options(""test-command"", sep="" && "") before_test = options(""before-test"", sep="" && "") test_requires = options(""test-requires"", sep="" "").split() test_extras = options(""test-extras"", sep="","") build_verbosity_str = options(""build-verbosity"") prerelease_pythons = args.prerelease_pythons or cibuildwheel.util.strtobool( os.environ.get(""CIBW_PRERELEASE_PYTHONS"", ""0"") ) build_frontend: BuildFrontend if build_frontend_str == ""build"": build_frontend = ""build"" elif build_frontend_str == ""pip"": 
build_frontend = ""pip"" else: msg = f""cibuildwheel: Unrecognised build frontend '{build_frontend}', only 'pip' and 'build' are supported"" print(msg, file=sys.stderr) sys.exit(2) package_files = {""setup.py"", ""setup.cfg"", ""pyproject.toml""} if not any(package_dir.joinpath(name).exists() for name in package_files): names = "", "".join(sorted(package_files, reverse=True)) msg = f""cibuildwheel: Could not find any of {{{names}}} at root of package"" print(msg, file=sys.stderr) sys.exit(2) # This is not supported in tool.cibuildwheel, as it comes from a standard location. # Passing this in as an environment variable will override pyproject.toml, setup.cfg, or setup.py requires_python_str: Optional[str] = os.environ.get( ""CIBW_PROJECT_REQUIRES_PYTHON"" ) or get_requires_python_str(package_dir) requires_python = None if requires_python_str is None else SpecifierSet(requires_python_str) deprecated_selectors(""CIBW_BUILD"", build_config, error=True) deprecated_selectors(""CIBW_SKIP"", skip_config) deprecated_selectors(""CIBW_TEST_SKIP"", test_skip) build_selector = BuildSelector( build_config=build_config, skip_config=skip_config, requires_python=requires_python, prerelease_pythons=prerelease_pythons, ) test_selector = TestSelector(skip_config=test_skip) try: environment = parse_environment(environment_config) except (EnvironmentParseError, ValueError): print(f'cibuildwheel: Malformed environment option ""{environment_config}""', file=sys.stderr) traceback.print_exc(None, sys.stderr) sys.exit(2) if dependency_versions == ""pinned"": dependency_constraints: Optional[ DependencyConstraints ] = DependencyConstraints.with_defaults() elif dependency_versions == ""latest"": dependency_constraints = None else: dependency_versions_path = Path(dependency_versions) dependency_constraints = DependencyConstraints(dependency_versions_path) if test_extras: test_extras = f""[{test_extras}]"" try: build_verbosity = min(3, max(-3, int(build_verbosity_str))) except ValueError: build_verbosity = 0 # Add CIBUILDWHEEL environment variable # This needs to be passed on to the docker container in linux.py os.environ[""CIBUILDWHEEL""] = ""1"" archs = Architecture.parse_config(archs_config_str, platform=platform) identifiers = get_build_identifiers(platform, build_selector, archs) if args.print_build_identifiers: for identifier in identifiers: print(identifier) sys.exit(0) manylinux_images: Dict[str, str] = {} if platform in {""linux"", ""crosslinux""}: pinned_docker_images_file = resources_dir / ""pinned_docker_images.cfg"" all_pinned_docker_images = ConfigParser() all_pinned_docker_images.read(pinned_docker_images_file) # all_pinned_docker_images looks like a dict of dicts, e.g. # { 'x86_64': {'manylinux1': '...', 'manylinux2010': '...', 'manylinux2014': '...'}, # 'i686': {'manylinux1': '...', 'manylinux2010': '...', 'manylinux2014': '...'}, # 'pypy_x86_64': {'manylinux2010': '...' } # ... 
} for build_platform in MANYLINUX_ARCHS: pinned_images = all_pinned_docker_images[build_platform] config_value = options(f""manylinux-{build_platform}-image"") if config_value is None: # default to manylinux2010 if it's available, otherwise manylinux2014 image = pinned_images.get(""manylinux2010"") or pinned_images.get(""manylinux2014"") elif config_value in pinned_images: image = pinned_images[config_value] else: image = config_value manylinux_images[build_platform] = image build_options = BuildOptions( architectures=archs, package_dir=package_dir, output_dir=output_dir, test_command=test_command, test_requires=test_requires, test_extras=test_extras, before_test=before_test, before_build=before_build, before_all=before_all, build_verbosity=build_verbosity, build_selector=build_selector, test_selector=test_selector, repair_command=repair_command, environment=environment, dependency_constraints=dependency_constraints, manylinux_images=manylinux_images or None, build_frontend=build_frontend, ) # Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print' sys.stdout = Unbuffered(sys.stdout) # type: ignore print_preamble(platform, build_options) try: allowed_architectures_check(platform, build_options.architectures) except ValueError as err: print(""cibuildwheel:"", *err.args, file=sys.stderr) sys.exit(4) if not identifiers: print(f""cibuildwheel: No build identifiers selected: {build_selector}"", file=sys.stderr) if not args.allow_empty: sys.exit(3) if not output_dir.exists(): output_dir.mkdir(parents=True) with cibuildwheel.util.print_new_wheels( ""\n{n} wheels produced in {m:.0f} minutes:"", output_dir ): if platform == ""linux"": cibuildwheel.linux.build(build_options) elif platform == ""crosslinux"": cibuildwheel.crosslinux.build(build_options) elif platform == ""windows"": cibuildwheel.windows.build(build_options) elif platform == ""macos"": cibuildwheel.macos.build(build_options) else: assert_never(platform) " 34164,"def add_server_arguments(parser: argparse.ArgumentParser): parser.add_argument( ""--log-file"", type=str, default=""rasa_core.log"", help=""Store logs in specified file."", ) add_endpoint_param( parser, help_text=""Configuration file for the model server and the connectors as a yml file."", ) server_arguments = parser.add_argument_group(""Server Settings"") server_arguments.add_argument( ""-p"", ""--port"", default=constants.DEFAULT_SERVER_PORT, type=int, help=""Port to run the server at."", ) server_arguments.add_argument( ""-t"", ""--auth-token"", type=str, help=""Enable token based authentication. Requests need to provide "" ""the token to be accepted."", ) server_arguments.add_argument( ""--cors"", nargs=""*"", type=str, help=""Enable CORS for the passed origin. Use * to whitelist all origins."", ) server_arguments.add_argument( ""--enable-api"", action=""store_true"", help=""Start the web server API in addition to the input channel."", ) server_arguments.add_argument( ""--remote-storage"", help=""Set the remote location where your Rasa model is stored, "" ""e.g. 
on AWS."", ) channel_arguments = parser.add_argument_group(""Channels"") channel_arguments.add_argument( ""--credentials"", default=None, help=""Authentication credentials for the connector as a yml file."", ) channel_arguments.add_argument( ""--connector"", type=str, help=""Service to connect to."" ) jwt_auth = parser.add_argument_group(""JWT Authentication"") jwt_auth.add_argument( ""--jwt-secret"", type=str, help=""Public key for asymmetric JWT methods or shared secret"" ""for symmetric methods. Please also make sure to use "" ""--jwt-method to select the method of the signature, "" ""otherwise this argument will be ignored."", ) jwt_auth.add_argument( ""--jwt-method"", type=str, default=""HS256"", help=""Method used for the signature of the JWT authentication payload."", ) ","def add_server_arguments(parser: argparse.ArgumentParser): parser.add_argument( ""--log-file"", type=str, default=""rasa_core.log"", help=""Store logs in specified file."", ) add_endpoint_param( parser, help_text=""Configuration file for the model server and the connectors as a yml file."", ) server_arguments = parser.add_argument_group(""Server Settings"") server_arguments.add_argument( ""-p"", ""--port"", default=constants.DEFAULT_SERVER_PORT, type=int, help=""Port to run the server at."", ) server_arguments.add_argument( ""-t"", ""--auth-token"", type=str, help=""Enable token based authentication. Requests need to provide "" ""the token to be accepted."", ) server_arguments.add_argument( ""--cors"", nargs=""*"", type=str, help=""Enable CORS for the passed origin. Use * to whitelist all origins."", ) server_arguments.add_argument( ""--enable-api"", action=""store_true"", help=""Start the web server API in addition to the input channel."", ) server_arguments.add_argument( ""--remote-storage"", help=""Set the remote location where your Rasa model is stored, e.g. on AWS."", ) channel_arguments = parser.add_argument_group(""Channels"") channel_arguments.add_argument( ""--credentials"", default=None, help=""Authentication credentials for the connector as a yml file."", ) channel_arguments.add_argument( ""--connector"", type=str, help=""Service to connect to."" ) jwt_auth = parser.add_argument_group(""JWT Authentication"") jwt_auth.add_argument( ""--jwt-secret"", type=str, help=""Public key for asymmetric JWT methods or shared secret"" ""for symmetric methods. Please also make sure to use "" ""--jwt-method to select the method of the signature, "" ""otherwise this argument will be ignored."", ) jwt_auth.add_argument( ""--jwt-method"", type=str, default=""HS256"", help=""Method used for the signature of the JWT authentication payload."", ) " 30968,"def update_package(single_item): """""" Updates a single package on the marketplace Added dependency management, do not update if dependency is not installed """""" change_log_keys = list(single_item['changelog'].keys()) # Grab the latest version latest_ver = max(change_log_keys, key=major_minor_micro) # Grab Name of package id_item = single_item['id'] # Grab dependencies of package dependencies = single_item['dependencies'] # True for good to update, False for dependency missing boolres = checkDependencies(dependencies) if not boolres: print(f""Dependency missing from {id_item}, skipping.. 
Please update {id_item} manually"") return boolres data = { ""packs"": [{ ""id"": id_item, ""version"": latest_ver, ""transition"": None, ""skipInstall"": False }], ""ignoreWarnings"": False, ""transitionPrice"": 0 } demisto.executeCommand(""demisto-api-post"", {""uri"": ""/contentpacks/marketplace/install"", ""body"": json.dumps(data)})[0]['Contents'] return boolres ","def update_package(single_item): """""" Updates a single package on the marketplace Added dependency management, do not update if dependency is not installed """""" change_log_keys = list(single_item['changelog'].keys()) # Grab the latest version latest_ver = max(change_log_keys, key=major_minor_micro) # Grab Name of package id_item = single_item['id'] # Grab dependencies of package dependencies = single_item['dependencies'] # True for good to update, False for dependency missing boolres = checkDependencies(dependencies) if not boolres: return_error(f""Dependency missing from {id_item}, skipping.. Please update {id_item} manually"") return boolres data = { ""packs"": [{ ""id"": id_item, ""version"": latest_ver, ""transition"": None, ""skipInstall"": False }], ""ignoreWarnings"": False, ""transitionPrice"": 0 } demisto.executeCommand(""demisto-api-post"", {""uri"": ""/contentpacks/marketplace/install"", ""body"": json.dumps(data)})[0]['Contents'] return boolres " 39449,"def PolyLine() -> UnstructuredGrid: """"""Create a :class:`pyvista.UnstructuredGrid` containing a single poly line. This represents a set of line segments as a single cell. This cell corresponds to the :attr:`pyvista.CellType.POLY_LINE` cell type. Returns ------- pyvista.UnstructuredGrid UnstructuredGrid containing a single poly line. Examples -------- Create and plot a single poly line. >>> import pyvista as pv >>> grid = pv.cells.PolyLine() >>> pv.cells.plot_cell(grid) List the grid's cells. This could be any number of points. >>> grid.cells array([4, 0, 1, 2, 3]) List the grid's points. >>> grid.points pyvista_ndarray([[0. , 0. , 0. ], [0.5, 0. , 0. ], [0.5, 1. , 0. ], [0. , 1. , 0. ]]) >>> grid.celltypes # same as pyvista.CellType.POLY_LINE array([4], dtype=uint8) """""" points = [ [0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 1.0, 0.0], [0.0, 1.0, 0.0], ] cells = [len(points)] + list(range(len(points))) return UnstructuredGrid(cells, [CellType.POLY_LINE], points) ","def PolyLine() -> UnstructuredGrid: """"""Create a :class:`pyvista.UnstructuredGrid` containing a single poly line. This represents a set of line segments as a single cell. This cell corresponds to the :attr:`pyvista.CellType.POLY_LINE` cell type. Returns ------- pyvista.UnstructuredGrid UnstructuredGrid containing a single polyline. Examples -------- Create and plot a single poly line. >>> import pyvista as pv >>> grid = pv.cells.PolyLine() >>> pv.cells.plot_cell(grid) List the grid's cells. This could be any number of points. >>> grid.cells array([4, 0, 1, 2, 3]) List the grid's points. >>> grid.points pyvista_ndarray([[0. , 0. , 0. ], [0.5, 0. , 0. ], [0.5, 1. , 0. ], [0. , 1. , 0. 
]]) >>> grid.celltypes # same as pyvista.CellType.POLY_LINE array([4], dtype=uint8) """""" points = [ [0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 1.0, 0.0], [0.0, 1.0, 0.0], ] cells = [len(points)] + list(range(len(points))) return UnstructuredGrid(cells, [CellType.POLY_LINE], points) " 27843,"def _random_choice(xp, a, size, p): try: return xp.random.choice(a, size, p=p) except ValueError: # Validate the sum as NumPy PR #6131 (numpy>=1.10) tol = xp.finfo(p.dtype.eps) ** 0.5 p = p.astype(xp.float64) xp.testing.assert_allclose(p.sum(), 1, rtol=0, atol=tol) p /= p.sum() return xp.random.choice(a, size, p=p) ","def _random_choice(xp, a, size, p): try: return xp.random.choice(a, size, p=p) except ValueError: # Validate the sum of the probabilities as NumPy PR #6131 (numpy>=1.10) tol = xp.finfo(p.dtype.eps) ** 0.5 p = p.astype(xp.float64) xp.testing.assert_allclose(p.sum(), 1, rtol=0, atol=tol) p /= p.sum() return xp.random.choice(a, size, p=p) " 39101,"def build_rows_process(queue, users, directory_client, process_number, num_processes): """"""A process functions to process all users in parallel. Gets all users, but processes and buids the rows only for the subset of users assigned to the process. Args: queue: shared synchronized queue where results are stored users: list of all users directory_client: client for the Directory SDK API process_number: number of this process, from 0 to num_processes num_processes: total number of processes """""" rows = [] total_users = len(users) for i, user in enumerate(users, 1): if terminate_processes.value: break if i % num_processes == process_number: logger.debug(""Process #%s - User %d/%d: %s"", process_number, i, total_users, user) rows.extend(get_denormalized_scopes_for_user( directory_client, user)) # Post results in the queue, if not terminating if not terminate_processes.value: queue.put({ ""process_number"": process_number, ""message_type"": ""DATA"", ""data"": rows }) # Post process complete message queue.put({""process_number"": process_number, ""message_type"": ""STOP""}) ","def build_rows_process(queue, users, directory_client, process_number, num_processes): """"""A process functions to process all users in parallel. Gets all users, but processes and builds the rows only for the subset of users assigned to the process. Args: queue: shared synchronized queue where results are stored users: list of all users directory_client: client for the Directory SDK API process_number: number of this process, from 0 to num_processes num_processes: total number of processes """""" rows = [] total_users = len(users) for i, user in enumerate(users, 1): if terminate_processes.value: break if i % num_processes == process_number: logger.debug(""Process #%s - User %d/%d: %s"", process_number, i, total_users, user) rows.extend(get_denormalized_scopes_for_user( directory_client, user)) # Post results in the queue, if not terminating if not terminate_processes.value: queue.put({ ""process_number"": process_number, ""message_type"": ""DATA"", ""data"": rows }) # Post process complete message queue.put({""process_number"": process_number, ""message_type"": ""STOP""}) " 7897,"def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None, n_vf_iter=30, log=False, path_out=None, **kwargs): r""""""Convert point-wise cross section to multipole data via Vector Fitting. 
Parameters ---------- energy : np.ndarray Energy array ce_xs : np.ndarray Point-wise cross sections to be fitted mts : Iterable of int Reaction list rtol : float, optional Relative error tolerance atol : float, optional Absolute error tolerance orders : Iterable of int, optional A list of orders (number of poles) to be searched n_vf_iter : int, optional Number of maximum VF iterations log : bool or int, optional Whether to print running logs path_out : str, optional Path to save the figures **kwargs Additional keyword arguments Returns ------- Tuple (poles, residues) """""" # import vectfit package: https://github.com/liangjg/vectfit import vectfit as vf ne = energy.size nmt = len(mts) if ce_xs.shape != (nmt, ne): raise ValueError('Inconsistent cross section data.') # construct test data: interpolate xs with finer grids N_FINER = 10 ne_test = (ne-1)*N_FINER + 1 test_energy = np.interp(np.arange(ne_test), np.arange(ne_test, step=N_FINER), energy) test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue test_xs_ref = np.zeros((nmt, ne_test)) for i in range(nmt): test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i]) if log: print(""Energy: {:.3e} to {:.3e} eV ({} points)"".format( energy[0], energy[-1], ne)) # inputs f = ce_xs * energy # sigma*E s = np.sqrt(energy) # sqrt(E) test_s = np.sqrt(test_energy) weight = 1.0/f # very small cross sections can lead to huge weights, which will harm the # fitting accuracy MIN_CROSS_SECTION = 1e-7 for i in range(nmt): if np.all(ce_xs[i]<=MIN_CROSS_SECTION): weight[i] = 1.0 elif np.any(ce_xs[i]<=MIN_CROSS_SECTION): weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \ max(weight[i, ce_xs[i]>MIN_CROSS_SECTION]) # detect peaks (resonances) and determine VF order search range peaks, _ = find_peaks(ce_xs[0]+ce_xs[1]) n_peaks = peaks.size if orders is not None: # make sure orders are even integers orders = list(set([int(i/2)*2 for i in orders if i>=2])) else: lowest_order = max(2, 2*n_peaks) highest_order = max(200, 4*n_peaks) orders = list(range(lowest_order, highest_order+1, 2)) if log: print(""Found {} peaks"".format(n_peaks)) print(""Fitting orders from {} to {}"".format(orders[0], orders[-1])) # perform VF with increasing orders found_ideal = False n_discarded = 0 # for accelation, number of discarded searches best_quality = best_ratio = -np.inf for i, order in enumerate(orders): if log: print(""Order={}({}/{})"".format(order, i, len(orders))) # initial guessed poles poles = np.linspace(s[0], s[-1], order//2) poles = poles + poles*0.01j poles = np.sort(np.append(poles, np.conj(poles))) found_better = False # fitting iteration for i_vf in range(n_vf_iter): if log >= DETAILED_LOGGING: print(""VF iteration {}/{}"".format(i_vf+1, n_vf_iter)) # call vf try: poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight) except: break # convert real pole to conjugate pairs n_real_poles = 0 new_poles = [] for p in poles: p_r, p_i = np.real(p), np.imag(p) if (s[0] <= p_r <= s[-1]) and p_i == 0.: new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j] n_real_poles += 1 else: new_poles += [p] new_poles = np.array(new_poles) # re-calculate residues if poles changed if n_real_poles > 0: if log >= DETAILED_LOGGING: print("" # real poles: {}"".format(n_real_poles)) new_poles, residues, cf, f_fit, rms = \ vf.vectfit(f, s, new_poles, weight, skip_pole=True) # assess the result on test grid test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy abserr = np.abs(test_xs - test_xs_ref) relerr = abserr / test_xs_ref if np.any(np.isnan(abserr)): maxre, ratio, ratio2 = np.inf, 
-np.inf, -np.inf elif np.all(abserr <= atol): maxre, ratio, ratio2 = 0., 1., 1. else: maxre = np.max(relerr[abserr > atol]) ratio = np.sum((relerr= DETAILED_LOGGING: print("" # poles: {}"".format(new_poles.size)) print("" Max relative error: {:.3f}%"".format(maxre*100)) print("" Satisfaction: {:.1f}%, {:.1f}%"".format(ratio*100, ratio2*100)) print("" Quality: {:.2f}"".format(quality)) if quality > best_quality: if log >= DETAILED_LOGGING: print("" Best by far!"") found_better = True best_quality, best_ratio = quality, ratio best_poles, best_residues = new_poles, residues best_test_xs, best_relerr = test_xs, relerr if best_ratio >= 1.0: if log: print(""Found ideal results. Stop!"") found_ideal = True break else: if log >= DETAILED_LOGGING: print("" Discarded!"") if found_ideal: break # acceleration if found_better: n_discarded = 0 else: if order > max(2*n_peaks, 50) and best_ratio > 0.7: n_discarded += 1 if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9): if log >= DETAILED_LOGGING: print(""Couldn't get better results. Stop!"") break # merge conjugate poles real_idx = [] conj_idx = [] found_conj = False for i, p in enumerate(best_poles): if found_conj: found_conj = False continue if np.imag(p) == 0.: real_idx.append(i) else: if i < best_poles.size and np.conj(p) == best_poles[i+1]: found_conj = True conj_idx.append(i) else: raise RuntimeError(""Complex poles are not conjugate!"") if log: print(""Found {} real poles and {} conjugate complex pairs."".format( len(real_idx), len(conj_idx))) mp_poles = best_poles[real_idx+conj_idx] mp_residues = np.concatenate((best_residues[:, real_idx], best_residues[:, conj_idx]*2), axis=1)/1j if log: print(""Final number of poles: {}"".format(mp_poles.size)) if path_out: import matplotlib matplotlib.use(""agg"") import matplotlib.pyplot as plt if not os.path.exists(path_out): os.makedirs(path_out) for i, mt in enumerate(mts): fig, ax1 = plt.subplots() lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label=""ACE xs"") lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label=""VF xs"") ax2 = ax1.twinx() lns3 = ax2.loglog(test_energy, best_relerr[i], 'r', label=""Relative error"", alpha=0.5) lns = lns1 + lns2 + lns3 labels = [l.get_label() for l in lns] ax1.legend(lns, labels, loc='best') ax1.set_xlabel('energy (eV)') ax1.set_ylabel('cross section (b)', color='b') ax1.tick_params('y', colors='b') ax2.set_ylabel('relative error', color='r') ax2.tick_params('y', colors='r') plt.title(""MT {} vectfitted with {} poles"".format(mt, mp_poles.size)) fig.tight_layout() fig_file = os.path.join(path_out, ""{:.0f}-{:.0f}_MT{}.png"".format( energy[0], energy[-1], mt)) plt.savefig(fig_file) plt.close() if log: print(""Saved figure: {}"".format(fig_file)) return (mp_poles, mp_residues) ","def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None, n_vf_iter=30, log=False, path_out=None, **kwargs): r""""""Convert point-wise cross section to multipole data via Vector Fitting. 
Parameters ---------- energy : np.ndarray Energy array ce_xs : np.ndarray Point-wise cross sections to be fitted mts : Iterable of int Reaction list rtol : float, optional Relative error tolerance atol : float, optional Absolute error tolerance orders : Iterable of int, optional A list of orders (number of poles) to be searched n_vf_iter : int, optional Number of maximum VF iterations log : bool or int, optional Whether to print running logs path_out : str, optional Path to save the figures **kwargs Additional keyword arguments Returns ------- Tuple (poles, residues) """""" # import vectfit package: https://github.com/liangjg/vectfit import vectfit as vf ne = energy.size nmt = len(mts) if ce_xs.shape != (nmt, ne): raise ValueError('Inconsistent cross section data.') # construct test data: interpolate xs with finer grids N_FINER = 10 ne_test = (ne-1)*N_FINER + 1 test_energy = np.interp(np.arange(ne_test), np.arange(ne_test, step=N_FINER), energy) test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue test_xs_ref = np.zeros((nmt, ne_test)) for i in range(nmt): test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i]) if log: print(""Energy: {:.3e} to {:.3e} eV ({} points)"".format( energy[0], energy[-1], ne)) # inputs f = ce_xs * energy # sigma*E s = np.sqrt(energy) # sqrt(E) test_s = np.sqrt(test_energy) weight = 1.0/f # very small cross sections can lead to huge weights, which will harm the # fitting accuracy MIN_CROSS_SECTION = 1e-7 for i in range(nmt): if np.all(ce_xs[i]<=MIN_CROSS_SECTION): weight[i] = 1.0 elif np.any(ce_xs[i]<=MIN_CROSS_SECTION): weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \ max(weight[i, ce_xs[i]>MIN_CROSS_SECTION]) # detect peaks (resonances) and determine VF order search range peaks, _ = find_peaks(ce_xs[0]+ce_xs[1]) n_peaks = peaks.size if orders is not None: # make sure orders are even integers orders = list(set([int(i/2)*2 for i in orders if i>=2])) else: lowest_order = max(2, 2*n_peaks) highest_order = max(200, 4*n_peaks) orders = list(range(lowest_order, highest_order+1, 2)) if log: print(""Found {} peaks"".format(n_peaks)) print(""Fitting orders from {} to {}"".format(orders[0], orders[-1])) # perform VF with increasing orders found_ideal = False n_discarded = 0 # for accelation, number of discarded searches best_quality = best_ratio = -np.inf for i, order in enumerate(orders): if log: print(""Order={}({}/{})"".format(order, i, len(orders))) # initial guessed poles poles = np.linspace(s[0], s[-1], order//2) poles += poles*0.01j poles = np.sort(np.append(poles, np.conj(poles))) found_better = False # fitting iteration for i_vf in range(n_vf_iter): if log >= DETAILED_LOGGING: print(""VF iteration {}/{}"".format(i_vf+1, n_vf_iter)) # call vf try: poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight) except: break # convert real pole to conjugate pairs n_real_poles = 0 new_poles = [] for p in poles: p_r, p_i = np.real(p), np.imag(p) if (s[0] <= p_r <= s[-1]) and p_i == 0.: new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j] n_real_poles += 1 else: new_poles += [p] new_poles = np.array(new_poles) # re-calculate residues if poles changed if n_real_poles > 0: if log >= DETAILED_LOGGING: print("" # real poles: {}"".format(n_real_poles)) new_poles, residues, cf, f_fit, rms = \ vf.vectfit(f, s, new_poles, weight, skip_pole=True) # assess the result on test grid test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy abserr = np.abs(test_xs - test_xs_ref) relerr = abserr / test_xs_ref if np.any(np.isnan(abserr)): maxre, ratio, ratio2 = np.inf, -np.inf, 
-np.inf elif np.all(abserr <= atol): maxre, ratio, ratio2 = 0., 1., 1. else: maxre = np.max(relerr[abserr > atol]) ratio = np.sum((relerr= DETAILED_LOGGING: print("" # poles: {}"".format(new_poles.size)) print("" Max relative error: {:.3f}%"".format(maxre*100)) print("" Satisfaction: {:.1f}%, {:.1f}%"".format(ratio*100, ratio2*100)) print("" Quality: {:.2f}"".format(quality)) if quality > best_quality: if log >= DETAILED_LOGGING: print("" Best by far!"") found_better = True best_quality, best_ratio = quality, ratio best_poles, best_residues = new_poles, residues best_test_xs, best_relerr = test_xs, relerr if best_ratio >= 1.0: if log: print(""Found ideal results. Stop!"") found_ideal = True break else: if log >= DETAILED_LOGGING: print("" Discarded!"") if found_ideal: break # acceleration if found_better: n_discarded = 0 else: if order > max(2*n_peaks, 50) and best_ratio > 0.7: n_discarded += 1 if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9): if log >= DETAILED_LOGGING: print(""Couldn't get better results. Stop!"") break # merge conjugate poles real_idx = [] conj_idx = [] found_conj = False for i, p in enumerate(best_poles): if found_conj: found_conj = False continue if np.imag(p) == 0.: real_idx.append(i) else: if i < best_poles.size and np.conj(p) == best_poles[i+1]: found_conj = True conj_idx.append(i) else: raise RuntimeError(""Complex poles are not conjugate!"") if log: print(""Found {} real poles and {} conjugate complex pairs."".format( len(real_idx), len(conj_idx))) mp_poles = best_poles[real_idx+conj_idx] mp_residues = np.concatenate((best_residues[:, real_idx], best_residues[:, conj_idx]*2), axis=1)/1j if log: print(""Final number of poles: {}"".format(mp_poles.size)) if path_out: import matplotlib matplotlib.use(""agg"") import matplotlib.pyplot as plt if not os.path.exists(path_out): os.makedirs(path_out) for i, mt in enumerate(mts): fig, ax1 = plt.subplots() lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label=""ACE xs"") lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label=""VF xs"") ax2 = ax1.twinx() lns3 = ax2.loglog(test_energy, best_relerr[i], 'r', label=""Relative error"", alpha=0.5) lns = lns1 + lns2 + lns3 labels = [l.get_label() for l in lns] ax1.legend(lns, labels, loc='best') ax1.set_xlabel('energy (eV)') ax1.set_ylabel('cross section (b)', color='b') ax1.tick_params('y', colors='b') ax2.set_ylabel('relative error', color='r') ax2.tick_params('y', colors='r') plt.title(""MT {} vectfitted with {} poles"".format(mt, mp_poles.size)) fig.tight_layout() fig_file = os.path.join(path_out, ""{:.0f}-{:.0f}_MT{}.png"".format( energy[0], energy[-1], mt)) plt.savefig(fig_file) plt.close() if log: print(""Saved figure: {}"".format(fig_file)) return (mp_poles, mp_residues) " 59838,"def _get_cpu_count(): if platform.system() != ""Windows"": cpu_count = os.cpu_count() max_cores = min(cpu_count, int(os.getenv('MAX_BUILD_CORES', cpu_count))) return max_cores return 0 ","def _get_cpu_count(): if platform.system() != ""Windows"": cpu_count = os.cpu_count() try: user_max_cores = int(os.getenv('MAX_BUILD_CORES'), cpu_count) except ValueError as e: # print useful error message raise e max_cores = min(cpu_count, user_max_cores) return max_cores return 0 " 22746,"def _execute_build(target, archs, status, workspace): process = subprocess.Popen([ 'snapcraft', 'remote-build', '--launchpad-accept-public-upload', '--recover', '--build-on', ','.join(archs) ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, cwd=workspace) process_output = [] for 
line in process.stdout: process_output.append(line) _extract_state(target, line, status) return process.wait(), process_output ","def _execute_build(target, archs, status, workspace): process = subprocess.Popen([ 'snapcraft', 'remote-build', '--launchpad-accept-public-upload', '--recover', '--build-on', ','.join(archs) ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, cwd=workspace) process_output = [] for line in process.stdout: process_output.append(line) _extract_state(target, line, status) return process.wait(), process_output " 2301,"def test_sgd_optimizer_no_momentum(): params = [np.zeros(shape) for shape in shapes] rng = np.random.RandomState(0) for lr in [10 ** i for i in range(-3, 4)]: optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False) grads = [rng.random(shape) for shape in shapes] expected = [param - lr * grad for param, grad in zip(params, grads)] optimizer.update_params(grads) for exp, param in zip(expected, optimizer.params): assert_array_equal(exp, param) ","def test_sgd_optimizer_no_momentum(): params = [np.zeros(shape) for shape in shapes] rng = np.random.RandomState(0) for lr in [10 ** i for i in range(-3, 4)]: optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False) grads = [rng.random_sample(shape) for shape in shapes] expected = [param - lr * grad for param, grad in zip(params, grads)] optimizer.update_params(grads) for exp, param in zip(expected, optimizer.params): assert_array_equal(exp, param) " 6777,"def execute(): contact_details = frappe.get_list(""Contact"", fields=[""name"", ""email_id"", ""phone"", ""mobile_no"", ""modified_by"", ""creation"", ""modified""]) frappe.reload_doc(""contacts"", ""doctype"", ""contact_email"") frappe.reload_doc(""contacts"", ""doctype"", ""contact_phone"") frappe.reload_doc(""contacts"", ""doctype"", ""contact"") name_counter = 100000000 for contact_detail in contact_details: contact_name = contact_detail.name.replace('""', '\\""').replace(""'"", ""\\'"") if contact_detail.email_id: frappe.db.sql("""""" INSERT INTO `tabContact Email` (`idx`, `name`, `email_id`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`, `modified`, `modified_by`) VALUES (1, ""{0}"", ""{1}"", ""email_ids"", ""Contact"", ""{2}"", 1, ""{3}"", ""{4}"", ""{5}"") """""".format(str(name_counter), contact_detail.email_id, contact_name, contact_detail.creation, contact_detail.modified, contact_detail.modified_by)) name_counter += 1 if contact_detail.phone: frappe.db.sql("""""" INSERT INTO `tabContact Phone` (`idx`, `name`, `phone`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`, `modified`, `modified_by`) VALUES (1, ""{0}"", ""{1}"", ""phone_nos"", ""Contact"", ""{2}"", 1, ""{3}"", ""{4}"", ""{5}"") """""".format(str(name_counter), contact_detail.phone, contact_name, contact_detail.creation, contact_detail.modified, contact_detail.modified_by)) name_counter += 1 if contact_detail.mobile_no: frappe.db.sql("""""" INSERT INTO `tabContact Phone` (`idx`, `name`, `phone`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`, `modified`, `modified_by`) VALUES (2, ""{0}"", ""{1}"", ""phone_nos"", ""Contact"", ""{2}"", 0, ""{3}"", ""{4}"", ""{5}"") """""".format(str(name_counter), contact_detail.mobile_no, contact_name, contact_detail.creation, contact_detail.modified, contact_detail.modified_by)) name_counter += 1","def execute(): contact_details = frappe.get_list(""Contact"", fields=[""name"", ""email_id"", ""phone"", ""mobile_no"", ""modified_by"", ""creation"", ""modified""]) 
frappe.reload_doc(""contacts"", ""doctype"", ""contact_email"") frappe.reload_doc(""contacts"", ""doctype"", ""contact_phone"") frappe.reload_doc(""contacts"", ""doctype"", ""contact"") name_counter = 100000000 for contact_detail in contact_details: contact_name = contact_detail.name.replace('""', '\\""').replace(""'"", ""\\'"") if contact_detail.email_id: frappe.db.sql("""""" INSERT INTO `tabContact Email` (`idx`, `name`, `email_id`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`, `modified`, `modified_by`) VALUES (1, ""{0}"", ""{1}"", ""email_ids"", ""Contact"", ""{2}"", 1, ""{3}"", ""{4}"", ""{5}"") """""".format(str(name_counter), contact_detail.email_id, contact_name, contact_detail.creation, contact_detail.modified, contact_detail.modified_by)) name_counter += 1 if contact_detail.phone: frappe.db.sql("""""" INSERT INTO `tabContact Phone` (`idx`, `name`, `phone`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`, `modified`, `modified_by`) VALUES (1, ""{0}"", ""{1}"", ""phone_nos"", ""Contact"", ""{2}"", 1, ""{3}"", ""{4}"", ""{5}"") """""".format(frappe.generate_hash(contact_name, 10), contact_detail.phone, contact_name, contact_detail.creation, contact_detail.modified, contact_detail.modified_by)) name_counter += 1 if contact_detail.mobile_no: frappe.db.sql("""""" INSERT INTO `tabContact Phone` (`idx`, `name`, `phone`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`, `modified`, `modified_by`) VALUES (2, ""{0}"", ""{1}"", ""phone_nos"", ""Contact"", ""{2}"", 0, ""{3}"", ""{4}"", ""{5}"") """""".format(str(name_counter), contact_detail.mobile_no, contact_name, contact_detail.creation, contact_detail.modified, contact_detail.modified_by)) name_counter += 1" 47255,"def create_dynamic_module(name: Union[str, os.PathLike]): """""" Creates a dynamic module in the cache directory for modules. """""" init_hf_modules() dynamic_module_path = Path(HF_MODULES_CACHE) / name # If th parent module does not exist yet, recusrively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent) os.makedirs(dynamic_module_path, exist_ok=True) init_path = dynamic_module_path / ""__init__.py"" if not init_path.exists(): init_path.touch() ","def create_dynamic_module(name: Union[str, os.PathLike]): """""" Creates a dynamic module in the cache directory for modules. """""" init_hf_modules() dynamic_module_path = Path(HF_MODULES_CACHE) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent) os.makedirs(dynamic_module_path, exist_ok=True) init_path = dynamic_module_path / ""__init__.py"" if not init_path.exists(): init_path.touch() " 57806,"def fetch_indicators(client: Client, feed_name: List[str], first_fetch_date: Optional[datetime], feed_tags: list = [], tlp_color: Optional[str] = None, is_test: bool = False) -> List[Dict]: """"""Retrieves indicators from the feed Args: client: Client object with request feed_tags: feed tags. first_fetch_date: The date from which to start fetching feeds tlp_color: Traffic Light Protocol color. feed_name: the feed names to fetch is_test: Indicates whether it is a test or not Returns: List. Processed indicator from feed. 
"""""" fetch_date = demisto.getIntegrationContext().get('last_fetch_date') if fetch_date: fetch_date = arg_to_datetime(fetch_date) else: fetch_date = first_fetch_date if fetch_date.date() >= fetch_date.now().date(): fetch_date = fetch_date.now() fetch_time_stamp = '' else: fetch_time_stamp = f'_{str(fetch_date.date())}' indicators = [] temp = {} if 'all' in feed_name: feed_name = ['autorun-registry', 'banking-dns', 'dga-dns', 'dll-hijacking-dns', 'doc-net-com-dns', 'downloaded-pe-dns', 'dynamic-dns', 'irc-dns', 'modified-hosts-dns', 'parked-dns', 'public-ip-check-dns', 'ransomware-dns', 'rat-dns', 'scheduled-tasks', 'sinkholed-ip-dns', 'stolen-cert-dns'] for name in feed_name: result = client.get_indicators(feed_name=name, time_stamp=fetch_time_stamp) for obj in result: feed_related_indicators = [{'type': 'IP', 'value': ip} for ip in obj.get('ips')] feed_related_indicators.append({'type': 'File', 'value': obj.get('sample_md5')}) feed_related_indicators.append({'type': 'File', 'value': obj.get('sample_sha256')}) feed_related_indicators.append({'type': 'File', 'value': obj.get('sample_sha1')}) if obj.get('domain') in temp: if name not in temp[obj.get('domain')]['fields']['Tags']: temp[obj.get('domain')]['fields']['Tags'].append(name) temp[obj.get('domain')]['fields']['FeedRelatedIndicators'].extend( related for related in feed_related_indicators if related not in temp[obj.get('domain')]['fields']['FeedRelatedIndicators']) else: temp[obj.get('domain')] = { ""value"": obj.get('domain'), ""type"": 'Domain', ""rawJSON"": obj, ""fields"": { ""Tags"": [name], ""reportedby"": 'CiscoSMA', ""FeedRelatedIndicators"": feed_related_indicators, ""Description"": obj.get('description') } } if tlp_color: temp[obj.get('domain')]['fields']['trafficlightprotocol'] = tlp_color if feed_tags: temp[obj.get('domain')]['fields']['trafficlightprotocol'] += feed_tags for key in temp: indicators.append(temp[key]) next_fatch = (dateparser.parse('tomorrow', settings={'RELATIVE_BASE': fetch_date})).isoformat() if not is_test: demisto.setIntegrationContext({'last_fetch_date': next_fatch}) demisto.debug(f'{len(indicators)} XSOAR Indicators were created.') return indicators ","def fetch_indicators(client: Client, feed_name: List[str], first_fetch_date: Optional[datetime], feed_tags: list = [], tlp_color: Optional[str] = None, is_test: bool = False) -> List[Dict]: """"""Retrieves indicators from the feed Args: client: Client object with request feed_tags: feed tags. first_fetch_date: The date from which to start fetching feeds tlp_color: Traffic Light Protocol color. feed_name: the feed names to fetch is_test: Indicates whether it is a test or not Returns: List. Processed indicator from feed. 
"""""" fetch_date = demisto.getIntegrationContext().get('last_fetch_date') if fetch_date: fetch_date = arg_to_datetime(fetch_date) else: fetch_date = first_fetch_date if fetch_date.date() >= fetch_date.now().date(): fetch_date = fetch_date.now() fetch_time_stamp = '' else: fetch_time_stamp = f'_{str(fetch_date.date())}' indicators = [] temp = {} if 'all' in feed_name: feed_name = ['autorun-registry', 'banking-dns', 'dga-dns', 'dll-hijacking-dns', 'doc-net-com-dns', 'downloaded-pe-dns', 'dynamic-dns', 'irc-dns', 'modified-hosts-dns', 'parked-dns', 'public-ip-check-dns', 'ransomware-dns', 'rat-dns', 'scheduled-tasks', 'sinkholed-ip-dns', 'stolen-cert-dns'] for name in feed_name: result = client.get_indicators(feed_name=name, time_stamp=fetch_time_stamp) for obj in result: feed_related_indicators = [{'type': 'IP', 'value': ip} for ip in obj.get('ips')] feed_related_indicators.append({'type': 'File', 'value': obj.get('sample_md5')}) feed_related_indicators.append({'type': 'File', 'value': obj.get('sample_sha256')}) feed_related_indicators.append({'type': 'File', 'value': obj.get('sample_sha1')}) current_domain = obj.get('domain') if current_domain in temp: if name not in temp[obj.get('domain')]['fields']['Tags']: temp[obj.get('domain')]['fields']['Tags'].append(name) temp[obj.get('domain')]['fields']['FeedRelatedIndicators'].extend( related for related in feed_related_indicators if related not in temp[obj.get('domain')]['fields']['FeedRelatedIndicators']) else: temp[obj.get('domain')] = { ""value"": obj.get('domain'), ""type"": 'Domain', ""rawJSON"": obj, ""fields"": { ""Tags"": [name], ""reportedby"": 'CiscoSMA', ""FeedRelatedIndicators"": feed_related_indicators, ""Description"": obj.get('description') } } if tlp_color: temp[obj.get('domain')]['fields']['trafficlightprotocol'] = tlp_color if feed_tags: temp[obj.get('domain')]['fields']['trafficlightprotocol'] += feed_tags for key in temp: indicators.append(temp[key]) next_fatch = (dateparser.parse('tomorrow', settings={'RELATIVE_BASE': fetch_date})).isoformat() if not is_test: demisto.setIntegrationContext({'last_fetch_date': next_fatch}) demisto.debug(f'{len(indicators)} XSOAR Indicators were created.') return indicators " 11805,"def add_modulo(image1, image2): """"""Add two images, without clipping the result. At least one of the images must be ""1"" mode. .. code-block:: python out = ((image1 + image2) % MAX) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_add_modulo(image2.im)) ","def add_modulo(image1, image2): """"""Add two images, without clipping the result. At least one of the images must have mode ""1"". .. code-block:: python out = ((image1 + image2) % MAX) :rtype: :py:class:`~PIL.Image.Image` """""" image1.load() image2.load() return image1._new(image1.im.chop_add_modulo(image2.im)) " 25771,"def ac_dc_meshed(update=False): """""" Load the meshed AC-DC network example of pypsa stored in the PyPSA repository. Parameters ---------- update : bool, optional Whether to update the locally stored network data. The default is False. Returns ------- pyspa.Network """""" name = ""ac-dc-meshed"" repofile = ""examples/ac-dc-meshed/ac-dc-data.nc"" path = _retrieve_if_not_local(name, repofile, update=update) return Network(path) ","def ac_dc_meshed(update=False): """""" Load the meshed AC-DC network example of pypsa stored in the PyPSA repository. Parameters ---------- update : bool, optional Whether to update the locally stored network data. The default is False. 
Returns ------- pypsa.Network """""" name = ""ac-dc-meshed"" repofile = ""examples/ac-dc-meshed/ac-dc-data.nc"" path = _retrieve_if_not_local(name, repofile, update=update) return Network(path) " 17427,"def open_zarr( store, group=None, synchronizer=None, chunks=""auto"", decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=False, overwrite_encoded_chunks=False, chunk_store=None, storage_options=None, decode_timedelta=None, use_cftime=None, **kwargs, ): """"""Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, optional Group path. (a.k.a. `path` in zarr terminology.) chunks : int or dict or tuple or {None, 'auto'}, optional Chunk sizes along each dimension, e.g., ``5`` or ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created based on the variable's zarr chunks. If `chunks=None`, zarr array data will lazily convert to numpy arrays upon access. This accepts all the chunk specifications as Dask does. overwrite_encoded_chunks : bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. chunk_store : MutableMapping, optional A separate Zarr store only for chunk data. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} into timedelta objects. If False, leave them encoded as numbers. 
If None (default), assume the same value of decode_time. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. ""gregorian"", ""proleptic_gregorian"", ""standard"", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset References ---------- http://zarr.readthedocs.io/ """""" from .api import open_dataset if chunks == ""auto"": try: import dask.array # noqa chunks = {} except ImportError: chunks = None if kwargs: raise TypeError( ""open_zarr() got unexpected keyword arguments "" + "","".join(kwargs.keys()) ) backend_kwargs = { ""synchronizer"": synchronizer, ""consolidated"": consolidated, ""overwrite_encoded_chunks"": overwrite_encoded_chunks, ""chunk_store"": chunk_store, ""storage_options"": storage_options, } ds = open_dataset( filename_or_obj=store, group=group, decode_cf=decode_cf, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, engine=""zarr"", chunks=chunks, drop_variables=drop_variables, backend_kwargs=backend_kwargs, decode_timedelta=decode_timedelta, use_cftime=use_cftime, ) return ds ","def open_zarr( store, group=None, synchronizer=None, chunks=""auto"", decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=False, overwrite_encoded_chunks=False, chunk_store=None, storage_options=None, decode_timedelta=None, use_cftime=None, **kwargs, ): """"""Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, optional Group path. (a.k.a. `path` in zarr terminology.) chunks : int or dict or tuple or {None, 'auto'}, optional Chunk sizes along each dimension, e.g., ``5`` or ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created based on the variable's zarr chunks. If `chunks=None`, zarr array data will lazily convert to numpy arrays upon access. This accepts all the chunk specifications as Dask does. overwrite_encoded_chunks : bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). 
If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. chunk_store : MutableMapping, optional A separate Zarr store only for chunk data. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. ""gregorian"", ""proleptic_gregorian"", ""standard"", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Returns ------- dataset : Dataset The newly created dataset. 
See Also -------- open_dataset References ---------- http://zarr.readthedocs.io/ """""" from .api import open_dataset if chunks == ""auto"": try: import dask.array # noqa chunks = {} except ImportError: chunks = None if kwargs: raise TypeError( ""open_zarr() got unexpected keyword arguments "" + "","".join(kwargs.keys()) ) backend_kwargs = { ""synchronizer"": synchronizer, ""consolidated"": consolidated, ""overwrite_encoded_chunks"": overwrite_encoded_chunks, ""chunk_store"": chunk_store, ""storage_options"": storage_options, } ds = open_dataset( filename_or_obj=store, group=group, decode_cf=decode_cf, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, engine=""zarr"", chunks=chunks, drop_variables=drop_variables, backend_kwargs=backend_kwargs, decode_timedelta=decode_timedelta, use_cftime=use_cftime, ) return ds " 56364,"def pytest_addoption(parser: Parser) -> None: group = parser.getgroup(""general"") group.addoption( ""--sw"", ""--stepwise"", action=""store_true"", default=False, dest=""stepwise"", help=""exit on test failure and continue from last failing test next time"", ) group.addoption( ""--stepwise-skip"", ""--sw-skip"", action=""store_true"", default=False, dest=""stepwise_skip"", help=""ignore the first failing test but stop on the next failing test"", ) ","def pytest_addoption(parser: Parser) -> None: group = parser.getgroup(""general"") group.addoption( ""--sw"", ""--stepwise"", action=""store_true"", default=False, dest=""stepwise"", help=""exit on test failure and continue from last failing test next time"", ) group.addoption( ""--sw-skip"", ""--stepwise-skip"", action=""store_true"", default=False, dest=""stepwise_skip"", help=""ignore the first failing test but stop on the next failing test"", ) " 21278,"def yieldable_gather_results(func, iter, *args, **kwargs): """"""Executes the function with each argument concurrently. Args: func (func): Function to execute that returns a Deferred iter (iter): An iterable that yields items that get passed as the first argument to the function *args: Arguments to be passed to each call to func Returns Deferred: Resolved when all functions have been invoked, or errors if one of the function calls fails. """""" return logcontext.make_deferred_yieldable(defer.gatherResults([ run_in_background(func, item, *args, **kwargs) for item in iter ], consumeErrors=True)).addErrback(unwrapFirstError) ","def yieldable_gather_results(func, iter, *args, **kwargs): """"""Executes the function with each argument concurrently. Args: func (func): Function to execute that returns a Deferred iter (iter): An iterable that yields items that get passed as the first argument to the function *args: Arguments to be passed to each call to func Returns Deferred[list]: Resolved when all functions have been invoked, or errors if one of the function calls fails. """""" return logcontext.make_deferred_yieldable(defer.gatherResults([ run_in_background(func, item, *args, **kwargs) for item in iter ], consumeErrors=True)).addErrback(unwrapFirstError) " 22651,"def _generate_rst(gallery_conf, fname, content): """""" Helper function returning the rst text a given example content. This writes a file gallery_conf['examples_dir']/fname with *content*, creates the corresponding rst file by running generate_file_rst() and returns the generated rest code. Parameters ---------- gallery_conf A gallery_conf as cerated by the gallery_conf fixture. fname : str A filename; e.g. 'test.py'. 
This is relative to gallery_conf['examples_dir'] content : str The content of fname. Returns ------- rst : str The generated rst code. """""" with codecs.open(os.path.join(gallery_conf['examples_dir'], fname), mode='w', encoding='utf-8') as f: f.write('\n'.join(content)) # generate rst file sg.generate_file_rst(fname, gallery_conf['gallery_dir'], gallery_conf['examples_dir'], gallery_conf) # read rst file and check if it contains code output rst_fname = os.path.splitext(fname)[0] + '.rst' with codecs.open(os.path.join(gallery_conf['gallery_dir'], rst_fname), mode='r', encoding='utf-8') as f: rst = f.read() return rst ","def _generate_rst(gallery_conf, fname, content): """""" Helper function returning the rST text of a given example content. This writes a file gallery_conf['examples_dir']/fname with *content*, creates the corresponding rst file by running generate_file_rst() and returns the generated rest code. Parameters ---------- gallery_conf A gallery_conf as cerated by the gallery_conf fixture. fname : str A filename; e.g. 'test.py'. This is relative to gallery_conf['examples_dir'] content : str The content of fname. Returns ------- rst : str The generated rst code. """""" with codecs.open(os.path.join(gallery_conf['examples_dir'], fname), mode='w', encoding='utf-8') as f: f.write('\n'.join(content)) # generate rst file sg.generate_file_rst(fname, gallery_conf['gallery_dir'], gallery_conf['examples_dir'], gallery_conf) # read rst file and check if it contains code output rst_fname = os.path.splitext(fname)[0] + '.rst' with codecs.open(os.path.join(gallery_conf['gallery_dir'], rst_fname), mode='r', encoding='utf-8') as f: rst = f.read() return rst " 55056,"def spectrum(qnode, encoding_gates=None): r""""""Compute the frequency spectrum of the Fourier representation of simple quantum circuits. The circuit must only use single-parameter gates of the form :math:`e^{-i x_j G}` as input-encoding gates, which allows the computation of the spectrum by inspecting the gates' generators :math:`G`. Gates are marked as input-encoding gates in the quantum function by giving them an ``id``. If two gates have the same ``id``, they are considered to be used to encode the same input :math:`x_j`. The `encoding_gates` argument can be used if only gates with a specific ``id`` should be interpreted as input-encoding gates. Args: qnode (pennylane.QNode): a quantum node representing a circuit in which input-encoding gates are marked by their ``id`` attribute encoding_gates (list[str]): list of input-encoding gate ``id`` strings for which to compute the frequency spectra Returns: (dict[str, list[float]]): Dictionary with the input-encoding gate ``id`` as keys and their frequency spectra as values. **Details** A circuit that returns an expectation value which depends on :math:`N` scalar inputs :math:`x_i` can be interpreted as a function :math:`f: \mathbb{R}^N \rightarrow \mathbb{R}`. This function can always be expressed by a Fourier-type sum .. math:: \sum \limits_{\omega_1\in \Omega_1} \dots \sum \limits_{\omega_N \in \Omega_N} c_{\omega_1,\dots, \omega_N} e^{-i x_1 \omega_1} \dots e^{-i x_N \omega_N} over the *frequency spectra* :math:`\Omega_i \subseteq \mathbb{R},` :math:`i=1,\dots,N`. Each spectrum has the property that :math:`0 \in \Omega_i`, and the spectrum is symmetric (for every :math:`\omega \in \Omega_i` we have that :math:`-\omega \in \Omega_i`). If all frequencies are integer-valued, the Fourier sum becomes a *Fourier series*. 
As shown in `Vidal and Theis (2019) `_ and `Schuld, Sweke and Meyer (2020) `_, if an input :math:`x_j, j = 1 \dots N` only enters into single-parameter gates of the form :math:`e^{-i x_j G}`, the frequency spectrum :math:`\Omega_j` is fully determined by the eigenvalues of the generators :math:`G`. In many situations, the spectra are limited to a few frequencies only, which in turn limits the function class that the circuit can express. The ``spectrum`` function computes all frequencies that are allowed to appear in the sets :math:`\Omega_1` to :math:`\Omega_N` (which correspond to the :math:`N` different strings used as an ``id`` to mark the input-encoding gates). **Example** Consider the following example, which uses non-trainable inputs ``x`` and trainable parameters ``w`` as arguments to the qnode. .. code-block:: python import pennylane as qml import numpy as np from pennylane.fourier import spectrum n_layers = 2 n_qubits = 3 dev = qml.device(""default.qubit"", wires=n_qubits) @qml.qnode(dev) def circuit(x, w): for l in range(n_layers): for i in range(n_qubits): qml.RX(x[i], wires=0, id=""x""+str(i)) qml.Rot(w[l,i,0], w[l,i,1], w[l,i,2], wires=0) qml.RZ(x[0], wires=0, id=""x0"") return qml.expval(qml.PauliZ(wires=0)) x = np.array([1, 2, 3]) w = np.random.random((n_layers, n_qubits, 3)) res = spectrum(circuit)(x, w) for inp, freqs in res.items(): print(f""{inp}: {freqs}"") >>> 'x0': [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0] >>> 'x1': [-2.0, -1.0, 0.0, 1.0, 2.0] >>> 'x2': [-2.0, -1.0, 0.0, 1.0, 2.0] .. note:: While the Fourier spectrum usually does not depend on trainable circuit parameters and the actual values of the inputs, it may still change with arguments of the QNode that determine the architecture of the circuit. The input-encoding gates to consider can also be explicitly selected by using the ``encoding_gates`` keyword argument: .. code-block:: python dev = qml.device(""default.qubit"", wires=1) @qml.qnode(dev) def circuit(x): qml.RX(x[0], wires=0, id=""x0"") qml.PhaseShift(x[0], wires=0, id=""x0"") qml.RX(x[1], wires=0, id=""x1"") return qml.expval(qml.PauliZ(wires=0)) x = np.array([1, 2]) res = spectrum(circuit, encoding_gates=[""x0""])(x) for inp, freqs in res.items(): print(f""{inp}: {freqs}"") >>> 'x0': [-2.0, -1.0, 0.0, 1.0, 2.0] .. note:: The `spectrum` function does not check if the result of the circuit is an expectation, or if gates with the same `id` take the same value in a given call of the function. The `spectrum` function works in all interfaces: .. 
code-block:: python import tensorflow as tf dev = qml.device(""default.qubit"", wires=1) @qml.qnode(dev, interface='tf') def circuit(x): qml.RX(x[0], wires=0, id=""x0"") qml.PhaseShift(x[1], wires=0, id=""x1"") return qml.expval(qml.PauliZ(wires=0)) x = tf.constant([1, 2]) res = spectrum(circuit)(x) for inp, freqs in res.items(): print(f""{inp}: {freqs}"") >>> 'x0': [-1.0, 0.0, 1.0] >>> 'x1': [-1.0, 0.0, 1.0] """""" @wraps(qnode) def wrapper(*args, **kwargs): qnode.construct(args, kwargs) tape = qnode.qtape freqs = {} for op in tape.operations: id = op.id # if the operator has no specific ID, # move to the next if id is None: continue # if user has not specified encoding_gate id's, # consider any id is_encoding_gate = encoding_gates is None or id in encoding_gates if is_encoding_gate: if len(op.parameters) != 1: raise ValueError( ""can only consider one-parameter gates as data-encoding gates; "" ""got {}."".format(op.name) ) spec = _get_spectrum(op) # if id has been seen before, # join this spectrum to another one if id in freqs: spec = _join_spectra(freqs[id], spec) freqs[id] = spec return freqs return wrapper ","def spectrum(qnode, encoding_gates=None): r""""""Compute the frequency spectrum of the Fourier representation of simple quantum circuits. The circuit must only use single-parameter gates of the form :math:`e^{-i x_j G}` as input-encoding gates, which allows the computation of the spectrum by inspecting the gates' generators :math:`G`. Gates are marked as input-encoding gates in the quantum function by giving them an ``id``. If two gates have the same ``id``, they are considered to be used to encode the same input :math:`x_j`. The `encoding_gates` argument can be used to indicate that only gates with a specific ``id`` should be interpreted as input-encoding gates. Args: qnode (pennylane.QNode): a quantum node representing a circuit in which input-encoding gates are marked by their ``id`` attribute encoding_gates (list[str]): list of input-encoding gate ``id`` strings for which to compute the frequency spectra Returns: (dict[str, list[float]]): Dictionary with the input-encoding gate ``id`` as keys and their frequency spectra as values. **Details** A circuit that returns an expectation value which depends on :math:`N` scalar inputs :math:`x_i` can be interpreted as a function :math:`f: \mathbb{R}^N \rightarrow \mathbb{R}`. This function can always be expressed by a Fourier-type sum .. math:: \sum \limits_{\omega_1\in \Omega_1} \dots \sum \limits_{\omega_N \in \Omega_N} c_{\omega_1,\dots, \omega_N} e^{-i x_1 \omega_1} \dots e^{-i x_N \omega_N} over the *frequency spectra* :math:`\Omega_i \subseteq \mathbb{R},` :math:`i=1,\dots,N`. Each spectrum has the property that :math:`0 \in \Omega_i`, and the spectrum is symmetric (for every :math:`\omega \in \Omega_i` we have that :math:`-\omega \in \Omega_i`). If all frequencies are integer-valued, the Fourier sum becomes a *Fourier series*. As shown in `Vidal and Theis (2019) `_ and `Schuld, Sweke and Meyer (2020) `_, if an input :math:`x_j, j = 1 \dots N` only enters into single-parameter gates of the form :math:`e^{-i x_j G}`, the frequency spectrum :math:`\Omega_j` is fully determined by the eigenvalues of the generators :math:`G`. In many situations, the spectra are limited to a few frequencies only, which in turn limits the function class that the circuit can express. 
The ``spectrum`` function computes all frequencies that are allowed to appear in the sets :math:`\Omega_1` to :math:`\Omega_N` (which correspond to the :math:`N` different strings used as an ``id`` to mark the input-encoding gates). **Example** Consider the following example, which uses non-trainable inputs ``x`` and trainable parameters ``w`` as arguments to the qnode. .. code-block:: python import pennylane as qml import numpy as np from pennylane.fourier import spectrum n_layers = 2 n_qubits = 3 dev = qml.device(""default.qubit"", wires=n_qubits) @qml.qnode(dev) def circuit(x, w): for l in range(n_layers): for i in range(n_qubits): qml.RX(x[i], wires=0, id=""x""+str(i)) qml.Rot(w[l,i,0], w[l,i,1], w[l,i,2], wires=0) qml.RZ(x[0], wires=0, id=""x0"") return qml.expval(qml.PauliZ(wires=0)) x = np.array([1, 2, 3]) w = np.random.random((n_layers, n_qubits, 3)) res = spectrum(circuit)(x, w) for inp, freqs in res.items(): print(f""{inp}: {freqs}"") >>> 'x0': [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0] >>> 'x1': [-2.0, -1.0, 0.0, 1.0, 2.0] >>> 'x2': [-2.0, -1.0, 0.0, 1.0, 2.0] .. note:: While the Fourier spectrum usually does not depend on trainable circuit parameters and the actual values of the inputs, it may still change with arguments of the QNode that determine the architecture of the circuit. The input-encoding gates to consider can also be explicitly selected by using the ``encoding_gates`` keyword argument: .. code-block:: python dev = qml.device(""default.qubit"", wires=1) @qml.qnode(dev) def circuit(x): qml.RX(x[0], wires=0, id=""x0"") qml.PhaseShift(x[0], wires=0, id=""x0"") qml.RX(x[1], wires=0, id=""x1"") return qml.expval(qml.PauliZ(wires=0)) x = np.array([1, 2]) res = spectrum(circuit, encoding_gates=[""x0""])(x) for inp, freqs in res.items(): print(f""{inp}: {freqs}"") >>> 'x0': [-2.0, -1.0, 0.0, 1.0, 2.0] .. note:: The `spectrum` function does not check if the result of the circuit is an expectation, or if gates with the same `id` take the same value in a given call of the function. The `spectrum` function works in all interfaces: .. 
code-block:: python import tensorflow as tf dev = qml.device(""default.qubit"", wires=1) @qml.qnode(dev, interface='tf') def circuit(x): qml.RX(x[0], wires=0, id=""x0"") qml.PhaseShift(x[1], wires=0, id=""x1"") return qml.expval(qml.PauliZ(wires=0)) x = tf.constant([1, 2]) res = spectrum(circuit)(x) for inp, freqs in res.items(): print(f""{inp}: {freqs}"") >>> 'x0': [-1.0, 0.0, 1.0] >>> 'x1': [-1.0, 0.0, 1.0] """""" @wraps(qnode) def wrapper(*args, **kwargs): qnode.construct(args, kwargs) tape = qnode.qtape freqs = {} for op in tape.operations: id = op.id # if the operator has no specific ID, # move to the next if id is None: continue # if user has not specified encoding_gate id's, # consider any id is_encoding_gate = encoding_gates is None or id in encoding_gates if is_encoding_gate: if len(op.parameters) != 1: raise ValueError( ""can only consider one-parameter gates as data-encoding gates; "" ""got {}."".format(op.name) ) spec = _get_spectrum(op) # if id has been seen before, # join this spectrum to another one if id in freqs: spec = _join_spectra(freqs[id], spec) freqs[id] = spec return freqs return wrapper " 40591,"def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm=""kmeans"", feature=None, **algorithm_kwds): if algorithm == ""kmeans"": algorithm_kwds.setdefault('n_init', 1000) algorithm_kwds.setdefault('max_iter', 30000) algorithm_kwds.setdefault('tol', 1e-6) if algorithm == ""hac"": feature = get_feature_for_hac(n, buses_i=n.buses.index, feature=feature) elif feature is not None: logger.info(f""keyword argument feature is only valid for algorithm 'hac'."" f""given feature {feature} will be ignored."") n.determine_network_topology() n_clusters = distribute_clusters(n, n_clusters, focus_weights=focus_weights, solver_name=solver_name) def reduce_network(n, buses): nr = pypsa.Network() nr.import_components_from_dataframe(buses, ""Bus"") nr.import_components_from_dataframe(n.lines.loc[n.lines.bus0.isin(buses.index) & n.lines.bus1.isin(buses.index)], ""Line"") return nr def busmap_for_country(x): prefix = x.name[0] + x.name[1] + ' ' logger.debug(f""Determining busmap for country {prefix[:-1]}"") if len(x) == 1: return pd.Series(prefix + '0', index=x.index) weight = weighting_for_country(n, x) if algorithm == ""kmeans"": return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds) elif algorithm == ""spectral"": return prefix + busmap_by_spectral_clustering(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) elif algorithm == ""louvain"": return prefix + busmap_by_louvain(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) elif algorithm == ""hac"": return prefix + busmap_by_hac(n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index]) else: raise ValueError(f""`algorithm` must be one of 'kmeans', 'hac', 'spectral' or 'louvain'. 
Is {algorithm}."") return (n.buses.groupby(['country', 'sub_network'], group_keys=False) .apply(busmap_for_country).squeeze().rename('busmap')) ","def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm=""kmeans"", feature=None, **algorithm_kwds): if algorithm == ""kmeans"": algorithm_kwds.setdefault('n_init', 1000) algorithm_kwds.setdefault('max_iter', 30000) algorithm_kwds.setdefault('tol', 1e-6) if algorithm == ""hac"": feature = get_feature_for_hac(n, buses_i=n.buses.index, feature=feature) elif feature is not None: logger.warning(f""Keyword argument feature is only valid for algorithm 'hac'."" f""given feature {feature} will be ignored."") n.determine_network_topology() n_clusters = distribute_clusters(n, n_clusters, focus_weights=focus_weights, solver_name=solver_name) def reduce_network(n, buses): nr = pypsa.Network() nr.import_components_from_dataframe(buses, ""Bus"") nr.import_components_from_dataframe(n.lines.loc[n.lines.bus0.isin(buses.index) & n.lines.bus1.isin(buses.index)], ""Line"") return nr def busmap_for_country(x): prefix = x.name[0] + x.name[1] + ' ' logger.debug(f""Determining busmap for country {prefix[:-1]}"") if len(x) == 1: return pd.Series(prefix + '0', index=x.index) weight = weighting_for_country(n, x) if algorithm == ""kmeans"": return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds) elif algorithm == ""spectral"": return prefix + busmap_by_spectral_clustering(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) elif algorithm == ""louvain"": return prefix + busmap_by_louvain(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds) elif algorithm == ""hac"": return prefix + busmap_by_hac(n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index]) else: raise ValueError(f""`algorithm` must be one of 'kmeans', 'hac', 'spectral' or 'louvain'. Is {algorithm}."") return (n.buses.groupby(['country', 'sub_network'], group_keys=False) .apply(busmap_for_country).squeeze().rename('busmap')) " 28615,"def plot_autocorr( data, var_names=None, filter_vars=None, max_lag=None, combined=False, grid=None, figsize=None, textsize=None, labeller=None, ax=None, backend=None, backend_config=None, backend_kwargs=None, show=None, ): """"""Bar plot of the autocorrelation function for a sequence of data. Useful in particular for posteriors from MCMC samples which may display correlation. Parameters ---------- data : InferenceData Any object that can be converted to an :class:`arviz.InferenceData` object refer to documentation of :func:`arviz.convert_to_dataset` for details var_names : list of variable names, optional Variables to be plotted, if None all variables are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. Vector-value stochastics are handled automatically. filter_vars : {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. max_lag : int, optional Maximum lag to calculate autocorrelation. Defaults to 100 or num draws, whichever is smaller. combined : bool, default=False Flag for combining multiple chains into a single chain. If False, chains will be plotted separately. grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize : tuple Figure size. 
If None it will be defined automatically. Note this is not used if ``ax`` is supplied. textsize : float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. ax : numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). backend : str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_config : dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``. backend_kwargs : dict, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. show : bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- autocov : Compute autocovariance estimates for every lag for the input array. autocorr : Compute autocorrelation using FFT for every lag for the input array. Examples -------- Plot default autocorrelation .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('centered_eight') >>> az.plot_autocorr(data) Plot subset variables by specifying variable name exactly .. plot:: :context: close-figs >>> az.plot_autocorr(data, var_names=['mu', 'tau'] ) Combine chains by variable and select variables by excluding some with partial naming .. plot:: :context: close-figs >>> az.plot_autocorr(data, var_names=['~thet'], filter_vars=""like"", combined=True) Specify maximum lag (x axis bound) .. plot:: :context: close-figs >>> az.plot_autocorr(data, var_names=['mu', 'tau'], max_lag=200, combined=True) """""" data = convert_to_dataset(data, group=""posterior"") var_names = _var_names(var_names, data, filter_vars) # Default max lag to 100 or max length of chain if max_lag is None: max_lag = min(100, data[""draw""].shape[0]) if labeller is None: labeller = BaseLabeller() plotters = filter_plotters_list( list(xarray_var_iter(data, var_names, combined)), ""plot_autocorr"" ) rows, cols = default_grid(len(plotters), grid=grid) autocorr_plot_args = dict( axes=ax, plotters=plotters, max_lag=max_lag, figsize=figsize, rows=rows, cols=cols, combined=combined, textsize=textsize, labeller=labeller, backend_kwargs=backend_kwargs, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() if backend == ""bokeh"": autocorr_plot_args.update(backend_config=backend_config) # TODO: Add backend kwargs plot = get_plotting_function(""plot_autocorr"", ""autocorrplot"", backend) axes = plot(**autocorr_plot_args) return axes ","def plot_autocorr( data, var_names=None, filter_vars=None, max_lag=None, combined=False, grid=None, figsize=None, textsize=None, labeller=None, ax=None, backend=None, backend_config=None, backend_kwargs=None, show=None, ): """"""Bar plot of the autocorrelation function for a sequence of data. Useful in particular for posteriors from MCMC samples which may display correlation. 
Parameters ---------- data : InferenceData Any object that can be converted to an :class:`arviz.InferenceData` object refer to documentation of :func:`arviz.convert_to_dataset` for details var_names : list of str, optional Variables to be plotted, if None all variables are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. Vector-value stochastics are handled automatically. filter_vars : {None, ""like"", ""regex""}, optional, default=None If `None` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. max_lag : int, optional Maximum lag to calculate autocorrelation. Defaults to 100 or num draws, whichever is smaller. combined : bool, default=False Flag for combining multiple chains into a single chain. If False, chains will be plotted separately. grid : tuple Number of rows and columns. Defaults to None, the rows and columns are automatically inferred. figsize : tuple Figure size. If None it will be defined automatically. Note this is not used if ``ax`` is supplied. textsize : float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on ``figsize``. labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. ax : numpy array-like of matplotlib axes or bokeh figures, optional A 2D array of locations into which to plot the densities. If not supplied, Arviz will create its own array of plot areas (and return it). backend : str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_config : dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in ``rcParams``. backend_kwargs : dict, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. show : bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- autocov : Compute autocovariance estimates for every lag for the input array. autocorr : Compute autocorrelation using FFT for every lag for the input array. Examples -------- Plot default autocorrelation .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('centered_eight') >>> az.plot_autocorr(data) Plot subset variables by specifying variable name exactly .. plot:: :context: close-figs >>> az.plot_autocorr(data, var_names=['mu', 'tau'] ) Combine chains by variable and select variables by excluding some with partial naming .. plot:: :context: close-figs >>> az.plot_autocorr(data, var_names=['~thet'], filter_vars=""like"", combined=True) Specify maximum lag (x axis bound) .. 
plot:: :context: close-figs >>> az.plot_autocorr(data, var_names=['mu', 'tau'], max_lag=200, combined=True) """""" data = convert_to_dataset(data, group=""posterior"") var_names = _var_names(var_names, data, filter_vars) # Default max lag to 100 or max length of chain if max_lag is None: max_lag = min(100, data[""draw""].shape[0]) if labeller is None: labeller = BaseLabeller() plotters = filter_plotters_list( list(xarray_var_iter(data, var_names, combined)), ""plot_autocorr"" ) rows, cols = default_grid(len(plotters), grid=grid) autocorr_plot_args = dict( axes=ax, plotters=plotters, max_lag=max_lag, figsize=figsize, rows=rows, cols=cols, combined=combined, textsize=textsize, labeller=labeller, backend_kwargs=backend_kwargs, show=show, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() if backend == ""bokeh"": autocorr_plot_args.update(backend_config=backend_config) # TODO: Add backend kwargs plot = get_plotting_function(""plot_autocorr"", ""autocorrplot"", backend) axes = plot(**autocorr_plot_args) return axes " 7065,"def runN_remover(workflow_id: str) -> str: if re.findall(r'(.*)\/run\d+$', workflow_id): return re.findall(r'(.*)\/run\d+$', workflow_id)[0] else: return workflow_id ","def get_workflow_name_from_id(workflow_id: str) -> str: if re.findall(r'(.*)\/run\d+$', workflow_id): return re.findall(r'(.*)\/run\d+$', workflow_id)[0] else: return workflow_id " 46401,"def _do_stratification( data, ratio=0.75, min_rating=1, filter_by=""user"", is_random=True, seed=42, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL, col_timestamp=DEFAULT_TIMESTAMP_COL, ): # A few preliminary checks. if not (filter_by == ""user"" or filter_by == ""item""): raise ValueError(""filter_by should be either 'user' or 'item'."") if min_rating < 1: raise ValueError(""min_rating should be integer and larger than or equal to 1."") if col_user not in data.columns: raise ValueError(""Schema of data not valid. Missing User Col"") if col_item not in data.columns: raise ValueError(""Schema of data not valid. Missing Item Col"") if not is_random: if col_timestamp not in data.columns: raise ValueError(""Schema of data not valid. Missing Timestamp Col"") multi_split, ratio = process_split_ratio(ratio) split_by_column = col_user if filter_by == ""user"" else col_item ratio = ratio if multi_split else [ratio, 1 - ratio] if min_rating > 1: data = min_rating_filter_pandas( data, min_rating=min_rating, filter_by=filter_by, col_user=col_user, col_item=col_item, ) if is_random: np.random.seed(seed) data[""random""] = np.random.rand(data.shape[0]) order_by = ""random"" else: order_by = col_timestamp data = data.sort_values([split_by_column, order_by]) groups = data.groupby(split_by_column) data[""count""] = groups[split_by_column].transform(""count"") data[""rank""] = groups.cumcount() + 1 if is_random: data = data.drop(""random"", axis=1) splits = [] prev_threshold = None for threshold in np.cumsum(ratio): condition = data[""rank""] <= round(threshold * data[""count""]) if prev_threshold is not None: condition &= data[""rank""] > round(prev_threshold * data[""count""]) splits.append(data[condition].drop(""rank"", axis=1).drop(""count"", axis=1)) prev_threshold = threshold return splits ","def _do_stratification( data, ratio=0.75, min_rating=1, filter_by=""user"", is_random=True, seed=42, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL, col_timestamp=DEFAULT_TIMESTAMP_COL, ): # A few preliminary checks. 
if not (filter_by == ""user"" or filter_by == ""item""): raise ValueError(""filter_by should be either 'user' or 'item'."") if min_rating < 1: raise ValueError(""min_rating should be integer and larger than or equal to 1."") if col_user not in data.columns: raise ValueError(""Schema of data not valid. Missing User Col"") if col_item not in data.columns: raise ValueError(""Schema of data not valid. Missing Item Col"") if not is_random: if col_timestamp not in data.columns: raise ValueError(""Schema of data not valid. Missing Timestamp Col"") multi_split, ratio = process_split_ratio(ratio) split_by_column = col_user if filter_by == ""user"" else col_item ratio = ratio if multi_split else [ratio, 1 - ratio] if min_rating > 1: data = min_rating_filter_pandas( data, min_rating=min_rating, filter_by=filter_by, col_user=col_user, col_item=col_item, ) if is_random: np.random.seed(seed) data[""random""] = np.random.rand(data.shape[0]) order_by = ""random"" else: order_by = col_timestamp data = data.sort_values([split_by_column, order_by]) groups = data.groupby(split_by_column) data[""count""] = groups[split_by_column].transform(""count"") data[""rank""] = groups.cumcount() + 1 if is_random: data = data.drop(""random"", axis=1) splits = [] prev_threshold = None for threshold in np.cumsum(ratio): condition = data[""rank""] <= round(threshold * data[""count""]) if prev_threshold is not None: condition &= data[""rank""] > round(prev_threshold * data[""count""]) splits.append(data[condition].drop([""rank"", ""count""], axis=1)) prev_threshold = threshold return splits " 31740,"def list_activities_command(client: Client, args: dict): url_suffix = '/activities/' activity_id = args.get('activity_id') custom_filter = args.get('custom_filter') arguments = assign_params(**args) timeout = int(arguments.get('timeout', 60)) or 60 request_data, url_suffix = build_filter_and_url_to_search_with(url_suffix, custom_filter, arguments, activity_id) activities_response_data = client.list_activities(url_suffix, request_data, timeout) list_activities = activities_response_data.get('data') if activities_response_data.get('data') \ else [activities_response_data] activities = arrange_entities_data(list_activities) return create_ip_command_results(activities) ","def list_activities_command(client: Client, args: dict): url_suffix = '/activities/' activity_id = args.get('activity_id') custom_filter = args.get('custom_filter') arguments = assign_params(**args) timeout = arg_to_number(arguments.get('timeout', 60)) or 60 request_data, url_suffix = build_filter_and_url_to_search_with(url_suffix, custom_filter, arguments, activity_id) activities_response_data = client.list_activities(url_suffix, request_data, timeout) list_activities = activities_response_data.get('data') if activities_response_data.get('data') \ else [activities_response_data] activities = arrange_entities_data(list_activities) return create_ip_command_results(activities) " 58806,"def test_convert_follow_node_with_integer_arguments(): """"""Tests the conversion of a follow op with integer arguments + constant float args. The follow op should convert the floating point argument into fp16 as constants/vars will always be converted if safe to do so. 
"""""" data = relay.var(""data"", shape=[1, 10], dtype=""float32"") # We add have an addition to make sure the input indices to take are not a # var (which are always casted if safe) indices = relay.var(""indices"", shape=[1, 1], dtype=""int32"") + relay.const(0, dtype=""int32"") take = relay.take(data, indices, axis=0) mod = tvm.IRModule.from_expr(take) mod_params = { ""data"": np.random.uniform(-1, 1, size=[1, 10]).astype(""float32""), ""indices"": np.array([[0]]).astype(""int32""), } output_mod = verify_mixed_precision_output_close(mod, mod_params, atol=0.01, rtol=0.01) # Create expected module data = relay.cast(relay.var(""data"", shape=[1, 10]), ""float16"") take = relay.take(data, indices, axis=0) expected_mod = tvm.IRModule.from_expr(take) expected_mod = InferType()(expected_mod) assert tvm.ir.structural_equal(expected_mod, output_mod) ","def test_convert_follow_node_with_integer_arguments(): """"""Tests the conversion of a follow op with integer arguments + constant float args. The follow op should convert the floating point argument into fp16 as constants/vars will always be converted if safe to do so. """""" data = relay.var(""data"", shape=[1, 10], dtype=""float32"") # We use an addition to make sure the input indices are not a var # (which are always casted if safe) indices = relay.var(""indices"", shape=[1, 1], dtype=""int32"") + relay.const(0, dtype=""int32"") take = relay.take(data, indices, axis=0) mod = tvm.IRModule.from_expr(take) mod_params = { ""data"": np.random.uniform(-1, 1, size=[1, 10]).astype(""float32""), ""indices"": np.array([[0]]).astype(""int32""), } output_mod = verify_mixed_precision_output_close(mod, mod_params, atol=0.01, rtol=0.01) # Create expected module data = relay.cast(relay.var(""data"", shape=[1, 10]), ""float16"") take = relay.take(data, indices, axis=0) expected_mod = tvm.IRModule.from_expr(take) expected_mod = InferType()(expected_mod) assert tvm.ir.structural_equal(expected_mod, output_mod) " 57907,"def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" # Get API key for authentication. api_key = demisto.params().get('apikey') # Get helios service API url. base_url = demisto.params()['url'] # if your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor verify_certificate = not demisto.params().get('insecure', False) # if your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor proxy = demisto.params().get('proxy', False) demisto.debug(f'Command being called is {demisto.command()}') try: # Prepare client and set authentication headers. headers: Dict = { 'apikey': api_key, 'Content-Type': 'application/json', } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy) if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. 
result = test_module(client) return_results(result) elif demisto.command() == 'cohesity-helios-get-ransomware-alerts': return_results(get_ransomware_alerts_command(client, demisto.args())) elif demisto.command() == 'cohesity-helios-ignore-anomalous-object': return_results(ignore_ransomware_anomaly_command(client, demisto.args())) elif demisto.command() == 'cohesity-helios-restore-latest-clean-snapshot': return_results(restore_latest_clean_snapshot(client, demisto.args())) elif demisto.command() == 'fetch-incidents': fetch_incidents_command(client) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: """"""main function, parses params and runs command functions :return: :rtype: """""" params = demisto.params() # Get API key for authentication. api_key = params.get('apikey') # Get helios service API url. base_url = params.get('url') # if your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor verify_certificate = not params.get('insecure', False) # if your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor proxy = params.get('proxy', False) demisto.debug(f'Command being called is {demisto.command()}') try: # Prepare client and set authentication headers. headers: Dict = { 'apikey': api_key, 'Content-Type': 'application/json', } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy) if demisto.command() == 'test-module': # This is the call made when pressing the integration Test button. result = test_module(client) return_results(result) elif demisto.command() == 'cohesity-helios-get-ransomware-alerts': return_results(get_ransomware_alerts_command(client, demisto.args())) elif demisto.command() == 'cohesity-helios-ignore-anomalous-object': return_results(ignore_ransomware_anomaly_command(client, demisto.args())) elif demisto.command() == 'cohesity-helios-restore-latest-clean-snapshot': return_results(restore_latest_clean_snapshot(client, demisto.args())) elif demisto.command() == 'fetch-incidents': fetch_incidents_command(client) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') " 4632,"def cache(func, memory, func_memory_level=None, memory_level=None, shelve=False, **kwargs): """"""Return a joblib.Memory object. The memory_level determines the level above which the wrapped function output is cached. By specifying a numeric value for this level, the user can to control the amount of cache memory used. This function will cache the function call or not depending on the cache level. Parameters ---------- func : function The function which output is to be cached. memory : instance of joblib.Memory, string or pathlib.Path Used to cache the function call. func_memory_level : int, optional The memory_level from which caching must be enabled for the wrapped function. memory_level : int, optional The memory_level used to determine if function call must be cached or not (if user_memory_level is equal of greater than func_memory_level the function is cached). 
shelve : bool, optional Whether to return a joblib MemorizedResult, callable by a .get() method, instead of the return value of func. Default=False. kwargs : keyword arguments, optional The keyword arguments passed to memory.cache. Returns ------- mem : joblib.MemorizedFunc, wrapped in _ShelvedFunc if shelving Object that wraps the function func to cache its further call. This object may be a no-op, if the requested level is lower than the value given to _cache()). For consistency, a callable object is always returned. """""" verbose = kwargs.get('verbose', 0) # memory_level and func_memory_level must be both None or both integers. memory_levels = [memory_level, func_memory_level] both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels) both_params_none = all(lvl is None for lvl in memory_levels) if not (both_params_integers or both_params_none): raise ValueError('Reference and user memory levels must be both None ' 'or both integers.') if memory is not None and (func_memory_level is None or memory_level >= func_memory_level): memory = stringify_path(memory) if isinstance(memory, str): memory = Memory(location=memory, verbose=verbose) if not isinstance(memory, MEMORY_CLASSES): raise TypeError(""'memory' argument must be a string or a "" ""joblib.Memory object. "" ""%s %s was given."" % (memory, type(memory))) if (memory.location is None and memory_level is not None and memory_level > 1): warnings.warn(""Caching has been enabled (memory_level = %d) "" ""but no Memory object or path has been provided"" "" (parameter memory). Caching deactivated for "" ""function %s."" % (memory_level, func.__name__), stacklevel=2) else: memory = Memory(location=None, verbose=verbose) cached_func = _safe_cache(memory, func, **kwargs) if shelve: cached_func = _ShelvedFunc(cached_func) return cached_func ","def cache(func, memory, func_memory_level=None, memory_level=None, shelve=False, **kwargs): """"""Return a joblib.Memory object. The memory_level determines the level above which the wrapped function output is cached. By specifying a numeric value for this level, the user can to control the amount of cache memory used. This function will cache the function call or not depending on the cache level. Parameters ---------- func : function The function which output is to be cached. memory : instance of :class:`joblib.Memory`, obj:`str` or :class:`pathlib.Path` Used to cache the function call. func_memory_level : int, optional The memory_level from which caching must be enabled for the wrapped function. memory_level : int, optional The memory_level used to determine if function call must be cached or not (if user_memory_level is equal of greater than func_memory_level the function is cached). shelve : bool, optional Whether to return a joblib MemorizedResult, callable by a .get() method, instead of the return value of func. Default=False. kwargs : keyword arguments, optional The keyword arguments passed to memory.cache. Returns ------- mem : joblib.MemorizedFunc, wrapped in _ShelvedFunc if shelving Object that wraps the function func to cache its further call. This object may be a no-op, if the requested level is lower than the value given to _cache()). For consistency, a callable object is always returned. """""" verbose = kwargs.get('verbose', 0) # memory_level and func_memory_level must be both None or both integers. 
memory_levels = [memory_level, func_memory_level] both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels) both_params_none = all(lvl is None for lvl in memory_levels) if not (both_params_integers or both_params_none): raise ValueError('Reference and user memory levels must be both None ' 'or both integers.') if memory is not None and (func_memory_level is None or memory_level >= func_memory_level): memory = stringify_path(memory) if isinstance(memory, str): memory = Memory(location=memory, verbose=verbose) if not isinstance(memory, MEMORY_CLASSES): raise TypeError(""'memory' argument must be a string or a "" ""joblib.Memory object. "" ""%s %s was given."" % (memory, type(memory))) if (memory.location is None and memory_level is not None and memory_level > 1): warnings.warn(""Caching has been enabled (memory_level = %d) "" ""but no Memory object or path has been provided"" "" (parameter memory). Caching deactivated for "" ""function %s."" % (memory_level, func.__name__), stacklevel=2) else: memory = Memory(location=None, verbose=verbose) cached_func = _safe_cache(memory, func, **kwargs) if shelve: cached_func = _ShelvedFunc(cached_func) return cached_func " 55660,"def convert_points_from_homogeneous(points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r""""""Function that converts points from homogeneous to Euclidean space. Args: points: the points to be transformed of shape :math:`(B, N, D)`. eps: to avoid division by zero. Returns: the points in Euclidean space :math:`(B, N, D-1)`. Examples: >>> input = torch.tensor([[0., 0., 1.]]) >>> convert_points_from_homogeneous(input) tensor([[0., 0.]]) """""" if not isinstance(points, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(points)}"") if len(points.shape) < 2: raise ValueError(f""Input must be at least a 2D tensor. Got {points.shape}"") # we check for points at max_val z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale = torch.where(mask, torch.ones_like(z_vec) / (z_vec + eps), torch.ones_like(z_vec)) return scale * points[..., :-1] ","def convert_points_from_homogeneous(points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r""""""Function that converts points from homogeneous to Euclidean space. Args: points: the points to be transformed of shape :math:`(B, N, D)`. eps: to avoid division by zero. Returns: the points in Euclidean space :math:`(B, N, D-1)`. Examples: >>> input = torch.tensor([[0., 0., 1.]]) >>> convert_points_from_homogeneous(input) tensor([[0., 0.]]) """""" if not isinstance(points, torch.Tensor): raise TypeError(f""Input type is not a torch.Tensor. Got {type(points)}"") if len(points.shape) < 2: raise ValueError(f""Input must be at least a 2D tensor. Got {points.shape}"") # we check for points at max_val z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps ones = torch.ones_like(z_vec) scale = torch.where(mask, ones / (z_vec + eps), ones) return scale * points[..., :-1] " 4482,"def _validate_nirs_info(info, *, throw_errors=True, fnirs=None, which=None, check_bads=True, allow_empty=True): """"""Apply all checks to fNIRS info. 
Works on all continuous wave types."""""" _validate_type(fnirs, (None, str), 'fnirs') kinds = dict( od='optical density', cw_amplitude='continuous wave', hbx='chromophore', ) _check_option('fnirs', fnirs, (None,) + tuple(kinds)) if fnirs is not None: kind = kinds[fnirs] fnirs = ['hbo', 'hbr'] if fnirs == 'hbx' else f'fnirs_{fnirs}' if not len(pick_types(info, fnirs=fnirs)): raise RuntimeError( f'{which} must operate on {kind} data, but none was found.') freqs = np.unique(_channel_frequencies(info, throw_errors=throw_errors)) if freqs.size > 0: pair_vals = freqs else: pair_vals = np.unique(_channel_chromophore(info)) out = _check_channels_ordered( info, pair_vals, throw_errors=throw_errors, check_bads=check_bads) return out ","def _validate_nirs_info(info, *, throw_errors=True, fnirs=None, which=None, check_bads=True, allow_empty=True): """"""Apply all checks to fNIRS info. Works on all continuous wave types."""""" _validate_type(fnirs, (None, str), 'fnirs') kinds = dict( od='optical density', cw_amplitude='continuous wave', hbx='chromophore', ) _check_option('fnirs', fnirs, (None,) + tuple(kinds)) if fnirs is not None: kind = kinds[fnirs] fnirs = ['hbo', 'hbr'] if fnirs == 'hbx' else f'fnirs_{fnirs}' if not len(pick_types(info, fnirs=fnirs)): raise RuntimeError( f'{which} must operate on {kind} data, but none was found.') freqs = np.unique(_channel_frequencies(info)) if freqs.size > 0: pair_vals = freqs else: pair_vals = np.unique(_channel_chromophore(info)) out = _check_channels_ordered( info, pair_vals, throw_errors=throw_errors, check_bads=check_bads) return out " 4555,"def test_repr_niimgs(): # Tests with file path assert _utils._repr_niimgs(""test"") == ""test"" assert _utils._repr_niimgs(""test"", shorten=False) == ""test"" # Shortening long names by default assert (_utils._repr_niimgs('/this/is/a/very/long/name/for/a/nifti/file') == '/this/is/a/very/lo...') # Explicit shortening of long names assert (_utils._repr_niimgs('/this/is/a/very/long/name/for/a/nifti/file', shorten=True) == '/this/is/a/very/lo...') # Force long display of long names assert (_utils._repr_niimgs('/this/is/a/very/long/name/for/a/nifti/file', shorten=False) == '/this/is/a/very/long/name/for/a/nifti/file') # Tests with list of file paths assert _utils._repr_niimgs([""test"", ""retest""]) == ""[test, retest]"" assert _utils._repr_niimgs([""test"", ""retest""], shorten=False) == ""[test, retest]"" # Lists of long names up to length 3 list_of_size_3 = ['/this/is/a/very/long/name/for/a/nifti/file', '/this/is/another/very/long/name/for/a/nifti/file', '/this/is/again/another/very/long/name/for/a/nifti/file'] # Explicit shortening, all 3 names are displayed, but shortened assert (_utils._repr_niimgs(list_of_size_3, shorten=True) == ""[/this/is/a/very/lo..., /this/is/another/v..., /this/is/again/ano...]"") # Force display, all 3 names are displayed assert (_utils._repr_niimgs(list_of_size_3, shorten=False) == (""[/this/is/a/very/long/name/for/a/nifti/file,"" "" /this/is/another/very/long/name/for/a/nifti/file,"" "" /this/is/again/another/very/long/name/for/a/nifti/file]"")) # Lists longer than 3 # Small names - Explicit shortening assert (_utils._repr_niimgs([""test"", ""retest"", ""reretest"", ""rereretest""], shorten=True) == (""[test,\n"" "" ...\n"" "" rereretest]"")) # Small names - Force full display assert (_utils._repr_niimgs([""test"", ""retest"", ""reretest"", ""rereretest""], shorten=False) == (""[test,\n"" "" retest,\n"" "" reretest,\n"" "" rereretest]"")) # Long names - Explicit shortening list_of_size_4 = 
['/this/is/a/very/long/name/for/a/nifti/file', '/this/is/another/very/long/name/for/a/nifti/file', '/this/is/again/another/very/long/name/for/a/nifti/file', '/this/is/again/another/super/very/long/name/for/a/nifti/file'] assert (_utils._repr_niimgs(list_of_size_4, shorten=True) == (""[/this/is/a/very/lo...,\n"" "" ...\n"" "" /this/is/again/ano...]"")) # Long names - Force full display in pretty print style for readability assert (_utils._repr_niimgs(list_of_size_4, shorten=False) == (""[/this/is/a/very/long/name/for/a/nifti/file,\n"" "" /this/is/another/very/long/name/for/a/nifti/file,\n"" "" /this/is/again/another/very/long/name/for/a/nifti/file,\n"" "" /this/is/again/another/super/very/long/name/for/a/nifti/file]"")) # Create phony Niimg without filename affine = np.eye(4) shape = (10, 10, 10) img1 = Nifti1Image(np.ones(shape), affine) # Shorten has no effect in this case for shorten in [True, False]: assert ( _utils._repr_niimgs(img1, shorten=shorten).replace(""10L"",""10"") == (""%s(\nshape=%s,\naffine=%s\n)"" % (img1.__class__.__name__, repr(shape), repr(affine)))) # Add filename long enough to qualify for shortening _, tmpimg1 = tempfile.mkstemp(suffix='_long.nii') nibabel.save(img1, tmpimg1) assert ( _utils._repr_niimgs(img1, shorten=False) == (""%s('%s')"" % (img1.__class__.__name__, img1.get_filename()))) assert ( _utils._repr_niimgs(img1, shorten=True) == (""%s('%s...')"" % (img1.__class__.__name__, img1.get_filename()[:18]))) ","def test_repr_niimgs(): # Tests with file path assert _utils._repr_niimgs(""test"") == ""test"" assert _utils._repr_niimgs(""test"", shorten=False) == ""test"" # Shortening long names by default assert (_utils._repr_niimgs('/this/is/a/very/long/name/for/a/nifti/file') == '/this/is/a/very/lo...') # Explicit shortening of long names assert (_utils._repr_niimgs('/this/is/a/very/long/name/for/a/nifti/file', shorten=True) == '/this/is/a/very/lo...') # Force long display of long names assert (_utils._repr_niimgs('/this/is/a/very/long/name/for/a/nifti/file', shorten=False) == '/this/is/a/very/long/name/for/a/nifti/file') # Tests with list of file paths assert _utils._repr_niimgs([""test"", ""retest""]) == ""[test, retest]"" assert _utils._repr_niimgs([""test"", ""retest""], shorten=False) == ""[test, retest]"" # Lists of long names up to length 3 list_of_size_3 = ['/this/is/a/very/long/name/for/a/nifti/file', '/this/is/another/very/long/name/for/a/nifti/file', '/this/is/again/another/very/long/name/for/a/nifti/file'] # Explicit shortening, all 3 names are displayed, but shortened assert (_utils._repr_niimgs(list_of_size_3, shorten=True) == ""[/this/is/a/very/lo..., /this/is/another/v..., /this/is/again/ano...]"") # Force display, all 3 names are displayed assert (_utils._repr_niimgs(list_of_size_3, shorten=False) == (""[/this/is/a/very/long/name/for/a/nifti/file,"" "" /this/is/another/very/long/name/for/a/nifti/file,"" "" /this/is/again/another/very/long/name/for/a/nifti/file]"")) # Lists longer than 3 # Small names - Explicit shortening assert (_utils._repr_niimgs([""test"", ""retest"", ""reretest"", ""rereretest""], shorten=True) == (""[test,\n"" "" ...\n"" "" rereretest]"")) # Small names - Force full display assert (_utils._repr_niimgs([""test"", ""retest"", ""reretest"", ""rereretest""], shorten=False) == (""[test,\n"" "" retest,\n"" "" reretest,\n"" "" rereretest]"")) # Long names - Explicit shortening list_of_size_4 = ['/this/is/a/very/long/name/for/a/nifti/file', '/this/is/another/very/long/name/for/a/nifti/file', 
'/this/is/again/another/very/long/name/for/a/nifti/file', '/this/is/again/another/super/very/long/name/for/a/nifti/file'] assert (_utils._repr_niimgs(list_of_size_4, shorten=True) == (""[/this/is/a/very/lo...,\n"" "" ...\n"" "" /this/is/again/ano...]"")) # Long names - Force full display in pretty print style for readability assert (_utils._repr_niimgs(list_of_size_4, shorten=False) == (""[/this/is/a/very/long/name/for/a/nifti/file,\n"" "" /this/is/another/very/long/name/for/a/nifti/file,\n"" "" /this/is/again/another/very/long/name/for/a/nifti/file,\n"" "" /this/is/again/another/super/very/long/name/for/a/nifti/file]"")) # Create phony Niimg without filename affine = np.eye(4) shape = (10, 10, 10) img1 = Nifti1Image(np.ones(shape), affine) # Shorten has no effect in this case for shorten in [True, False]: assert ( _utils._repr_niimgs(img1, shorten=shorten).replace(""10L"", ""10"") == (""%s(\nshape=%s,\naffine=%s\n)"" % (img1.__class__.__name__, repr(shape), repr(affine)))) # Add filename long enough to qualify for shortening _, tmpimg1 = tempfile.mkstemp(suffix='_long.nii') nibabel.save(img1, tmpimg1) assert ( _utils._repr_niimgs(img1, shorten=False) == (""%s('%s')"" % (img1.__class__.__name__, img1.get_filename()))) assert ( _utils._repr_niimgs(img1, shorten=True) == (""%s('%s...')"" % (img1.__class__.__name__, img1.get_filename()[:18]))) " 31807,"def main() -> None: params = demisto.params() client_id: str = params['credentials']['identifier'] client_secret: str = params['credentials']['password'] base_url: str = params['url'].rstrip('/') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) # Fetch incident related params: first_fetch_time = params.get('first_fetch', '3 days') fetch_limit = arg_to_number(params.get('max_fetch', LIMIT_DEFAULT)) fetch_state = params.get('state') fetch_severity = params.get('severity') fetch_status = ','.join(STATUS_MAP.get(x) for x in argToList(params.get('status', []))) fetch_app_ids = ','.join(argToList(params.get('app_ids', []))) commands = { 'test-module': test_module, 'saas-security-incidents-get': get_incidents_command, 'saas-security-incident-get-by-id': get_incident_by_id_command, 'saas-security-incident-state-update': update_incident_state_command, 'saas-security-get-apps': get_apps_command, 'saas-security-asset-remediate': remediate_asset_command, 'saas-security-remediation-status-get': get_remediation_status_command, } command = demisto.command() demisto.debug(f'Command being called is {command}') try: client = Client( base_url=base_url, client_id=client_id, client_secret=client_secret, verify=verify_certificate, proxy=proxy, ) if command == 'test-module': return_results(test_module(client, params.get('isFetch'), first_fetch_time, fetch_state, fetch_severity, fetch_status, fetch_app_ids)) if command == 'fetch-incidents': fetch_incidents(client, first_fetch_time, fetch_limit, fetch_state, fetch_severity, fetch_status, fetch_app_ids) if command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ","def main() -> None: params = demisto.params() client_id: str = params['credentials']['identifier'] client_secret: str = params['credentials']['password'] base_url: str = params['url'].rstrip('/') verify_certificate = not params.get('insecure', False) proxy = 
params.get('proxy', False) # Fetch incident related params: first_fetch_time = params.get('first_fetch', '3 days') fetch_limit = arg_to_number(params.get('max_fetch', LIMIT_DEFAULT)) fetch_state = params.get('state') fetch_severity = params.get('severity') fetch_status = ','.join(STATUS_MAP.get(x) for x in argToList(params.get('status', []))) fetch_app_ids = ','.join(argToList(params.get('app_ids', []))) commands = { 'test-module': test_module, 'saas-security-incidents-get': get_incidents_command, 'saas-security-incident-get-by-id': get_incident_by_id_command, 'saas-security-incident-state-update': update_incident_state_command, 'saas-security-get-apps': get_apps_command, 'saas-security-asset-remediate': remediate_asset_command, 'saas-security-remediation-status-get': get_remediation_status_command, } command = demisto.command() demisto.debug(f'Command being called is {command}') try: client = Client( base_url=base_url, client_id=client_id, client_secret=client_secret, verify=verify_certificate, proxy=proxy, ) if command == 'test-module': return_results(test_module(client, params.get('isFetch'), first_fetch_time, fetch_state, fetch_severity, fetch_status, fetch_app_ids)) if command == 'fetch-incidents': fetch_incidents(client, first_fetch_time, fetch_limit, fetch_state, fetch_severity, fetch_status, fetch_app_ids) if command in commands: return_results(commands[command](client, demisto.args())) else: raise NotImplementedError(f'Command ""{command}"" is not implemented.') except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') " 46546,"def set_some_new_deposits(spec, state, rng): num_validators = len(state.validators) # last 10th of validators are new deposits for i in range(len(state.validators))[0:num_validators // 10]: index = num_validators - 1 - i mock_deposit(spec, state, index) # Set half to eligible for activation if i % 2 == 0: state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state) ","def set_some_new_deposits(spec, state, rng): num_validators = len(state.validators) # last 10th of validators are new deposits for i in range(len(state.validators))[0:num_validators // 10]: index = num_validators - 1 - i mock_deposit(spec, state, index) # Set half to eligible for activation if rng.choice([True, False]): state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state) " 19988,"def median_blur(gray_img, ksize): """"""Applies a median blur filter (applies median value to central pixel within a kernel size). Inputs: gray_img = Grayscale image data ksize = kernel size => integer or tuple, ksize x ksize box if integer, (n, m) size box if tuple Returns: img_mblur = blurred image :param gray_img: numpy.ndarray :param ksize: int or tuple :return img_mblur: numpy.ndarray """""" # Make sure ksize is valid if type(ksize) != int and type(ksize) != tuple: fatal_error(""Invalid ksize, must be integer or tuple"") img_mblur = median_filter(gray_img, size=ksize) params.device += 1 if params.debug == 'print': print_image(img_mblur, os.path.join(params.debug_outdir, str(params.device) + '_median_blur' + str(ksize) + '.png')) elif params.debug == 'plot': plot_image(img_mblur, cmap='gray') return img_mblur ","def median_blur(gray_img, ksize): """"""Applies a median blur filter (applies median value to central pixel within a kernel size). 
Inputs: gray_img = Grayscale image data ksize = kernel size => integer or tuple, ksize x ksize box if integer, (n, m) size box if tuple Returns: img_mblur = blurred image :param gray_img: numpy.ndarray :param ksize: int or tuple :return img_mblur: numpy.ndarray """""" # Make sure ksize is valid if type(ksize) is not int and type(ksize) is not tuple: fatal_error(""Invalid ksize, must be integer or tuple"") img_mblur = median_filter(gray_img, size=ksize) params.device += 1 if params.debug == 'print': print_image(img_mblur, os.path.join(params.debug_outdir, str(params.device) + '_median_blur' + str(ksize) + '.png')) elif params.debug == 'plot': plot_image(img_mblur, cmap='gray') return img_mblur " 43948,"def drawable_grid(ops, wire_map=None): """"""Determine non-overlapping yet dense placement of operations for drawing. Returns structure compatible with ``qml.circuit_drawer.Grid``. Args: ops Iterable[~.Operator]: a list of operations Keyword Args: wire_map=None (dict): a map from wire label to non-negative integers Returns: List[List[~.Operator]] : layers compatible with grid objects """""" if wire_map is None: wire_map = default_wire_map(ops) if len(ops) == 0: if len(wire_map) == 0: return [[]] return [[] for _ in range(len(wire_map))] ops_per_layer = drawable_layers(ops, wire_map=wire_map) n_wires = len(wire_map) n_layers = len(ops_per_layer) grid = [[None for _ in range(n_layers)] for _ in range(n_wires)] for layer, layer_ops in enumerate(ops_per_layer): for op in layer_ops: if len(op.wires) == 0: # apply to all wires, like state and sample for wire in range(n_wires): grid[wire][layer] = op for wire in op.wires: grid[wire_map[wire]][layer] = op return grid ","def drawable_grid(ops, wire_map=None): """"""Determine non-overlapping yet dense placement of operations for drawing. Returns structure compatible with ``qml.circuit_drawer.Grid``. Args: ops Iterable[~.Operator]: a list of operations Keyword Args: wire_map=None (dict): a map from wire label to non-negative integers Returns: List[List[~.Operator]] : layers compatible with grid objects """""" if wire_map is None: wire_map = default_wire_map(ops) if len(ops) == 0: if len(wire_map) == 0: return [[]] return [[] for _ in range(len(wire_map))] ops_per_layer = drawable_layers(ops, wire_map=wire_map) n_wires = len(wire_map) n_layers = len(ops_per_layer) grid = [[None for _ in range(n_layers)] for _ in range(n_wires)] for layer, layer_ops in enumerate(ops_per_layer): for op in layer_ops: if len(op.wires) == 0: # apply to all wires, like qml.state and qml.sample for wire in range(n_wires): grid[wire][layer] = op for wire in op.wires: grid[wire_map[wire]][layer] = op return grid " 33483,"def createParser(filename, real_filename=None, tags=None): """""" Create a parser from a file or returns None on error. Options: - file (str|io.IOBase): Input file name or a byte io.IOBase stream ; - real_filename (str): Real file name. """""" if not tags: tags = [] stream = FileInputStream(filename, real_filename, tags=tags) guess = guessParser(stream) if not guess: stream.close() return guess ","def createParser(filename, real_filename=None, tags=None): """""" Create a parser from a file or returns None on error. Options: - file (str|io.IOBase): Input file name or a byte io.IOBase stream ; - real_filename (str): Real file name. 
"""""" if not tags: tags = [] stream = FileInputStream(filename, real_filename, tags=tags) guess = guessParser(stream) if guess is None: stream.close() return guess " 7198,"def regionprops_table(label_image, intensity_image=None, properties=('label', 'bbox'), *, cache=True, separator='-'): """"""Compute image properties and return them as a pandas-compatible table. The table is a dictionary mapping column names to value arrays. See Notes section below for details. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. properties : tuple or list of str, optional Properties that will be included in the resulting dictionary For a list of available properties, please see :func:`regionprops`. Users should remember to add ""label"" to keep track of region identities. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. separator : str, optional For non-scalar properties not listed in OBJECT_COLUMNS, each element will appear in its own column, with the index of that element separated from the property name by this separator. For example, the inertia tensor of a 2D region will appear in four columns: ``inertia_tensor-0-0``, ``inertia_tensor-0-1``, ``inertia_tensor-1-0``, and ``inertia_tensor-1-1`` (where the separator is ``-``). Object columns are those that cannot be split in this way because the number of columns would change depending on the object. For example, ``image`` and ``coords``. Returns ------- out_dict : dict Dictionary mapping property names to an array of values of that property, one value per region. This dictionary can be used as input to pandas ``DataFrame`` to map property names to columns in the frame and regions to rows. If the image has 0 regions, the output will be for a an ndimensional cube of size 4 with a hypercube in the middle of it. All arrays will have no elements and parameters which are treated as arrays will have not elements. Notes ----- Each column contains either a scalar property, an object property, or an element in a multidimensional array. Properties with scalar values for each region, such as ""eccentricity"", will appear as a float or int array with that property name as key. Multidimensional properties *of fixed size* for a given image dimension, such as ""centroid"" (every centroid will have three elements in a 3D image, no matter the region size), will be split into that many columns, with the name {property_name}{separator}{element_num} (for 1D properties), {property_name}{separator}{elem_num0}{separator}{elem_num1} (for 2D properties), and so on. For multidimensional properties that don't have a fixed size, such as ""image"" (the image of a region varies in size depending on the region size), an object array will be used, with the corresponding property name as the key. Examples -------- >>> from skimage import data, util, measure >>> image = data.coins() >>> label_image = measure.label(image > 110, connectivity=image.ndim) >>> props = regionprops_table(label_image, image, ... properties=['label', 'inertia_tensor', ... 'inertia_tensor_eigvals']) >>> props # doctest: +ELLIPSIS +SKIP {'label': array([ 1, 2, ...]), ... 'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ... 
..., 'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])} The resulting dictionary can be directly passed to pandas, if installed, to obtain a clean DataFrame: >>> import pandas as pd # doctest: +SKIP >>> data = pd.DataFrame(props) # doctest: +SKIP >>> data.head() # doctest: +SKIP label inertia_tensor-0-0 ... inertia_tensor_eigvals-1 0 1 4012.909888 ... 267.065503 1 2 8.514739 ... 2.834806 2 3 0.666667 ... 0.000000 3 4 0.000000 ... 0.000000 4 5 0.222222 ... 0.111111 [5 rows x 7 columns] """""" regions = regionprops(label_image, intensity_image=intensity_image, cache=cache) if len(regions) == 0: label_image = np.zeros(tuple([3] * len(label_image.shape)), dtype=int) label_image[tuple([1] * len(label_image.shape))] = 1 if intensity_image is not None: intensity_image = label_image.copy() regions = regionprops(label_image, intensity_image=intensity_image, cache=cache) out = {} for prop in properties: if np.isscalar(regions[0][prop]) or prop in OBJECT_COLUMNS: out[prop] = np.empty(shape=(0,), dtype=COL_DTYPES[prop]) return out return _props_to_dict(regions, properties=properties, separator=separator) ","def regionprops_table(label_image, intensity_image=None, properties=('label', 'bbox'), *, cache=True, separator='-'): """"""Compute image properties and return them as a pandas-compatible table. The table is a dictionary mapping column names to value arrays. See Notes section below for details. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. properties : tuple or list of str, optional Properties that will be included in the resulting dictionary For a list of available properties, please see :func:`regionprops`. Users should remember to add ""label"" to keep track of region identities. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. separator : str, optional For non-scalar properties not listed in OBJECT_COLUMNS, each element will appear in its own column, with the index of that element separated from the property name by this separator. For example, the inertia tensor of a 2D region will appear in four columns: ``inertia_tensor-0-0``, ``inertia_tensor-0-1``, ``inertia_tensor-1-0``, and ``inertia_tensor-1-1`` (where the separator is ``-``). Object columns are those that cannot be split in this way because the number of columns would change depending on the object. For example, ``image`` and ``coords``. Returns ------- out_dict : dict Dictionary mapping property names to an array of values of that property, one value per region. This dictionary can be used as input to pandas ``DataFrame`` to map property names to columns in the frame and regions to rows. If the image has 0 regions, the output will be for a an ndimensional cube of size 4 with a hypercube in the middle of it. All arrays will have no elements and parameters which are treated as arrays will have not elements. Notes ----- Each column contains either a scalar property, an object property, or an element in a multidimensional array. Properties with scalar values for each region, such as ""eccentricity"", will appear as a float or int array with that property name as key. 
Multidimensional properties *of fixed size* for a given image dimension, such as ""centroid"" (every centroid will have three elements in a 3D image, no matter the region size), will be split into that many columns, with the name {property_name}{separator}{element_num} (for 1D properties), {property_name}{separator}{elem_num0}{separator}{elem_num1} (for 2D properties), and so on. For multidimensional properties that don't have a fixed size, such as ""image"" (the image of a region varies in size depending on the region size), an object array will be used, with the corresponding property name as the key. Examples -------- >>> from skimage import data, util, measure >>> image = data.coins() >>> label_image = measure.label(image > 110, connectivity=image.ndim) >>> props = regionprops_table(label_image, image, ... properties=['label', 'inertia_tensor', ... 'inertia_tensor_eigvals']) >>> props # doctest: +ELLIPSIS +SKIP {'label': array([ 1, 2, ...]), ... 'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ... ..., 'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])} The resulting dictionary can be directly passed to pandas, if installed, to obtain a clean DataFrame: >>> import pandas as pd # doctest: +SKIP >>> data = pd.DataFrame(props) # doctest: +SKIP >>> data.head() # doctest: +SKIP label inertia_tensor-0-0 ... inertia_tensor_eigvals-1 0 1 4012.909888 ... 267.065503 1 2 8.514739 ... 2.834806 2 3 0.666667 ... 0.000000 3 4 0.000000 ... 0.000000 4 5 0.222222 ... 0.111111 [5 rows x 7 columns] """""" regions = regionprops(label_image, intensity_image=intensity_image, cache=cache) if len(regions) == 0: label_image = np.zeros((3,) * label_image.ndim, dtype=int) label_image[tuple([1] * len(label_image.shape))] = 1 if intensity_image is not None: intensity_image = label_image.copy() regions = regionprops(label_image, intensity_image=intensity_image, cache=cache) out = {} for prop in properties: if np.isscalar(regions[0][prop]) or prop in OBJECT_COLUMNS: out[prop] = np.empty(shape=(0,), dtype=COL_DTYPES[prop]) return out return _props_to_dict(regions, properties=properties, separator=separator) " 39244,"def add_noise(waveform: torch.Tensor, noise: torch.Tensor, lengths: torch.Tensor, snr: torch.Tensor) -> torch.Tensor: r""""""Scales and adds noise from multiple sources to waveform per signal-to-noise ratio. Specifically, for each waveform vector :math:`x \in \mathbb{R}^L` and noise vectors :math:`n_1, \ldots, n_N \in \mathbb{R}^L` corresponding to :math:`N` sources, the function computes output :math:`y` as .. math:: y = x + \sum_{i = 1}^N a_i n_i , where .. math:: a_i = \sqrt{ \frac{ ||x||_{2}^{2} }{ ||n_i||_{2}^{2} } \cdot 10^{-\frac{\text{SNR}_i}{10}} } , with :math:`\text{SNR}_i` being the desired signal-to-noise ratio between :math:`x` and :math:`n_i`, in dB. Note that this function broadcasts singleton leading dimensions in its inputs in a manner that is consistent with the above formulae and PyTorch's broadcasting semantics. Args: waveform (torch.Tensor): Input waveform, with shape `(*, L)`. noise (torch.Tensor): Noise, with shape `(*, N, L)` (leading dimensions must match those of ``waveform``). lengths (torch.Tensor): Valid lengths of signals in `waveform` and `noise`, with shape `(*,)`. snr (torch.Tensor): Signal-to-noise ratios in dB, with shape `(*, N)`. Returns: torch.Tensor: Result of scaling and adding ``noise`` to ``waveform``, with shape `(*, L)` (same shape as ``waveform``). 
"""""" input_leading_dims = (len(waveform.shape[:-1]), len(noise.shape[:-2]), len(lengths.shape), len(snr.shape[:-1])) if any([a != input_leading_dims[0] for a in input_leading_dims[1:]]): raise ValueError(f""Input leading dimensions don't match (got {input_leading_dims})."") if waveform.shape[-1] != noise.shape[-1]: raise ValueError(f""Length dimensions of waveform and noise don't match (got {waveform[-1]} and {noise[-1]})."") if noise.shape[-2] != snr.shape[-1]: raise ValueError(f""Noise source dimensions of noise and snr don't match (got {noise[-2]} and {snr[-1]})."") # compute scale mask = torch.arange(0, waveform.size(-1)).expand(waveform.shape) < lengths.unsqueeze(-1) # (*, L) < (*, 1) = (*, L) energy_signal = torch.linalg.vector_norm(waveform * mask, ord=2, dim=-1) ** 2 # (*,) energy_noise = torch.linalg.vector_norm(noise * mask.unsqueeze(-2), ord=2, dim=-1) ** 2 # (*, N) original_snr = energy_signal.unsqueeze(-1) / energy_noise # (*, N) snr_abs = 10 ** (snr / 10.0) # (*, N) scale = (original_snr / snr_abs).sqrt() # (*, N) # scale noise scaled_noise = scale.unsqueeze(-1) * noise # (*, N, 1) * (*, N, L) = (*, N, L) # sum-reduce scaled noise scaled_noise = scaled_noise.sum(-2) # (*, L) return waveform + scaled_noise # (*, L) ","def add_noise(waveform: torch.Tensor, noise: torch.Tensor, lengths: torch.Tensor, snr: torch.Tensor) -> torch.Tensor: r""""""Scales and adds noise from multiple sources to waveform per signal-to-noise ratio. Specifically, for each waveform vector :math:`x \in \mathbb{R}^L` and noise vectors :math:`n_1, \ldots, n_N \in \mathbb{R}^L` corresponding to :math:`N` sources, the function computes output :math:`y` as .. math:: y = x + \sum_{i = 1}^N a_i n_i , where .. math:: a_i = \sqrt{ \frac{ ||x||_{2}^{2} }{ ||n_i||_{2}^{2} } \cdot 10^{-\frac{\text{SNR}_i}{10}} } , with :math:`\text{SNR}_i` being the desired signal-to-noise ratio between :math:`x` and :math:`n_i`, in dB. Note that this function broadcasts singleton leading dimensions in its inputs in a manner that is consistent with the above formulae and PyTorch's broadcasting semantics. Args: waveform (torch.Tensor): Input waveform, with shape `(*, L)`. noise (torch.Tensor): Noise, with shape `(*, N, L)` (leading dimensions must match those of ``waveform``). lengths (torch.Tensor): Valid lengths of signals in `waveform` and `noise`, with shape `(*,)`. snr (torch.Tensor): Signal-to-noise ratios in dB, with shape `(*, N)`. Returns: torch.Tensor: Result of scaling and adding ``noise`` to ``waveform``, with shape `(*, L)` (same shape as ``waveform``). 
"""""" input_leading_dims = (waveform.ndim-1, noise.ndim-2, lengths.ndim, snr.ndim-1) if any([a != input_leading_dims[0] for a in input_leading_dims[1:]]): raise ValueError(f""Input leading dimensions don't match (got {input_leading_dims})."") if waveform.shape[-1] != noise.shape[-1]: raise ValueError(f""Length dimensions of waveform and noise don't match (got {waveform[-1]} and {noise[-1]})."") if noise.shape[-2] != snr.shape[-1]: raise ValueError(f""Noise source dimensions of noise and snr don't match (got {noise[-2]} and {snr[-1]})."") # compute scale mask = torch.arange(0, waveform.size(-1)).expand(waveform.shape) < lengths.unsqueeze(-1) # (*, L) < (*, 1) = (*, L) energy_signal = torch.linalg.vector_norm(waveform * mask, ord=2, dim=-1) ** 2 # (*,) energy_noise = torch.linalg.vector_norm(noise * mask.unsqueeze(-2), ord=2, dim=-1) ** 2 # (*, N) original_snr = energy_signal.unsqueeze(-1) / energy_noise # (*, N) snr_abs = 10 ** (snr / 10.0) # (*, N) scale = (original_snr / snr_abs).sqrt() # (*, N) # scale noise scaled_noise = scale.unsqueeze(-1) * noise # (*, N, 1) * (*, N, L) = (*, N, L) # sum-reduce scaled noise scaled_noise = scaled_noise.sum(-2) # (*, L) return waveform + scaled_noise # (*, L) " 52025,"def apply_pin_expressions(version, min_pin='x.x.x.x.x.x.x', max_pin='x'): pins = [len(p.split('.')) if p else None for p in (min_pin, max_pin)] parsed_version = VersionOrder(version).version[1:] nesting_position = None flat_list = [] for idx, item in enumerate(parsed_version): if isinstance(item, list): nesting_position = idx flat_list.extend(item) else: flat_list.append(item) if max_pin and len(max_pin.split('.')) > len(flat_list): pins[1] = len(flat_list) versions = ['', ''] # first idx is lower bound pin; second is upper bound pin. # pin value is number of places to pin. for p_idx, pin in enumerate(pins): if pin: # flat_list is the blown-out representation of the version for v_idx, v in enumerate(flat_list[:pin]): # upper bound pin if p_idx == 1 and v_idx == pin - 1: # is the last place an alphabetic character? OpenSSL, JPEG alpha_ver = str(flat_list[min(pin, len(flat_list) - 1)]).isalpha() v = _increment(v, alpha_ver) versions[p_idx] += str(v) if v_idx != nesting_position: versions[p_idx] += '.' if versions[p_idx][-1] == '.': versions[p_idx] = versions[p_idx][:-1] if versions[0]: if VersionOrder(version) < VersionOrder(versions[0]): # If the minimum is greater than the version this is a pre-release build. # Use the version as the lower bound versions[0] = '>=' + str(version) else: versions[0] = '>=' + versions[0] if versions[1]: versions[1] = '<' + versions[1] return ','.join([v for v in versions if v]) ","def apply_pin_expressions(version, min_pin='x.x.x.x.x.x.x', max_pin='x'): pins = [len(p.split('.')) if p else None for p in (min_pin, max_pin)] parsed_version = VersionOrder(version).version[1:] nesting_position = None flat_list = [] for idx, item in enumerate(parsed_version): if isinstance(item, list): nesting_position = idx flat_list.extend(item) else: flat_list.append(item) if max_pin and len(max_pin.split('.')) > len(flat_list): pins[1] = len(flat_list) versions = ['', ''] # first idx is lower bound pin; second is upper bound pin. # pin value is number of places to pin. for p_idx, pin in enumerate(pins): if pin: # flat_list is the blown-out representation of the version for v_idx, v in enumerate(flat_list[:pin]): # upper bound pin if p_idx == 1 and v_idx == pin - 1: # is the last place an alphabetic character? 
OpenSSL, JPEG alpha_ver = str(flat_list[min(pin, len(flat_list) - 1)]).isalpha() v = _increment(v, alpha_ver) versions[p_idx] += str(v) if v_idx != nesting_position: versions[p_idx] += '.' if versions[p_idx][-1] == '.': versions[p_idx] = versions[p_idx][:-1] if versions[0]: if ( VersionOrder(version) < VersionOrder(versions[0]) and not MatchSpec('pkg ' + version).match(MatchSpec('pkg ' + versions[0])) ): # If the minimum is greater than the version this is a pre-release build. # Use the version as the lower bound versions[0] = '>=' + str(version) else: versions[0] = '>=' + versions[0] if versions[1]: versions[1] = '<' + versions[1] return ','.join([v for v in versions if v]) " 42056,"def create_study(n_objectives, seed): directions = [""minimize"" for _ in range(n_objectives)] sampler_name = sys.argv[1] # Sampler. sampler_cls = getattr( optuna.multi_objective.samplers, sampler_name, getattr(optuna.integration, sampler_name, None), ) if sampler_cls is None: raise ValueError(""Unknown sampler: {}."".format(sampler_name)) # TODO(drumehiron): sampler_kwargs # sampler_kwargs = json.loads(sys.argv[2]) # try: # sampler_kwargs[""seed""] = seed # sampler = sampler_cls(**sampler_kwargs) # except: # del sampler_kwargs[""seed""] # sampler = sampler_cls(**sampler_kwargs) sampler = sampler_cls() return optuna.multi_objective.create_study(directions=directions, sampler=sampler) ","def create_study(seed: int) -> optuna.Study: directions = [""minimize"" for _ in range(n_objectives)] sampler_name = sys.argv[1] # Sampler. sampler_cls = getattr( optuna.multi_objective.samplers, sampler_name, getattr(optuna.integration, sampler_name, None), ) if sampler_cls is None: raise ValueError(""Unknown sampler: {}."".format(sampler_name)) # TODO(drumehiron): sampler_kwargs # sampler_kwargs = json.loads(sys.argv[2]) # try: # sampler_kwargs[""seed""] = seed # sampler = sampler_cls(**sampler_kwargs) # except: # del sampler_kwargs[""seed""] # sampler = sampler_cls(**sampler_kwargs) sampler = sampler_cls() return optuna.multi_objective.create_study(directions=directions, sampler=sampler) " 25898,"def load_arguments(self, _): # pylint: disable=too-many-statements # Arguments for IoT DPS with self.argument_context('iot dps') as c: c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name') with self.argument_context('iot dps create') as c: c.argument('location', get_location_type(self.cli_ctx), help='Location of your IoT Provisioning Service. Default is the location of target resource group.') c.argument('sku', arg_type=get_enum_type(IotDpsSku), help='Pricing tier for the IoT provisioning service.') c.argument('unit', help='Units in your IoT Provisioning Service.', type=int) for subgroup in ['access-policy', 'linked-hub', 'certificate']: with self.argument_context('iot dps {}'.format(subgroup)) as c: c.argument('dps_name', options_list=['--dps-name'], id_part=None) with self.argument_context('iot dps access-policy') as c: c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'], help='A friendly name for DPS access policy.') with self.argument_context('iot dps access-policy create') as c: c.argument('rights', options_list=['--rights', '-r'], nargs='+', arg_type=get_enum_type(AccessRightsDescription), help='Access rights for the IoT provisioning service. 
Use space-separated list for multiple rights.') c.argument('primary_key', help='Primary SAS key value.') c.argument('secondary_key', help='Secondary SAS key value.') with self.argument_context('iot dps access-policy update') as c: c.argument('rights', options_list=['--rights', '-r'], nargs='+', arg_type=get_enum_type(AccessRightsDescription), help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.') c.argument('primary_key', help='Primary SAS key value.') c.argument('secondary_key', help='Secondary SAS key value.') with self.argument_context('iot dps linked-hub') as c: c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.') with self.argument_context('iot dps linked-hub create') as c: c.argument('connection_string', help='Connection string of the IoT hub.') c.argument('location', get_location_type(self.cli_ctx), help='Location of the IoT hub.') c.argument('apply_allocation_policy', help='A boolean indicating whether to apply allocation policy to the IoT hub.', arg_type=get_three_state_flag()) c.argument('allocation_weight', help='Allocation weight of the IoT hub.') with self.argument_context('iot dps linked-hub update') as c: c.argument('apply_allocation_policy', help='A boolean indicating whether to apply allocation policy to the Iot hub.', arg_type=get_three_state_flag()) c.argument('allocation_weight', help='Allocation weight of the IoT hub.') with self.argument_context('iot dps allocation-policy update') as c: c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy), help='Allocation policy for the IoT provisioning service.') with self.argument_context('iot dps certificate') as c: c.argument('certificate_path', options_list=['--path', '-p'], type=file_type, completer=FilesCompleter(["".cer"", "".pem""]), help='The path to the file containing the certificate.') c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'], help='A friendly name for the certificate.') c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.') # Arguments for IoT Hub with self.argument_context('iot hub') as c: c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name') c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.') for subgroup in ['consumer-group', 'policy', 'job', 'certificate', 'routing-endpoint', 'route']: with self.argument_context('iot hub {}'.format(subgroup)) as c: c.argument('hub_name', options_list=['--hub-name']) with self.argument_context('iot hub route') as c: c.argument('route_name', options_list=['--route-name', '--name', '-n'], help='Name of the Route.') c.argument('endpoint_name', options_list=['--endpoint-name', '--endpoint', '--en'], help='Name of the routing endpoint.') c.argument('condition', options_list=['--condition', '-c'], help='Condition that is evaluated to apply the routing rule.') c.argument('enabled', options_list=['--enabled', '-e'], arg_type=get_three_state_flag(), help='A boolean indicating whether to enable route to the Iot hub.') c.argument('source_type', arg_type=get_enum_type(RouteSourceType), options_list=['--source-type', '--type', '--source', '-s'], help='Source of the route.') with self.argument_context('iot hub route test') as c: c.argument('body', options_list=['--body', '-b'], help='Body of the route message.') c.argument('app_properties', options_list=['--app-properties', '--ap'], help='App properties of the 
route message.') c.argument('system_properties', options_list=['--system-properties', '--sp'], help='System properties of the route message.') with self.argument_context('iot hub routing-endpoint') as c: c.argument('endpoint_name', options_list=['--endpoint-name', '--name', '-n'], help='Name of the Routing Endpoint.') c.argument('endpoint_resource_group', options_list=['--endpoint-resource-group', '--erg', '-r'], help='Resource group of the Endpoint resoure.') c.argument('endpoint_subscription_id', options_list=['--endpoint-subscription-id', '-s'], help='SubscriptionId of the Endpoint resource.') c.argument('connection_string', options_list=['--connection-string', '-c'], help='Connection string of the Routing Endpoint.') c.argument('container_name', options_list=['--container-name', '--container'], help='Name of the storage container.') c.argument('endpoint_type', arg_type=get_enum_type(EndpointType), options_list=['--endpoint-type', '--type', '-t'], help='Type of the Routing Endpoint.') c.argument('encoding', options_list=['--encoding'], arg_type=get_enum_type(EncodingFormat), help='Encoding format for the container. The default is AVRO. ' 'Note that this field is applicable only for blob container endpoints.') with self.argument_context('iot hub certificate') as c: c.argument('certificate_path', options_list=['--path', '-p'], type=file_type, completer=FilesCompleter(["".cer"", "".pem""]), help='The path to the file containing the certificate.') c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.') with self.argument_context('iot hub consumer-group') as c: c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2', help='Event hub consumer group name.') c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.') with self.argument_context('iot hub policy') as c: c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1', help='Shared access policy name.') permission_values = ', '.join([x.value for x in SimpleAccessRights]) c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower, help='Permissions of shared access policy. Use space-separated list for multiple permissions. ' 'Possible values: {}'.format(permission_values)) with self.argument_context('iot hub job') as c: c.argument('job_id', id_part='child_name_1', help='Job Id.') with self.argument_context('iot hub create') as c: c.argument('hub_name', completer=None) c.argument('location', get_location_type(self.cli_ctx), help='Location of your IoT Hub. Default is the location of target resource group.') c.argument('sku', arg_type=get_enum_type(IotHubSku), help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. ' 'Note that only one free IoT hub instance is allowed in each ' 'subscription. 
Exception will be thrown if free instances exceed one.') c.argument('unit', help='Units in your IoT Hub.', type=int) c.argument('partition_count', help='The number of partitions of the backing Event Hub for device-to-cloud messages.', type=int) with self.argument_context('iot hub show-connection-string') as c: c.argument('show_all', options_list=['--all'], help='Allow to show all shared access policies.') c.argument('hub_name', options_list=['--hub-name', '--name', '-n']) c.argument('policy_name', help='Shared access policy to use.') c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.') with self.argument_context('iot hub manual-failover') as c: c.argument('failover_region', options_list=['--failover-region', '--fr'], help='The region that the IoT hub' 'fails over to. Must be the paired region to the current IoT hub region.') # Arguments for IoT Digital Twin with self.argument_context('iot pnp') as c: c.argument('repo_endpoint', options_list=['--repo-rp', '--rp', '--endpoint', '-e'], help='PnP endpoint.') c.argument('repo_id', options_list=['--repo-id', '--id'], help='Repository Id.') with self.argument_context('iot pnp repository') as c: c.argument('repo_name', options_list=['--repo-name', '--name', '-n'], help='Repository Name.') with self.argument_context('iot pnp repository get-provision-status') as c: c.argument('track_id', options_list=['--track-id', '--tid'], help='Tracking id (provisioningState).') with self.argument_context('iot pnp authkey') as c: c.argument('key_id', options_list=['--key-id', '--kid'], help='Key Id.') c.argument('user_role', options_list=['--user-role', '--role', '--ur', '-r'], help='User role for the key.', arg_type=get_enum_type(UserRole)) ","def load_arguments(self, _): # pylint: disable=too-many-statements # Arguments for IoT DPS with self.argument_context('iot dps') as c: c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name') with self.argument_context('iot dps create') as c: c.argument('location', get_location_type(self.cli_ctx), help='Location of your IoT Provisioning Service. Default is the location of target resource group.') c.argument('sku', arg_type=get_enum_type(IotDpsSku), help='Pricing tier for the IoT provisioning service.') c.argument('unit', help='Units in your IoT Provisioning Service.', type=int) for subgroup in ['access-policy', 'linked-hub', 'certificate']: with self.argument_context('iot dps {}'.format(subgroup)) as c: c.argument('dps_name', options_list=['--dps-name'], id_part=None) with self.argument_context('iot dps access-policy') as c: c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'], help='A friendly name for DPS access policy.') with self.argument_context('iot dps access-policy create') as c: c.argument('rights', options_list=['--rights', '-r'], nargs='+', arg_type=get_enum_type(AccessRightsDescription), help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.') c.argument('primary_key', help='Primary SAS key value.') c.argument('secondary_key', help='Secondary SAS key value.') with self.argument_context('iot dps access-policy update') as c: c.argument('rights', options_list=['--rights', '-r'], nargs='+', arg_type=get_enum_type(AccessRightsDescription), help='Access rights for the IoT provisioning service. 
Use space-separated list for multiple rights.') c.argument('primary_key', help='Primary SAS key value.') c.argument('secondary_key', help='Secondary SAS key value.') with self.argument_context('iot dps linked-hub') as c: c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.') with self.argument_context('iot dps linked-hub create') as c: c.argument('connection_string', help='Connection string of the IoT hub.') c.argument('location', get_location_type(self.cli_ctx), help='Location of the IoT hub.') c.argument('apply_allocation_policy', help='A boolean indicating whether to apply allocation policy to the IoT hub.', arg_type=get_three_state_flag()) c.argument('allocation_weight', help='Allocation weight of the IoT hub.') with self.argument_context('iot dps linked-hub update') as c: c.argument('apply_allocation_policy', help='A boolean indicating whether to apply allocation policy to the Iot hub.', arg_type=get_three_state_flag()) c.argument('allocation_weight', help='Allocation weight of the IoT hub.') with self.argument_context('iot dps allocation-policy update') as c: c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy), help='Allocation policy for the IoT provisioning service.') with self.argument_context('iot dps certificate') as c: c.argument('certificate_path', options_list=['--path', '-p'], type=file_type, completer=FilesCompleter(["".cer"", "".pem""]), help='The path to the file containing the certificate.') c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'], help='A friendly name for the certificate.') c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.') # Arguments for IoT Hub with self.argument_context('iot hub') as c: c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name') c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.') for subgroup in ['consumer-group', 'policy', 'job', 'certificate', 'routing-endpoint', 'route']: with self.argument_context('iot hub {}'.format(subgroup)) as c: c.argument('hub_name', options_list=['--hub-name']) with self.argument_context('iot hub route') as c: c.argument('route_name', options_list=['--route-name', '--name', '-n'], help='Name of the Route.') c.argument('endpoint_name', options_list=['--endpoint-name', '--endpoint', '--en'], help='Name of the routing endpoint.') c.argument('condition', options_list=['--condition', '-c'], help='Condition that is evaluated to apply the routing rule.') c.argument('enabled', options_list=['--enabled', '-e'], arg_type=get_three_state_flag(), help='A boolean indicating whether to enable route to the Iot hub.') c.argument('source_type', arg_type=get_enum_type(RouteSourceType), options_list=['--source-type', '--type', '--source', '-s'], help='Source of the route.') with self.argument_context('iot hub route test') as c: c.argument('body', options_list=['--body', '-b'], help='Body of the route message.') c.argument('app_properties', options_list=['--app-properties', '--ap'], help='App properties of the route message.') c.argument('system_properties', options_list=['--system-properties', '--sp'], help='System properties of the route message.') with self.argument_context('iot hub routing-endpoint') as c: c.argument('endpoint_name', options_list=['--endpoint-name', '--name', '-n'], help='Name of the Routing Endpoint.') c.argument('endpoint_resource_group', options_list=['--endpoint-resource-group', 
'--erg', '-r'], help='Resource group of the Endpoint resoure.') c.argument('endpoint_subscription_id', options_list=['--endpoint-subscription-id', '-s'], help='SubscriptionId of the Endpoint resource.') c.argument('connection_string', options_list=['--connection-string', '-c'], help='Connection string of the Routing Endpoint.') c.argument('container_name', options_list=['--container-name', '--container'], help='Name of the storage container.') c.argument('endpoint_type', arg_type=get_enum_type(EndpointType), options_list=['--endpoint-type', '--type', '-t'], help='Type of the Routing Endpoint.') c.argument('encoding', options_list=['--encoding'], arg_type=get_enum_type(EncodingFormat), help='Encoding format for the container. The default is AVRO. ' 'Note that this field is applicable only for blob container endpoints.') with self.argument_context('iot hub certificate') as c: c.argument('certificate_path', options_list=['--path', '-p'], type=file_type, completer=FilesCompleter(["".cer"", "".pem""]), help='The path to the file containing the certificate.') c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.') with self.argument_context('iot hub consumer-group') as c: c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2', help='Event hub consumer group name.') c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.') with self.argument_context('iot hub policy') as c: c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1', help='Shared access policy name.') permission_values = ', '.join([x.value for x in SimpleAccessRights]) c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower, help='Permissions of shared access policy. Use space-separated list for multiple permissions. ' 'Possible values: {}'.format(permission_values)) with self.argument_context('iot hub job') as c: c.argument('job_id', id_part='child_name_1', help='Job Id.') with self.argument_context('iot hub create') as c: c.argument('hub_name', completer=None) c.argument('location', get_location_type(self.cli_ctx), help='Location of your IoT Hub. Default is the location of target resource group.') c.argument('sku', arg_type=get_enum_type(IotHubSku), help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. ' 'Note that only one free IoT hub instance is allowed in each ' 'subscription. Exception will be thrown if free instances exceed one.') c.argument('unit', help='Units in your IoT Hub.', type=int) c.argument('partition_count', help='The number of partitions of the backing Event Hub for device-to-cloud messages.', type=int) with self.argument_context('iot hub show-connection-string') as c: c.argument('show_all', options_list=['--all'], help='Allow to show all shared access policies.') c.argument('hub_name', options_list=['--hub-name', '--name', '-n']) c.argument('policy_name', help='Shared access policy to use.') c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.') with self.argument_context('iot hub manual-failover') as c: c.argument('failover_region', options_list=['--failover-region', '--fr'], help='The region that the IoT hub' 'fails over to. 
Must be the paired region to the current IoT hub region.') # Arguments for IoT Digital Twin with self.argument_context('iot pnp') as c: c.argument('repo_endpoint', options_list=['--endpoint', '-e'], help='PnP endpoint.') c.argument('repo_id', options_list=['--repo-id', '--id'], help='Repository Id.') with self.argument_context('iot pnp repository') as c: c.argument('repo_name', options_list=['--repo-name', '--name', '-n'], help='Repository Name.') with self.argument_context('iot pnp repository get-provision-status') as c: c.argument('track_id', options_list=['--track-id', '--tid'], help='Tracking id (provisioningState).') with self.argument_context('iot pnp authkey') as c: c.argument('key_id', options_list=['--key-id', '--kid'], help='Key Id.') c.argument('user_role', options_list=['--user-role', '--role', '--ur', '-r'], help='User role for the key.', arg_type=get_enum_type(UserRole)) " 8926,"def ctcp(function=None, *command_list): """"""Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``). :param str ctcp_command: one or more CTCP command(s) on which to trigger (really, the only useful value is ``ACTION``) .. versionadded:: 7.1 This is now ``ctcp`` instead of ``intent``, and it can be called without argument, assuming ``ACTION`` in that case. .. note:: This used to be ``@intent``, for a long dead feature in the IRCv3 spec. It is now replaced by ``@ctcp``, which can be used without arguments. In that case, Sopel will assume to trigger on ``ACTION``. As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``. """""" default_commands = ('ACTION',) + command_list if function is None: return ctcp(*default_commands) # called as ``@ctcp()`` elif callable(function): # called as ``@ctcp`` or ``@ctcp(function)`` # or even ``@ctcp(function, 'ACTION', ...)`` return ctcp(*default_commands)(function) # function is not None, and it is not a callable # called as ``@ctcp('ACTION', ...)`` ctcp_commands = (function,) + command_list def add_attribute(function): function._sopel_callable = True if not hasattr(function, ""intents""): function.intents = [] for name in ctcp_commands: if name not in function.intents: function.intents.append(name) return function return add_attribute ","def ctcp(function=None, *command_list): """"""Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``). :param str ctcp_command: one or more CTCP command(s) on which to trigger (really, the only useful value is ``ACTION``) .. versionadded:: 7.1 This is now ``ctcp`` instead of ``intent``, and it can be called without argument, assuming ``ACTION`` in that case. .. note:: This used to be ``@intent``, for a long dead feature in the IRCv3 spec. It is now replaced by ``@ctcp``, which can be used without arguments. In that case, Sopel will assume it should trigger on ``ACTION``. As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``. 
"""""" default_commands = ('ACTION',) + command_list if function is None: return ctcp(*default_commands) # called as ``@ctcp()`` elif callable(function): # called as ``@ctcp`` or ``@ctcp(function)`` # or even ``@ctcp(function, 'ACTION', ...)`` return ctcp(*default_commands)(function) # function is not None, and it is not a callable # called as ``@ctcp('ACTION', ...)`` ctcp_commands = (function,) + command_list def add_attribute(function): function._sopel_callable = True if not hasattr(function, ""intents""): function.intents = [] for name in ctcp_commands: if name not in function.intents: function.intents.append(name) return function return add_attribute " 47911,"def main(): log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() # Plugin initialization for specified device and load extensions library if specified. log.info('Creating Inference Engine...') ie = IECore() if args.cpu_extension and 'CPU' in args.device: ie.add_extension(args.cpu_extension, 'CPU') # Read IR log.info('Loading Mask-RCNN network') mask_rcnn_net = ie.read_network(args.mask_rcnn_model, os.path.splitext(args.mask_rcnn_model)[0] + '.bin') log.info('Loading encoder part of text recognition network') text_enc_net = ie.read_network(args.text_enc_model, os.path.splitext(args.text_enc_model)[0] + '.bin') log.info('Loading decoder part of text recognition network') text_dec_net = ie.read_network(args.text_dec_model, os.path.splitext(args.text_dec_model)[0] + '.bin') required_input_keys = {'im_data', 'im_info'} assert required_input_keys == set(mask_rcnn_net.input_info), \ 'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys)) required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'} assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \ 'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys)) n, c, h, w = mask_rcnn_net.input_info['im_data'].input_data.shape assert n == 1, 'Only batch 1 is supported by the demo application' log.info('Loading IR to the plugin...') mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2) text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device) text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device) hidden_shape = text_dec_net.input_info[args.trd_input_prev_hidden].input_data.shape del mask_rcnn_net del text_enc_net del text_dec_net input_source = args.input_source if os.path.isdir(input_source): cap = FolderCapture(input_source) else: try: input_source = int(args.input_source) cap = cv2.VideoCapture(input_source) cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) except ValueError: cap = cv2.VideoCapture(input_source) if not cap.isOpened(): raise RuntimeError('Failed to open ""{}""'.format(input_source)) ret, frame = cap.read() if not ret: raise RuntimeError(""Can't read an image form the input"") if args.no_track: tracker = None else: tracker = StaticIOUTracker() visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores) render_time = 0 presenter = monitors.Presenter(args.utilization_monitors, 45, (frame.shape[1] // 4, frame.shape[0] // 8)) log.info('Starting inference...') print(""To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"") while ret: if not args.keep_aspect_ratio: # Resize the image to a target 
size. scale_x = w / frame.shape[1] scale_y = h / frame.shape[0] input_image = cv2.resize(frame, (w, h)) else: # Resize the image to keep the same aspect ratio and to fit it to a window of a target size. scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1]) input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y) input_image_size = input_image.shape[:2] input_image = np.pad(input_image, ((0, h - input_image_size[0]), (0, w - input_image_size[1]), (0, 0)), mode='constant', constant_values=0) # Change data layout from HWC to CHW. input_image = input_image.transpose((2, 0, 1)) input_image = input_image.reshape((n, c, h, w)).astype(np.float32) input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32) # Run the net. inf_start = time.time() outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info}) # Parse detection results of the current request boxes = outputs['boxes'] scores = outputs['scores'] classes = outputs['classes'].astype(np.uint32) raw_masks = outputs['raw_masks'] text_features = outputs['text_features'] # Filter out detections with low confidence. detections_filter = scores > args.prob_threshold scores = scores[detections_filter] classes = classes[detections_filter] boxes = boxes[detections_filter] raw_masks = raw_masks[detections_filter] text_features = text_features[detections_filter] boxes[:, 0::2] /= scale_x boxes[:, 1::2] /= scale_y masks = [] for box, cls, raw_mask in zip(boxes, classes, raw_masks): raw_cls_mask = raw_mask[cls, ...] mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1]) masks.append(mask) texts = [] for feature in text_features: feature = text_enc_exec_net.infer({'input': feature})['output'] feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1)) feature = np.transpose(feature, (0, 2, 1)) hidden = np.zeros(hidden_shape) prev_symbol_index = np.ones((1,)) * SOS_INDEX text = '' for i in range(MAX_SEQ_LEN): decoder_output = text_dec_exec_net.infer({ args.trd_input_prev_symbol: prev_symbol_index, args.trd_input_prev_hidden: hidden, args.trd_input_encoder_outputs: feature}) symbols_distr = decoder_output[args.trd_output_symbols_distr] prev_symbol_index = int(np.argmax(symbols_distr, axis=1)) if prev_symbol_index == EOS_INDEX: break text += args.alphabet[prev_symbol_index] hidden = decoder_output[args.trd_output_cur_hidden] texts.append(text) inf_end = time.time() inf_time = inf_end - inf_start render_start = time.time() if len(boxes) and args.raw_output_message: log.info('Detected boxes:') log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ') for box, cls, score, mask in zip(boxes, classes, scores, masks): log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box)) # Get instance track IDs. masks_tracks_ids = None if tracker is not None: masks_tracks_ids = tracker(masks, classes) presenter.drawGraphs(frame) # Visualize masks. frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids) # Draw performance stats. inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000) render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000) cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1) cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1) # Print performance counters. 
if args.perf_counts: perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts() log.info('Performance counters:') print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status', 'real_time, us')) for layer, stats in perf_counts.items(): print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'], stats['status'], stats['real_time'])) if not args.no_show: # Show resulting image. cv2.imshow('Results', frame) render_end = time.time() render_time = render_end - render_start if not args.no_show: key = cv2.waitKey(args.delay) esc_code = 27 if key == esc_code: break presenter.handleKey(key) ret, frame = cap.read() print(presenter.reportMeans()) cv2.destroyAllWindows() cap.release() ","def main(): log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout) args = build_argparser().parse_args() # Plugin initialization for specified device and load extensions library if specified. log.info('Creating Inference Engine...') ie = IECore() if args.cpu_extension and 'CPU' in args.device: ie.add_extension(args.cpu_extension, 'CPU') # Read IR log.info('Loading Mask-RCNN network') mask_rcnn_net = ie.read_network(args.mask_rcnn_model, os.path.splitext(args.mask_rcnn_model)[0] + '.bin') log.info('Loading encoder part of text recognition network') text_enc_net = ie.read_network(args.text_enc_model, os.path.splitext(args.text_enc_model)[0] + '.bin') log.info('Loading decoder part of text recognition network') text_dec_net = ie.read_network(args.text_dec_model, os.path.splitext(args.text_dec_model)[0] + '.bin') required_input_keys = {'im_data', 'im_info'} assert required_input_keys == set(mask_rcnn_net.input_info), \ 'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys)) required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'} assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \ 'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys)) n, c, h, w = mask_rcnn_net.input_info['im_data'].input_data.shape assert n == 1, 'Only batch 1 is supported by the demo application' log.info('Loading IR to the plugin...') mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2) text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device) text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device) hidden_shape = text_dec_net.input_info[args.trd_input_prev_hidden].input_data.shape del mask_rcnn_net del text_enc_net del text_dec_net input_source = args.input_source if os.path.isdir(input_source): cap = FolderCapture(input_source) else: try: input_source = int(args.input_source) cap = cv2.VideoCapture(input_source) cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) except ValueError: cap = cv2.VideoCapture(input_source) if not cap.isOpened(): raise RuntimeError('Failed to open ""{}""'.format(input_source)) ret, frame = cap.read() if not ret: raise RuntimeError(""Can't read an image from the input"") if args.no_track: tracker = None else: tracker = StaticIOUTracker() visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores) render_time = 0 presenter = monitors.Presenter(args.utilization_monitors, 45, (frame.shape[1] // 4, frame.shape[0] // 8)) log.info('Starting inference...') print(""To close the application, press 'CTRL+C' here or switch to the output window and press 
ESC key"") while ret: if not args.keep_aspect_ratio: # Resize the image to a target size. scale_x = w / frame.shape[1] scale_y = h / frame.shape[0] input_image = cv2.resize(frame, (w, h)) else: # Resize the image to keep the same aspect ratio and to fit it to a window of a target size. scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1]) input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y) input_image_size = input_image.shape[:2] input_image = np.pad(input_image, ((0, h - input_image_size[0]), (0, w - input_image_size[1]), (0, 0)), mode='constant', constant_values=0) # Change data layout from HWC to CHW. input_image = input_image.transpose((2, 0, 1)) input_image = input_image.reshape((n, c, h, w)).astype(np.float32) input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32) # Run the net. inf_start = time.time() outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info}) # Parse detection results of the current request boxes = outputs['boxes'] scores = outputs['scores'] classes = outputs['classes'].astype(np.uint32) raw_masks = outputs['raw_masks'] text_features = outputs['text_features'] # Filter out detections with low confidence. detections_filter = scores > args.prob_threshold scores = scores[detections_filter] classes = classes[detections_filter] boxes = boxes[detections_filter] raw_masks = raw_masks[detections_filter] text_features = text_features[detections_filter] boxes[:, 0::2] /= scale_x boxes[:, 1::2] /= scale_y masks = [] for box, cls, raw_mask in zip(boxes, classes, raw_masks): raw_cls_mask = raw_mask[cls, ...] mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1]) masks.append(mask) texts = [] for feature in text_features: feature = text_enc_exec_net.infer({'input': feature})['output'] feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1)) feature = np.transpose(feature, (0, 2, 1)) hidden = np.zeros(hidden_shape) prev_symbol_index = np.ones((1,)) * SOS_INDEX text = '' for i in range(MAX_SEQ_LEN): decoder_output = text_dec_exec_net.infer({ args.trd_input_prev_symbol: prev_symbol_index, args.trd_input_prev_hidden: hidden, args.trd_input_encoder_outputs: feature}) symbols_distr = decoder_output[args.trd_output_symbols_distr] prev_symbol_index = int(np.argmax(symbols_distr, axis=1)) if prev_symbol_index == EOS_INDEX: break text += args.alphabet[prev_symbol_index] hidden = decoder_output[args.trd_output_cur_hidden] texts.append(text) inf_end = time.time() inf_time = inf_end - inf_start render_start = time.time() if len(boxes) and args.raw_output_message: log.info('Detected boxes:') log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ') for box, cls, score, mask in zip(boxes, classes, scores, masks): log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box)) # Get instance track IDs. masks_tracks_ids = None if tracker is not None: masks_tracks_ids = tracker(masks, classes) presenter.drawGraphs(frame) # Visualize masks. frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids) # Draw performance stats. 
inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000) render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000) cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1) cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1) # Print performance counters. if args.perf_counts: perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts() log.info('Performance counters:') print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exet_type', 'status', 'real_time, us')) for layer, stats in perf_counts.items(): print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'], stats['status'], stats['real_time'])) if not args.no_show: # Show resulting image. cv2.imshow('Results', frame) render_end = time.time() render_time = render_end - render_start if not args.no_show: key = cv2.waitKey(args.delay) esc_code = 27 if key == esc_code: break presenter.handleKey(key) ret, frame = cap.read() print(presenter.reportMeans()) cv2.destroyAllWindows() cap.release() " 24699,"def _declare_qos_parameteres( entity_type: Union[Type[Publisher], Type[Subscription]], node: 'Node', topic_name: Text, qos: QoSProfile, options: QoSOverridingOptions ) -> QoSProfile: """""" Declare qos parameters for a Publisher or a Subscription. :param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`. :param node: Node used to declare the parameters. :param topic_name: Topic name of the entity being created. :param qos: Default qos settings of the entity being created, that will be overriden with the user provided qos parameter overrides. :param options: Options that indicates which parameters are going to be declared. """""" if not issubclass(entity_type, (Publisher, Subscription)): raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription') entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription id_suffix = '' if options.entity_id is None else f'_{options.entity_id}' name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}' description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`' allowed_policies = _get_allowed_policies(entity_type) for policy in options.policy_kinds: if policy not in allowed_policies: continue policy_name = policy.name.lower() descriptor = ParameterDescriptor() descriptor.description = description.format(policy_name) descriptor.read_only = True param = node.declare_parameter( name.format(policy_name), _get_qos_policy_parameter(qos, policy), descriptor) _override_qos_policy_with_param(qos, policy, param) if options.callback is not None and not options.callback(qos): raise InvalidQosOverridesError( description.format('Provided qos overrides') + ', are not valid') ","def _declare_qos_parameteres( entity_type: Union[Type[Publisher], Type[Subscription]], node: 'Node', topic_name: Text, qos: QoSProfile, options: QoSOverridingOptions ) -> QoSProfile: """""" Declare qos parameters for a Publisher or a Subscription. :param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`. :param node: Node used to declare the parameters. :param topic_name: Topic name of the entity being created. :param qos: Default qos settings of the entity being created, that will be overriden with the user provided QoS parameter overrides. 
:param options: Options that indicates which parameters are going to be declared. """""" if not issubclass(entity_type, (Publisher, Subscription)): raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription') entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription id_suffix = '' if options.entity_id is None else f'_{options.entity_id}' name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}' description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`' allowed_policies = _get_allowed_policies(entity_type) for policy in options.policy_kinds: if policy not in allowed_policies: continue policy_name = policy.name.lower() descriptor = ParameterDescriptor() descriptor.description = description.format(policy_name) descriptor.read_only = True param = node.declare_parameter( name.format(policy_name), _get_qos_policy_parameter(qos, policy), descriptor) _override_qos_policy_with_param(qos, policy, param) if options.callback is not None and not options.callback(qos): raise InvalidQosOverridesError( description.format('Provided qos overrides') + ', are not valid') " 26795,"def get_connections(conn_id: str) -> List[Connection]: """""" Get all connections as an iterable. :param conn_id: connection id :return: array of connections """""" for secrets_backend in ensure_secrets_loaded(): conn_list = secrets_backend.get_connections(conn_id=conn_id) if conn_list: return list(conn_list) raise AirflowNotFoundException(""The conn_id `{0}` isn't defined"".format(conn_id)) ","def _get_secret_connections(conn_id: str) -> List[Connection]: """""" Get all connections as an iterable. :param conn_id: connection id :return: array of connections """""" for secrets_backend in ensure_secrets_loaded(): conn_list = secrets_backend.get_connections(conn_id=conn_id) if conn_list: return list(conn_list) raise AirflowNotFoundException(""The conn_id `{0}` isn't defined"".format(conn_id)) " 28601,"def plot_pair( data, group=""posterior"", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = ""scatter"", gridsize=""auto"", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """""" Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. 
coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to ``ax.hexbin`` when using hexbin kind backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. 
plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars=""regex"", ... coords=coords, ... divergences=True, ... textsize=18) """""" valid_kinds = [""scatter"", ""kde"", ""hexbin""] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f""Plot type {kind} not recognized."" ""Plot type must be in {valid_kinds}"")) if fill_last or contour: warnings.warn( ""fill_last and contour will be deprecated. Please use kde_kwargs"", UserWarning, ) if plot_kwargs: warnings.warn( ""plot_kwargs will be deprecated."" "" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs"", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == ""posterior"": divergent_group = ""sample_stats"" elif group == ""prior"": divergent_group = ""sample_stats_prior"" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), ""diverging""): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=(""diverging"",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( ""Divergences data not found, plotting without divergences. 
"" ""Make sure the sample method provides divergences data and "" ""that it is present in the `diverging` field of `sample_stats` "" ""or `sample_stats_prior` or set divergences=False"", UserWarning, ) if gridsize == ""auto"": gridsize = int(dataset.dims[""draw""] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_pair"", ""pairplot"", backend) ax = plot(**pairplot_kwargs) return ax ","def plot_pair( data, group=""posterior"", var_names: Optional[List[str]] = None, filter_vars: Optional[str] = None, coords=None, marginals=False, figsize=None, textsize=None, kind: Union[str, List[str]] = ""scatter"", gridsize=""auto"", contour: Optional[bool] = None, plot_kwargs=None, fill_last=False, divergences=False, colorbar=False, labeller=None, ax=None, divergences_kwargs=None, scatter_kwargs=None, kde_kwargs=None, hexbin_kwargs=None, backend=None, backend_kwargs=None, marginal_kwargs=None, point_estimate=None, point_estimate_kwargs=None, point_estimate_marker_kwargs=None, reference_values=None, reference_values_kwargs=None, show=None, ): """""" Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal. Parameters ---------- data: obj Any object that can be converted to an :class:`az.InferenceData` object refer to documentation of :func:`az.convert_to_dataset` for details group: str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names: list of variable names, optional Variables to be plotted, if None all variable are plotted. Prefix the variables by ``~`` when you want to exclude them from the plot. filter_vars: {None, ""like"", ""regex""}, optional, default=None If ``None`` (default), interpret var_names as the real variables names. If ""like"", interpret var_names as substrings of the real variables names. If ""regex"", interpret var_names as regular expressions on the real variables names. A la ``pandas.filter``. coords: mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` marginals: bool, optional If True pairplot will include marginal distributions for every variable figsize: figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str or List[str] Type of plot to display (scatter, kde and/or hexbin) gridsize: int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. 
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool, optional, deprecated, Defaults to True. If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. **Note:** this default is implemented in the body of the code, not in argument processing. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences: Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar: bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot. Read the :ref:`label_guide` for more details and usage examples. ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs: dicts, optional Additional keywords passed to ``ax.scatter`` for divergences scatter_kwargs: Additional keywords passed to ``ax.plot`` when using scatter kind kde_kwargs: dict, optional Additional keywords passed to :func:`az.plot_kde` when using kde kind hexbin_kwargs: dict, optional Additional keywords passed to :meth:`matplotlib.axes.Axes.hexbin` when using hexbin kind backend: str, optional Select plotting backend {""matplotlib"",""bokeh""}. Default ""matplotlib"". backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. marginal_kwargs: dict, optional Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions plotted in the diagonal. point_estimate: str, optional Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be plotted using a scatter marker and vertical/horizontal lines. point_estimate_kwargs: dict, optional Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh) point_estimate_marker_kwargs: dict, optional Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh reference_values: dict, optional Reference values for the plotted variables. The Reference values will be plotted using a scatter marker reference_values_kwargs: dict, optional Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['^t', 'mu'], ... filter_vars=""regex"", ... coords=coords, ... divergences=True, ... 
textsize=18) """""" valid_kinds = [""scatter"", ""kde"", ""hexbin""] kind_boolean: Union[bool, List[bool]] if isinstance(kind, str): kind_boolean = kind in valid_kinds else: kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))] if not np.all(kind_boolean): raise ValueError((f""Plot type {kind} not recognized."" ""Plot type must be in {valid_kinds}"")) if fill_last or contour: warnings.warn( ""fill_last and contour will be deprecated. Please use kde_kwargs"", UserWarning, ) if plot_kwargs: warnings.warn( ""plot_kwargs will be deprecated."" "" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs"", UserWarning, ) if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() # Get posterior draws and combine chains dataset = convert_to_dataset(data, group=group) var_names = _var_names(var_names, dataset, filter_vars) plotters = list( xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True) ) flat_var_names = [ labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters ] divergent_data = None diverging_mask = None # Assigning divergence group based on group param if group == ""posterior"": divergent_group = ""sample_stats"" elif group == ""prior"": divergent_group = ""sample_stats_prior"" else: divergences = False # Get diverging draws and combine chains if divergences: if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), ""diverging""): divergent_data = convert_to_dataset(data, group=divergent_group) _, diverging_mask = xarray_to_ndarray( divergent_data, var_names=(""diverging"",), combined=True ) diverging_mask = np.squeeze(diverging_mask) else: divergences = False warnings.warn( ""Divergences data not found, plotting without divergences. "" ""Make sure the sample method provides divergences data and "" ""that it is present in the `diverging` field of `sample_stats` "" ""or `sample_stats_prior` or set divergences=False"", UserWarning, ) if gridsize == ""auto"": gridsize = int(dataset.dims[""draw""] ** 0.35) numvars = len(flat_var_names) if numvars < 2: raise ValueError(""Number of variables to be plotted must be 2 or greater."") pairplot_kwargs = dict( ax=ax, plotters=plotters, numvars=numvars, figsize=figsize, textsize=textsize, kind=kind, scatter_kwargs=scatter_kwargs, kde_kwargs=kde_kwargs, hexbin_kwargs=hexbin_kwargs, gridsize=gridsize, colorbar=colorbar, divergences=divergences, diverging_mask=diverging_mask, divergences_kwargs=divergences_kwargs, flat_var_names=flat_var_names, backend_kwargs=backend_kwargs, marginal_kwargs=marginal_kwargs, show=show, marginals=marginals, point_estimate=point_estimate, point_estimate_kwargs=point_estimate_kwargs, point_estimate_marker_kwargs=point_estimate_marker_kwargs, reference_values=reference_values, reference_values_kwargs=reference_values_kwargs, ) if backend is None: backend = rcParams[""plot.backend""] backend = backend.lower() # TODO: Add backend kwargs plot = get_plotting_function(""plot_pair"", ""pairplot"", backend) ax = plot(**pairplot_kwargs) return ax " 8242,"def extract_along_coord(smap, coord): """""" Return the value of the image array at every point along the coordinate. For a given coordinate ``coord``, find all the pixels that cross the coordinate and extract the values of the image array in ``smap`` at these points. This is done by applying `Bresenham's line algorithm `_ between the consecutive pairs of points in the coordinate and then indexing the data array of ``smap`` at those points. 
Parameters ---------- smap : `~sunpy.map.GenericMap` coord : `~astropy.coordinates.SkyCoord` Coordinate along which to extract intensity Returns ------- intensity : `~astropy.units.Quantity` loop_coord : `~astropy.coordinates.SkyCoord` """""" if not len(coord.shape) or coord.shape[0] < 2: raise ValueError('At least two points are required for extracting intensity along a ' 'line. To extract points at single coordinates, use ' 'sunpy.map.maputils.sample_at_coords.') if not all(contains_coordinate(smap, coord)): raise ValueError('At least one coordinate is not within the bounds of the map.' 'To extract the intensity along a coordinate, all points must fall within ' 'the bounds of the map.') # Find pixels between each loop segment px, py = smap.wcs.world_to_array_index(coord) pix = [] for i in range(len(px)-1): b = _bresenham(px[i], py[i], px[i+1], py[i+1]) # Pop the last one, unless this is the final entry because the first point # of the next section will be the same if i < (len(px) - 2): b = b[:-1] pix.append(b) pix = np.vstack(pix) intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit) coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix) return intensity, coord_new ","def extract_along_coord(smap, coord): """""" Return the value of the image array at every pixel the coordinate path intersects. For a given coordinate ``coord``, find all the pixels that cross the coordinate and extract the values of the image array in ``smap`` at these points. This is done by applying `Bresenham's line algorithm `_ between the consecutive pairs of points in the coordinate and then indexing the data array of ``smap`` at those points. Parameters ---------- smap : `~sunpy.map.GenericMap` coord : `~astropy.coordinates.SkyCoord` Coordinate along which to extract intensity Returns ------- intensity : `~astropy.units.Quantity` loop_coord : `~astropy.coordinates.SkyCoord` """""" if not len(coord.shape) or coord.shape[0] < 2: raise ValueError('At least two points are required for extracting intensity along a ' 'line. To extract points at single coordinates, use ' 'sunpy.map.maputils.sample_at_coords.') if not all(contains_coordinate(smap, coord)): raise ValueError('At least one coordinate is not within the bounds of the map.' 
'To extract the intensity along a coordinate, all points must fall within ' 'the bounds of the map.') # Find pixels between each loop segment px, py = smap.wcs.world_to_array_index(coord) pix = [] for i in range(len(px)-1): b = _bresenham(px[i], py[i], px[i+1], py[i+1]) # Pop the last one, unless this is the final entry because the first point # of the next section will be the same if i < (len(px) - 2): b = b[:-1] pix.append(b) pix = np.vstack(pix) intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit) coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix) return intensity, coord_new " 32150,"def results(results): """"""Outputs entries to the war-room Args: results (Union[list, dict]): The entry object or array of entry objects to output For example: results = { 'Type' : entryTypes['note'], 'Contents': data, 'ContentsFormat' : formats['json'], 'HumanReadable': md, 'ReadableContentsFormat' : formats['markdown'], 'EntryContext' : context, 'Tags' : ['tag1', 'tag2'] } Returns: None: No data returned """""" if isinstance(results, dict) and results.get(""contents""): results = results.get(""contents"") log(""demisto results: {}"".format(json.dumps(results, indent=4, sort_keys=True))) ","def results(results): """"""Outputs entries to the war-room Args: results (Union[list, dict]): The entry object or array of entry objects to output For example: results = { 'Type' : EntryType.NOTE, 'Contents': data, 'ContentsFormat' : formats['json'], 'HumanReadable': md, 'ReadableContentsFormat' : formats['markdown'], 'EntryContext' : context, 'Tags' : ['tag1', 'tag2'] } Returns: None: No data returned """""" if isinstance(results, dict) and results.get(""contents""): results = results.get(""contents"") log(""demisto results: {}"".format(json.dumps(results, indent=4, sort_keys=True))) " 38297,"def make_colormap(ctuple_list, name=None, interpolate=True): """""" This generates a custom colormap based on the colors and spacings you provide. Enter a ctuple_list, which consists of tuples of (color, spacing) to return a colormap appropriate for use in yt. If you specify a name, it will automatically be added to the current session as a valid colormap. Output colormap is in the format yt expects for adding a colormap to the current session: a dictionary with the appropriate RGB channels each consisting of a 256x3 array : First number is the number at which we are defining a color breakpoint Second number is the (0..1) number to interpolate to when coming *from below* Third number is the (0..1) number to interpolate to when coming *from above* Parameters ---------- ctuple_list: list of (color, float) tuples The ctuple_list consists of pairs of (color, interval) tuples identifying the colors to use in the colormap and the intervals they take to change to the next color in the list. A color can either be a string of the name of a color, or it can be an array of 3 floats, each representing the intensity of R, G, and B on a scale of 0 to 1. Valid color names and their equivalent arrays are listed below. Any interval can be given for the different color tuples, and the total of all the intervals will be scaled to the 256 output elements. If a ctuple_list ends with a color and a non-zero interval, a white 0-interval would be added to the end to finish the interpolation. To avoid finishing with white, specify your own zero-interval color at the end. name: string, optional If you wish this colormap to be added as a valid colormap to the current session, specify a name here. 
Default: None interpolation: boolean, optional Designates whether or not the colormap will interpolate between the colors provided or just give solid colors across the intervals. Default: True Preset Color Options -------------------- 'white' : np.array([255, 255, 255 ])/255. 'gray' : np.array([130, 130, 130])/255. 'dgray' : np.array([80, 80, 80])/255. 'black' : np.array([0, 0, 0])/255. 'blue' : np.array([0, 0, 255])/255. 'dblue' : np.array([0, 0, 160])/255. 'purple' : np.array([100, 0, 200])/255. 'dpurple' : np.array([66, 0, 133])/255. 'dred' : np.array([160, 0, 0])/255. 'red' : np.array([255, 0, 0])/255. 'orange' : np.array([255, 128, 0])/255. 'dorange' : np.array([200,100, 0])/255. 'yellow' : np.array([255, 255, 0])/255. 'dyellow' : np.array([200, 200, 0])/255. 'green' : np.array([0, 255, 0])/255. 'dgreen' : np.array([0, 160, 0])/255. Examples -------- To obtain a colormap that starts at black with equal intervals in green, blue, red, yellow in that order and interpolation between those colors. (In reality, it starts at black, takes an interval of 10 to interpolate to green, then an interval of 10 to interpolate to blue, then an interval of 10 to interpolate to red.) >>> cm = make_colormap([('black', 10), ('green', 10), ('blue', 10), ... ('red', 0)]) To add a colormap that has five equal blocks of solid major colors to the current session as ""steps"": >>> make_colormap([('red', 10), ('orange', 10), ('yellow', 10), ... ('green', 10), ('blue', 10)], name=""steps"", ... interpolate=False) To add a colormap that looks like the French flag (i.e. equal bands of blue, white, and red) using your own RGB keys, then to display it: >>> make_colormap([([0,0,1], 10), ([1,1,1], 10), ([1,0,0], 10)], ... name='french_flag', interpolate=False) >>> show_colormaps(['french_flag']) """""" # aliases for different colors color_dict = { 'white' : np.array([255, 255, 255 ])/255., 'gray' : np.array([130, 130, 130])/255., 'dgray' : np.array([80, 80, 80])/255., 'black' : np.array([0, 0, 0])/255., 'blue' : np.array([0, 0, 255])/255., 'dblue' : np.array([0, 0, 160])/255., 'purple' : np.array([100, 0, 200])/255., 'dpurple' : np.array([66, 0, 133])/255., 'dred' : np.array([160, 0, 0])/255., 'red' : np.array([255, 0, 0])/255., 'orange' : np.array([255, 128, 0])/255., 'dorange' : np.array([200,100, 0])/255., 'yellow' : np.array([255, 255, 0])/255., 'dyellow' : np.array([200, 200, 0])/255., 'green' : np.array([0, 255, 0])/255., 'dgreen' : np.array([0, 160, 0])/255.} cmap = np.zeros((256,3)) # If the user provides a list with a non-zero final interval, it # doesn't make sense because you have an interval but no final # color to which it interpolates. So provide a 0-length white final # interval to end the previous interval in white. if ctuple_list[-1][1] != 0: ctuple_list.append(('white', 0)) # Figure out how many intervals there are total. 
rolling_index = 0 for i, (color, interval) in enumerate(ctuple_list): if isinstance(color, string_types): ctuple_list[i] = (color_dict[color], interval) rolling_index += interval scale = 256./rolling_index n = len(ctuple_list) # Step through each ctuple and interpolate from one color to the # next over the interval provided rolling_index = 0 for i in range(n-1): color, interval = ctuple_list[i] interval *= scale next_index = rolling_index + interval next_color, next_interval = ctuple_list[i+1] if not interpolate: next_color = color # Interpolate the R, G, and B channels from one color to the next # Use np.round to make sure you're on a discrete index interval = int(np.round(next_index)-np.round(rolling_index)) for j in np.arange(3): cmap[int(np.rint(rolling_index)):int(np.rint(next_index)), j] = \ np.linspace(color[j], next_color[j], num=interval) rolling_index = next_index # Return a dictionary with the appropriate RGB channels each consisting of # a 256x3 array in the format that is expected by add_cmap() to add a # colormap to the session. # The format is as follows: # First number is the number at which we are defining a color breakpoint # Second number is the (0..1) number to interpolate to when coming *from below* # Third number is the (0..1) number to interpolate to when coming *from above* _vs = np.linspace(0,1,256) cdict = {'red': np.transpose([_vs, cmap[:,0], cmap[:,0]]), 'green': np.transpose([_vs, cmap[:,1], cmap[:,1]]), 'blue': np.transpose([_vs, cmap[:,2], cmap[:,2]])} if name is not None: add_cmap(name, cdict) return cdict ","def make_colormap(ctuple_list, name=None, interpolate=True): """""" This generates a custom colormap based on the colors and spacings you provide. Enter a ctuple_list, which consists of tuples of (color, spacing) to return a colormap appropriate for use in yt. If you specify a name, it will automatically be added to the current session as a valid colormap. Output colormap is in the format yt expects for adding a colormap to the current session: a dictionary with the appropriate RGB channels each consisting of a 256x3 array : First number is the number at which we are defining a color breakpoint Second number is the (0..1) number to interpolate to when coming *from below* Third number is the (0..1) number to interpolate to when coming *from above* Parameters ---------- ctuple_list: list of (color, float) tuples The ctuple_list consists of pairs of (color, interval) tuples identifying the colors to use in the colormap and the intervals they take to change to the next color in the list. A color can either be a string of the name of a color, or it can be an array of 3 floats, each representing the intensity of R, G, and B on a scale of 0 to 1. Valid color names and their equivalent arrays are listed below. Any interval can be given for the different color tuples, and the total of all the intervals will be scaled to the 256 output elements. If a ctuple_list ends with a color and a non-zero interval, a white 0-interval would be added to the end to finish the interpolation. To avoid finishing with white, specify your own zero-interval color at the end. name: string, optional If you wish this colormap to be added as a valid colormap to the current session, specify a name here. Default: None interpolation: boolean, optional Designates whether or not the colormap will interpolate between the colors provided or just give solid colors across the intervals. Default: True Preset Color Options -------------------- 'white' : np.array([255, 255, 255 ])/255. 
'gray' : np.array([130, 130, 130])/255. 'dgray' : np.array([80, 80, 80])/255. 'black' : np.array([0, 0, 0])/255. 'blue' : np.array([0, 0, 255])/255. 'dblue' : np.array([0, 0, 160])/255. 'purple' : np.array([100, 0, 200])/255. 'dpurple' : np.array([66, 0, 133])/255. 'dred' : np.array([160, 0, 0])/255. 'red' : np.array([255, 0, 0])/255. 'orange' : np.array([255, 128, 0])/255. 'dorange' : np.array([200,100, 0])/255. 'yellow' : np.array([255, 255, 0])/255. 'dyellow' : np.array([200, 200, 0])/255. 'green' : np.array([0, 255, 0])/255. 'dgreen' : np.array([0, 160, 0])/255. Examples -------- To obtain a colormap that starts at black with equal intervals in green, blue, red, yellow in that order and interpolation between those colors. (In reality, it starts at black, takes an interval of 10 to interpolate to green, then an interval of 10 to interpolate to blue, then an interval of 10 to interpolate to red.) >>> cm = make_colormap([('black', 10), ('green', 10), ('blue', 10), ... ('red', 0)]) To add a colormap that has five equal blocks of solid major colors to the current session as ""steps"": >>> make_colormap([('red', 10), ('orange', 10), ('yellow', 10), ... ('green', 10), ('blue', 10)], name=""steps"", ... interpolate=False) To add a colormap that looks like the French flag (i.e. equal bands of blue, white, and red) using your own RGB keys, then to display it: >>> make_colormap([([0,0,1], 10), ([1,1,1], 10), ([1,0,0], 10)], ... name='french_flag', interpolate=False) >>> show_colormaps(['french_flag']) """""" # aliases for different colors color_dict = { 'white' : np.array([255, 255, 255 ])/255., 'gray' : np.array([130, 130, 130])/255., 'dgray' : np.array([80, 80, 80])/255., 'black' : np.array([0, 0, 0])/255., 'blue' : np.array([0, 0, 255])/255., 'dblue' : np.array([0, 0, 160])/255., 'purple' : np.array([100, 0, 200])/255., 'dpurple' : np.array([66, 0, 133])/255., 'dred' : np.array([160, 0, 0])/255., 'red' : np.array([255, 0, 0])/255., 'orange' : np.array([255, 128, 0])/255., 'dorange' : np.array([200,100, 0])/255., 'yellow' : np.array([255, 255, 0])/255., 'dyellow' : np.array([200, 200, 0])/255., 'green' : np.array([0, 255, 0])/255., 'dgreen' : np.array([0, 160, 0])/255.} cmap = np.zeros((256,3)) # If the user provides a list with a non-zero final interval, it # doesn't make sense because you have an interval but no final # color to which it interpolates. So provide a 0-length white final # interval to end the previous interval in white. if ctuple_list[-1][1] != 0: ctuple_list.append(('white', 0)) # Figure out how many intervals there are total. 
rolling_index = 0 for i, (color, interval) in enumerate(ctuple_list): if isinstance(color, string_types): ctuple_list[i] = (color_dict[color], interval) rolling_index += interval scale = 256./rolling_index n = len(ctuple_list) # Step through each ctuple and interpolate from one color to the # next over the interval provided rolling_index = 0 for i in range(n-1): color, interval = ctuple_list[i] interval *= scale next_index = rolling_index + interval next_color, next_interval = ctuple_list[i+1] if not interpolate: next_color = color # Interpolate the R, G, and B channels from one color to the next # Use np.round to make sure you're on a discrete index interval = (np.rint(next_index) - np.rint(rolling_index)).astype(int) for j in np.arange(3): cmap[int(np.rint(rolling_index)):int(np.rint(next_index)), j] = \ np.linspace(color[j], next_color[j], num=interval) rolling_index = next_index # Return a dictionary with the appropriate RGB channels each consisting of # a 256x3 array in the format that is expected by add_cmap() to add a # colormap to the session. # The format is as follows: # First number is the number at which we are defining a color breakpoint # Second number is the (0..1) number to interpolate to when coming *from below* # Third number is the (0..1) number to interpolate to when coming *from above* _vs = np.linspace(0,1,256) cdict = {'red': np.transpose([_vs, cmap[:,0], cmap[:,0]]), 'green': np.transpose([_vs, cmap[:,1], cmap[:,1]]), 'blue': np.transpose([_vs, cmap[:,2], cmap[:,2]])} if name is not None: add_cmap(name, cdict) return cdict " 30163,"def fetch_production(zone_key='US-PR', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)) -> dict: """"""Requests the last known production mix (in MW) of a given region."""""" global renewable_output if target_datetime is not None: raise NotImplementedError('The datasource currently implemented is only real time') r = session or requests.session() data = { #To be returned as response data 'zoneKey': zone_key, #'datetime': '2017-01-01T00:00:00Z', 'production': { 'biomass': 0.0, 'coal': 0.0, 'gas': 0.0, 'hydro': 0.0, 'nuclear': 0.0, 'oil': 0.0, 'solar': 0.0, 'wind': 0.0, 'geothermal': 0.0, 'unknown': 0.0 }, # 'storage': { # 'hydro': -10.0, # }, 'source': 'aeepr.com' } renewable_output = 0.0 #Temporarily stored here. We'll subtract solar, wind and biomass (landfill gas) from it and assume the remainder, if any, is hydro #Step 1: fetch production by generation type #Note: seems to be rounded down (to an integer) #Total at the top of the page fetched in step 3 isn't rounded down, but seems to be lagging behind sometimes. #Difference is only minor, so for now we will IGNORE that total (instead of trying to parse the total and addding the difference to ""unknown"") res = r.get(GENERATION_BREAKDOWN_URL) assert res.status_code == 200, 'Exception when fetching production for ' \ '{}: error when calling url={}'.format( zone_key, GENERATION_BREAKDOWN_URL) sourceData = extract_data(res.text) logger.debug(f""Raw generation breakdown: {sourceData}"", extra={""key"": zone_key}) for item in sourceData:#Item has a label with fuel type + generation in MW, and a value with a percentage if(item['label'] == "" MW""):#There's one empty item for some reason. Skip it. continue logger.debug(item['label'], extra={""key"": zone_key}) parsedLabel = re.search(r""^(.+?)\s+(\d+)\s+MW$"", item['label']) category = parsedLabel.group(1)#E.g. 
GAS NATURAL outputInMW = float(parsedLabel.group(2)) if(category == ""BUNKER C"" or category == ""DIESEL CC"" or category == ""DIESEL GT""): data['production']['oil'] += outputInMW elif(category == ""GAS NATURAL""): data['production']['gas'] += outputInMW elif(category == ""CARBON""): data['production']['coal'] += outputInMW elif(category == ""RENOVABLES""): renewable_output += outputInMW #Temporarily store aggregate renewable output. We'll subtract solar, wind and biomass (landfill gas) from it and assume the remainder, if any, is hydro else: logger.warn(f""Unknown energy type \""{category}\"" is present for Puerto Rico"", extra={""key"": zone_key}) logger.info(f""Category \""{category}\"" produces {outputInMW}MW"", extra={""key"": zone_key}) #Step 2: fetch renewable production breakdown #Data from this source isn't rounded. Assume renewable production not accounted for is hydro res = r.get(RENEWABLES_BREAKDOWN_URL) assert res.status_code == 200, 'Exception when fetching renewable production for ' \ '{}: error when calling url={}'.format( zone_key, RENEWABLES_BREAKDOWN_URL) sourceData = extract_data(res.text) logger.debug(f""Raw renewable generation breakdown: {sourceData}"", extra={""key"": zone_key}) original_renewable_output = renewable_output#If nothing gets subtracted renewable_output, there probably was no data on the renewables breakdown page logger.debug(f""Total (unspecified) renewable output from total generation breakdown: {original_renewable_output}MW"", extra={""key"": zone_key}) for item in sourceData:#Somewhat different from above, the item's label has the generation type and the item's value has generation in MW if(item['label'] == "" ""):#There's one empty item for some reason. Skip it. continue if(item['label'] == ""Solar""): data['production']['solar'] += float(item['value']) elif(item['label'] == ""Eolica""): data['production']['wind'] += float(item['value']) elif(item['label'] == ""Landfill Gas""): data['production']['biomass'] += float(item['value']) else: logger.warn(f""Unknown renewable type \""{item['label']}\"" is present for Puerto Rico"", extra={""key"": zone_key}) renewable_output -= float(item['value'])#Subtract production accounted for from the renewable output total logger.info(f""Renewable \""{item['label']}\"" produces {item['value']}MW"", extra={""key"": zone_key}) logger.debug(f""Renewable output yet to be accounted for: {renewable_output}MW"", extra={""key"": zone_key}) logger.debug(""Rounding remaining renewable output to 14 decimal places to get rid of floating point errors""); renewable_output=round(renewable_output,14) logger.info(f""Remaining renewable output not accounted for: {renewable_output}MW"", extra={""key"": zone_key}) #Assume renewable generation not accounted for is hydro - if we could fetch the other renewable generation data if(renewable_output >= 0.0): if(original_renewable_output == renewable_output):#Nothing got subtracted for Solar, Wind or Landfill gas - so the page probably didn't contain any data. 
Renewable type=unknown logger.warning(f""Renewable generation breakdown page was empty, reporting unspecified renewable output ({renewable_output}MW) as 'unknown'"", extra={""key"": zone_key}) data['production']['unknown'] += renewable_output else:#Otherwise, any remaining renewable output is probably hydro logger.info(f""Assuming remaining renewable output of {renewable_output}MW is hydro"", extra={""key"": zone_key}) data['production']['hydro'] += renewable_output else: logger.warn(f""Renewable generation breakdown page total is greater than total renewable output, a difference of {renewable_output}MW"", extra={""key"": zone_key}) #Step 3: fetch the timestamp, which is at the bottom of a different iframe #Note: there's a race condition here when requesting data very close to :10 and :40, which is when the data gets updated #Sometimes it's some seconds later, so we grab the timestamp from here to know the exact moment res = r.get(TIMESTAMP_URL)#TODO do we know for sure the timestamp on this page gets updated *every time* the generation breakdown gets updated? assert res.status_code == 200, 'Exception when fetching timestamp for ' \ '{}: error when calling url={}'.format( zone_key, TIMESTAMP_URL) raw_timestamp_match = re.search(r""Ultima Actualizaci�n: ((?:0[1-9]|1[0-2])/(?:[0-2][0-9]|3[0-2])/2[01][0-9]{2} [0-2][0-9]:[0-5][0-9]:[0-5][0-9] [AP]M)"", res.text) if raw_timestamp_match is None: raise Exception(f""Could not find timestamp in {res.text}"") raw_timestamp = raw_timestamp_match.group() logger.debug(f""RAW TIMESTAMP: {raw_timestamp}"", extra={""key"": zone_key}) data['datetime'] = convert_timestamp(zone_key, raw_timestamp) assert data['production']['oil'] > 0.0, '{} is missing required generation type: oil'.format(zone_key) return data ","def fetch_production(zone_key='US-PR', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)) -> dict: """"""Requests the last known production mix (in MW) of a given region."""""" global renewable_output if target_datetime is not None: raise NotImplementedError('The datasource currently implemented is only real time') r = session or requests.session() data = { #To be returned as response data 'zoneKey': zone_key, #'datetime': '2017-01-01T00:00:00Z', 'production': { 'biomass': 0.0, 'coal': 0.0, 'gas': 0.0, 'hydro': 0.0, 'nuclear': 0.0, 'oil': 0.0, 'solar': 0.0, 'wind': 0.0, 'geothermal': 0.0, 'unknown': 0.0 }, # 'storage': { # 'hydro': -10.0, # }, 'source': 'aeepr.com' } renewable_output = 0.0 #Temporarily stored here. We'll subtract solar, wind and biomass (landfill gas) from it and assume the remainder, if any, is hydro #Step 1: fetch production by generation type #Note: seems to be rounded down (to an integer) #Total at the top of the page fetched in step 3 isn't rounded down, but seems to be lagging behind sometimes. #Difference is only minor, so for now we will IGNORE that total (instead of trying to parse the total and addding the difference to ""unknown"") res = r.get(GENERATION_BREAKDOWN_URL) assert res.status_code == 200, 'Exception when fetching production for ' \ '{}: error when calling url={}'.format( zone_key, GENERATION_BREAKDOWN_URL) sourceData = extract_data(res.text) logger.debug(f""Raw generation breakdown: {sourceData}"", extra={""key"": zone_key}) for item in sourceData:#Item has a label with fuel type + generation in MW, and a value with a percentage if(item['label'] == "" MW""):#There's one empty item for some reason. Skip it. 
continue logger.debug(item['label'], extra={""key"": zone_key}) parsedLabel = re.search(r""^(.+?)\s+(\d+)\s+MW$"", item['label']) category = parsedLabel.group(1)#E.g. GAS NATURAL outputInMW = float(parsedLabel.group(2)) if(category == ""BUNKER C"" or category == ""DIESEL CC"" or category == ""DIESEL GT""): data['production']['oil'] += outputInMW elif(category == ""GAS NATURAL""): data['production']['gas'] += outputInMW elif(category == ""CARBON""): data['production']['coal'] += outputInMW elif(category == ""RENOVABLES""): renewable_output += outputInMW #Temporarily store aggregate renewable output. We'll subtract solar, wind and biomass (landfill gas) from it and assume the remainder, if any, is hydro else: logger.warn(f""Unknown energy type \""{category}\"" is present for Puerto Rico"", extra={""key"": zone_key}) logger.info(f""Category \""{category}\"" produces {outputInMW}MW"", extra={""key"": zone_key}) #Step 2: fetch renewable production breakdown #Data from this source isn't rounded. Assume renewable production not accounted for is hydro res = r.get(RENEWABLES_BREAKDOWN_URL) assert res.status_code == 200, 'Exception when fetching renewable production for ' \ '{}: error when calling url={}'.format( zone_key, RENEWABLES_BREAKDOWN_URL) sourceData = extract_data(res.text) logger.debug(f""Raw renewable generation breakdown: {sourceData}"", extra={""key"": zone_key}) original_renewable_output = renewable_output#If nothing gets subtracted renewable_output, there probably was no data on the renewables breakdown page logger.debug(f""Total (unspecified) renewable output from total generation breakdown: {original_renewable_output}MW"", extra={""key"": zone_key}) for item in sourceData:#Somewhat different from above, the item's label has the generation type and the item's value has generation in MW if(item['label'] == "" ""):#There's one empty item for some reason. Skip it. continue if(item['label'] == ""Solar""): data['production']['solar'] += float(item['value']) elif(item['label'] == ""Eolica""): data['production']['wind'] += float(item['value']) elif(item['label'] == ""Landfill Gas""): data['production']['biomass'] += float(item['value']) else: logger.warn(f""Unknown renewable type \""{item['label']}\"" is present for Puerto Rico"", extra={""key"": zone_key}) renewable_output -= float(item['value'])#Subtract production accounted for from the renewable output total logger.info(f""Renewable \""{item['label']}\"" produces {item['value']}MW"", extra={""key"": zone_key}) logger.debug(f""Renewable output yet to be accounted for: {renewable_output}MW"", extra={""key"": zone_key}) logger.debug(""Rounding remaining renewable output to 14 decimal places to get rid of floating point errors""); renewable_output=round(renewable_output,14) logger.info(f""Remaining renewable output not accounted for: {renewable_output}MW"", extra={""key"": zone_key}) #Assume renewable generation not accounted for is hydro - if we could fetch the other renewable generation data if(renewable_output >= 0.0): if(original_renewable_output == renewable_output):#Nothing got subtracted for Solar, Wind or Landfill gas - so the page probably didn't contain any data. 
Renewable type=unknown logger.warning(f""Renewable generation breakdown page was empty, reporting unspecified renewable output ({renewable_output}MW) as 'unknown'"", extra={""key"": zone_key}) data['production']['unknown'] += renewable_output else:#Otherwise, any remaining renewable output is probably hydro logger.info(f""Assuming remaining renewable output of {renewable_output}MW is hydro"", extra={""key"": zone_key}) data['production']['hydro'] += renewable_output else: logger.warn(f""Renewable generation breakdown page total is greater than total renewable output, a difference of {renewable_output}MW"", extra={""key"": zone_key}) #Step 3: fetch the timestamp, which is at the bottom of a different iframe #Note: there's a race condition here when requesting data very close to :10 and :40, which is when the data gets updated #Sometimes it's some seconds later, so we grab the timestamp from here to know the exact moment res = r.get(TIMESTAMP_URL)#TODO do we know for sure the timestamp on this page gets updated *every time* the generation breakdown gets updated? assert res.status_code == 200, 'Exception when fetching timestamp for ' \ '{}: error when calling url={}'.format( zone_key, TIMESTAMP_URL) raw_timestamp_match = re.search(r""Ultima Actualizaci�n: ((?:0[1-9]|1[0-2])/(?:[0-2][0-9]|3[0-2])/2[01][0-9]{2} [0-2][0-9]:[0-5][0-9]:[0-5][0-9] [AP]M)"", res.text) if raw_timestamp_match is None: raise Exception(f""Could not find timestamp in {res.text}"") raw_timestamp = raw_timestamp_match.group(1) logger.debug(f""RAW TIMESTAMP: {raw_timestamp}"", extra={""key"": zone_key}) data['datetime'] = convert_timestamp(zone_key, raw_timestamp) assert data['production']['oil'] > 0.0, '{} is missing required generation type: oil'.format(zone_key) return data " 23172,"def solve(a, b, assume_a=""gen""): """""" Solve the equation ``a x = b`` for ``x``. By default, use LU decomposition and forward / backward substitutions. When ``assume_a = ""pos""`` use Cholesky decomposition. Parameters ---------- a : (M, M) array_like A square matrix. b : (M,) or (M, N) array_like Right-hand side matrix in ``a x = b``. assume_a : {""gen"", ""pos""}, optional Type of data matrix. It is used to choose the dedicated solver. Note that Dask does not support ""her"" and ""sym"" types. .. versionchanged:: 2022.8.0 ``assume_a = ""pos""`` was previously defined as ``sym_pos=True``. Returns ------- x : (M,) or (M, N) Array Solution to the system ``a x = b``. Shape of the return matches the shape of `b`. See Also -------- scipy.linalg.solve """""" if assume_a in [""sym"", ""her""]: raise NotImplementedError( ""``da.linalg.solve`` only supports ``assume_a =`` ``gen`` or ``pos``"" ) if assume_a == ""pos"": l, u = _cholesky(a) elif assume_a == ""gen"": p, l, u = lu(a) b = p.T.dot(b) else: raise ValueError(f""{assume_a} is not a recognized matrix structure"") uy = solve_triangular(l, b, lower=True) return solve_triangular(u, uy) ","def solve(a, b, assume_a=""gen""): """""" Solve the equation ``a x = b`` for ``x``. By default, use LU decomposition and forward / backward substitutions. When ``assume_a = ""pos""`` use Cholesky decomposition. Parameters ---------- a : (M, M) array_like A square matrix. b : (M,) or (M, N) array_like Right-hand side matrix in ``a x = b``. assume_a : {""gen"", ""pos""}, optional Type of data matrix. It is used to choose the dedicated solver. Note that Dask does not support ""her"" and ""sym"" types. .. versionchanged:: 2022.8.0 ``assume_a=""pos""`` was previously defined as ``sym_pos=True``. 
Returns ------- x : (M,) or (M, N) Array Solution to the system ``a x = b``. Shape of the return matches the shape of `b`. See Also -------- scipy.linalg.solve """""" if assume_a in [""sym"", ""her""]: raise NotImplementedError( ""``da.linalg.solve`` only supports ``assume_a =`` ``gen`` or ``pos``"" ) if assume_a == ""pos"": l, u = _cholesky(a) elif assume_a == ""gen"": p, l, u = lu(a) b = p.T.dot(b) else: raise ValueError(f""{assume_a} is not a recognized matrix structure"") uy = solve_triangular(l, b, lower=True) return solve_triangular(u, uy) " 545,"def values_list(hits, *fields, **kwargs): """"""modeled after django's QuerySet.values_list"""""" flat = kwargs.pop('flat', False) if kwargs: raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),)) if flat and len(fields) > 1: raise TypeError(""'flat' is not valid when values_list is called with more than one field."") if not fields: raise TypeError('must be called with at least one field') if flat: field, = fields return [hit[field] for hit in hits] else: return [tuple(hit.get(field) for field in fields) for hit in hits] ","def values_list(hits, *fields, **kwargs): """"""modeled after django's QuerySet.values_list"""""" flat = kwargs.pop('flat', False) if kwargs: raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),)) if flat and len(fields) > 1: raise TypeError(""'flat' is not valid when values_list is called with more than one field."") if not fields: raise TypeError('must be called with at least one field') if flat: field, = fields return [hit[field] for hit in hits] else: return [tuple(hit[field] for field in fields if field in hit) for hit in hits] " 4683,"def test_timedelta(): """""" test that timedelta objects are properly translated into days """""" dt = [datetime.datetime(2000, 1, 1, 0, 0, 0), datetime.timedelta(days=1, hours=2)] assert mdates.date2num(dt[1]) == 1 + 2 / 24 # check that mixed lists work.... assert mdates.date2num(dt)[0] == 730120.0 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = (np.datetime64('2000-01-01'), np.timedelta64(26, 'h')) assert mdates.date2num(dt[1]) == 1 + 2 / 24 # check that mixed lists work.... assert mdates.date2num(dt)[0] == 730120.0 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = [datetime.timedelta(days=1, hours=1), datetime.timedelta(days=1, hours=2)] assert mdates.date2num(dt)[0] == 1 + 1 / 24 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = (np.timedelta64(25, 'h'), np.timedelta64(26, 'h')) assert mdates.date2num(dt)[0] == 1 + 1 / 24 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = np.array([25, 26], dtype='timedelta64[h]') assert mdates.date2num(dt)[0] == 1 + 1 / 24 assert mdates.date2num(dt)[1] == 1 + 2 / 24 ","def test_timedelta(): """""" Test that timedelta objects are properly translated into days. """""" dt = [datetime.datetime(2000, 1, 1, 0, 0, 0), datetime.timedelta(days=1, hours=2)] assert mdates.date2num(dt[1]) == 1 + 2 / 24 # check that mixed lists work.... assert mdates.date2num(dt)[0] == 730120.0 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = (np.datetime64('2000-01-01'), np.timedelta64(26, 'h')) assert mdates.date2num(dt[1]) == 1 + 2 / 24 # check that mixed lists work.... 
assert mdates.date2num(dt)[0] == 730120.0 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = [datetime.timedelta(days=1, hours=1), datetime.timedelta(days=1, hours=2)] assert mdates.date2num(dt)[0] == 1 + 1 / 24 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = (np.timedelta64(25, 'h'), np.timedelta64(26, 'h')) assert mdates.date2num(dt)[0] == 1 + 1 / 24 assert mdates.date2num(dt)[1] == 1 + 2 / 24 dt = np.array([25, 26], dtype='timedelta64[h]') assert mdates.date2num(dt)[0] == 1 + 1 / 24 assert mdates.date2num(dt)[1] == 1 + 2 / 24 " 11689,"def test_join_sso_user_with_existing_user_by_email(dynamic_settings, sso_user): user = get_or_create_user( Mock(settings=dynamic_settings), {""id"": 5, ""username"": ""Smith"", ""email"": ""user@example.com"", ""is_active"": False}, ) assert user.sso_id == SSO_ID assert user.username == ""User"" assert user.email == ""user@example.com"" assert user.is_active is True ","def test_merge_sso_user_with_existing_user_by_email(dynamic_settings, sso_user): user = get_or_create_user( Mock(settings=dynamic_settings), {""id"": 5, ""username"": ""Smith"", ""email"": ""user@example.com"", ""is_active"": False}, ) assert user.sso_id == SSO_ID assert user.username == ""User"" assert user.email == ""user@example.com"" assert user.is_active is True " 7663,"def is_legacy_signed_url_valid(user, url): """"""Check whether a legacy signed URL is valid for a user. This util is deprecated and only exists because people may be actively using URLs using the old style token. Any new code should use the new :func:`signed_url_for_user` and :func:`verify_signed_user_url` utils which encode the user id within the signature. """""" parsed = url_parse(url) params = url_decode(parsed.query) try: signature = params.pop('token') except KeyError: return False url = url_unparse(( '', '', parsed.path, url_encode(sorted(params.items()), sort=True), parsed.fragment )) signer = Signer(user.signing_secret, salt='url-signing') return signer.verify_signature(url.encode(), signature) ","def is_legacy_signed_url_valid(user, url): """"""Check whether a legacy signed URL is valid for a user. This util is deprecated and only exists because people may be actively using URLs using the old style token. Any new code should use the new :func:`signed_url_for_user` and :func:`verify_signed_user_url` utils which encode the user id within the signature. 
"""""" parsed = url_parse(url) params = url_decode(parsed.query) try: signature = params.pop('token') except KeyError: return False url = url_unparse(( '', '', parsed.path, url_encode(sorted(params.items()), sort=True), parsed.fragment )) signer = Signer(user.signing_secret, salt='url-signing') return signer.verify_signature(url.encode(), signature.encode()) " 57949,"def apply_filters(incidents: List, args: Dict): acceptable_names = set(argToList(args.get('name'))) acceptable_types = set(argToList(args.get('type'))) filtered_incidents = [] for incident in incidents: if acceptable_names and incident['name'] not in acceptable_names: continue if acceptable_types and incident['type'] not in acceptable_types: continue filtered_incidents.append(incident) return filtered_incidents ","def apply_filters(incidents: List, args: Dict): names_to_filter = set(argToList(args.get('name'))) types_to_filter = set(argToList(args.get('type'))) filtered_incidents = [] for incident in incidents: if names_to_filter and incident['name'] not in names_to_filter: continue if types_to_filter and incident['type'] not in types_to_filter: continue filtered_incidents.append(incident) return filtered_incidents " 4953,"def _get_nonzero_slices(buf): """""" Return the bounds of the nonzero region of a 2D array as a pair of slices. ``buf[_get_drawn_slices(buf)]`` is the smallest sub-rectangle in *buf* that encloses all non-zero entries in *buf*. If *buf* is fully zero, then ``(slice(0, 0), slice(0, 0)) is returned. """""" x_nz, = buf.any(axis=0).nonzero() y_nz, = buf.any(axis=1).nonzero() if len(x_nz) and len(y_nz): l, r = x_nz[[0, -1]] b, t = y_nz[[0, -1]] return slice(b, t+1), slice(l, r+1) else: return slice(0, 0), slice(0, 0) ","def _get_nonzero_slices(buf): """""" Return the bounds of the nonzero region of a 2D array as a pair of slices. ``buf[_get_nonzero_slices(buf)]`` is the smallest sub-rectangle in *buf* that encloses all non-zero entries in *buf*. If *buf* is fully zero, then ``(slice(0, 0), slice(0, 0)) is returned. 
"""""" x_nz, = buf.any(axis=0).nonzero() y_nz, = buf.any(axis=1).nonzero() if len(x_nz) and len(y_nz): l, r = x_nz[[0, -1]] b, t = y_nz[[0, -1]] return slice(b, t+1), slice(l, r+1) else: return slice(0, 0), slice(0, 0) " 5911,"def check_error(error, result): return_code = error.get('code') if return_code: assert result.returncode == return_code stderr = error.get('stderr') if not stderr: return if isinstance(stderr, str): patters = [stderr] elif isinstance(stderr, list): patters = stderr else: raise ""string or list expected, found %r"" % stderr for patter in patters: pat = re.compile(patter, re.I) match = pat.search(result.stderr) assert match, 'regex %r not found in stderr: %r' % ( stderr, result.stderr) ","def check_error(error, result): return_code = error.get('code') if return_code: assert result.returncode == return_code stderr = error.get('stderr') if not stderr: return if isinstance(stderr, str): patters = [stderr] elif isinstance(stderr, list): patters = stderr else: raise ""string or list expected, found %r"" % stderr for patter in patters: match = re.search(patter, result.stderr) assert match, 'regex %r not found in stderr: %r' % ( stderr, result.stderr) " 30384,"def rasterize_pdf_command(): entry_id = demisto.args().get('EntryID') password = demisto.args().get('pdfPassword') max_pages = int(demisto.args().get('maxPages', 30)) horizontal = demisto.args().get('horizontal', 'false') == 'true' file_path = demisto.getFilePath(entry_id).get('path') filename = 'image.jpeg' # type: ignore with open(file_path, 'rb') as f: output = convert_pdf_to_jpeg(path=os.path.realpath(f.name), max_pages=max_pages, password=password, horizontal=horizontal) file = fileResult(filename=filename, data=output) file['Type'] = entryTypes['image'] demisto.results(file) ","def rasterize_pdf_command(): entry_id = demisto.args().get('EntryID') password = demisto.args().get('pdfPassword') max_pages = int(demisto.args().get('maxPages', 30)) horizontal = demisto.args().get('horizontal', 'false') == 'true' file_path = demisto.getFilePath(entry_id).get('path') filename = 'image.jpeg' # type: ignore with open(file_path, 'rb') as f: output = convert_pdf_to_jpeg(path=os.path.realpath(f.name), max_pages=max_pages, password=password, horizontal=horizontal) file_ = fileResult(filename=filename, data=output) file['Type'] = entryTypes['image'] demisto.results(file) " 1558,"def test_recursion_decision_tree_vs_forest_and_gbdt(): # Make sure that the recursion method gives the same results on a # DecisionTreeRegressor and a GradientBoostingRegressor or a # RandomForestRegressor with 1 tree and equivalent parameters. # Purely random dataset to avoid correlated features n_samples = 100 n_features = 5 X = np.random.RandomState(0).randn(n_samples, n_features) y = np.random.RandomState(0).randn(n_samples) # The 'init' estimator for GBDT (here the average prediction) isn't taken # into account with the recursion method, for technical reasons. We set # the mean to 0 to that this 'bug' doesn't have any effect. y = y - y.mean() # set max_depth not too high to avoid splits with same gain but different # features max_depth = 5 forest = RandomForestRegressor(n_estimators=1, max_features=None, bootstrap=False, max_depth=max_depth, random_state=0) # The forest will use ensemble.base._set_random_states to set the # random_state of the tree sub-estimator. We simulate this here to have # equivalent estimators. 
equiv_random_state = check_random_state(0).randint(MAX_RAND_SEED) gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1, criterion='mse', max_depth=max_depth, random_state=equiv_random_state) tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state) forest.fit(X, y) gbdt.fit(X, y) tree.fit(X, y) # sanity check try: assert_is_subtree(tree.tree_, gbdt[0, 0].tree_) assert_is_subtree(tree.tree_, forest[0].tree_) except AssertionError: # For some reason the trees aren't exactly equal on 32bits, so the PDs # cannot be equal either. assert _IS_32BIT return grid = np.random.RandomState(0).randn(50).reshape(-1, 1) for f in range(n_features): features = np.array([f], dtype=np.int32) pdp_forest = _partial_dependence_recursion(forest, grid, features) pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features) pdp_tree = _partial_dependence_recursion(tree, grid, features) np.testing.assert_allclose(pdp_gbdt, pdp_tree) np.testing.assert_allclose(pdp_forest, pdp_tree) ","def test_recursion_decision_tree_vs_forest_and_gbdt(): # Make sure that the recursion method gives the same results on a # DecisionTreeRegressor and a GradientBoostingRegressor or a # RandomForestRegressor with 1 tree and equivalent parameters. # Purely random dataset to avoid correlated features n_samples = 100 n_features = 5 X = np.random.RandomState(0).randn(n_samples, n_features) y = np.random.RandomState(0).randn(n_samples) # The 'init' estimator for GBDT (here the average prediction) isn't taken # into account with the recursion method, for technical reasons. We set # the mean to 0 to that this 'bug' doesn't have any effect. y = y - y.mean() # set max_depth not too high to avoid splits with same gain but different # features max_depth = 5 forest = RandomForestRegressor(n_estimators=1, max_features=None, bootstrap=False, max_depth=max_depth, random_state=0) # The forest will use ensemble.base._set_random_states to set the # random_state of the tree sub-estimator. We simulate this here to have # equivalent estimators. equiv_random_state = check_random_state(seed).randint(MAX_RAND_SEED) gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1, criterion='mse', max_depth=max_depth, random_state=equiv_random_state) tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state) forest.fit(X, y) gbdt.fit(X, y) tree.fit(X, y) # sanity check try: assert_is_subtree(tree.tree_, gbdt[0, 0].tree_) assert_is_subtree(tree.tree_, forest[0].tree_) except AssertionError: # For some reason the trees aren't exactly equal on 32bits, so the PDs # cannot be equal either. assert _IS_32BIT return grid = np.random.RandomState(0).randn(50).reshape(-1, 1) for f in range(n_features): features = np.array([f], dtype=np.int32) pdp_forest = _partial_dependence_recursion(forest, grid, features) pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features) pdp_tree = _partial_dependence_recursion(tree, grid, features) np.testing.assert_allclose(pdp_gbdt, pdp_tree) np.testing.assert_allclose(pdp_forest, pdp_tree) " 24612,"def molecule(symbol: str, Z: Integral = None) -> Particle | CustomParticle: """""" Parses molecules symbols into a |CustomParticle| ot |Particle| if possible. Parameters ---------- symbol Symbol of the molecule to be parsed. Z charge number if not present in symbol. Returns ------- A |Particle| object if the input could be parsed as such, or a |CustomParticle| with the provided symbol, charge, and a mass corresponding to the sum of the molecule elements. 
"""""" try: return Particle(symbol, Z=Z) except ParticleError: element_dict, bare_symbol, Z = _parse_and_check_molecule_input(symbol, Z) mass = 0 * u.kg for element_symbol, amount in element_dict.items(): try: element = Particle(element_symbol) except ParticleError as e: raise InvalidParticleError( f""Could not identify {element_symbol}."" ) from e mass += amount * element.mass if Z is None: charge = 0 * u.C else: charge = Z * const.e.si bare_symbol += f"" {-Z}-"" if Z < 0 else f"" {Z}+"" return CustomParticle(mass=mass, charge=charge, symbol=bare_symbol) ","def molecule(symbol: str, Z: Integral = None) -> Particle | CustomParticle: """""" Parse a molecule symbol into a |CustomParticle| or |Particle|. Parameters ---------- symbol Symbol of the molecule to be parsed. Z charge number if not present in symbol. Returns ------- A |Particle| object if the input could be parsed as such, or a |CustomParticle| with the provided symbol, charge, and a mass corresponding to the sum of the molecule elements. """""" try: return Particle(symbol, Z=Z) except ParticleError: element_dict, bare_symbol, Z = _parse_and_check_molecule_input(symbol, Z) mass = 0 * u.kg for element_symbol, amount in element_dict.items(): try: element = Particle(element_symbol) except ParticleError as e: raise InvalidParticleError( f""Could not identify {element_symbol}."" ) from e mass += amount * element.mass if Z is None: charge = 0 * u.C else: charge = Z * const.e.si bare_symbol += f"" {-Z}-"" if Z < 0 else f"" {Z}+"" return CustomParticle(mass=mass, charge=charge, symbol=bare_symbol) " 30262,"def issue_table_create(issue_list, response): """""" gets an HTTP response and a list containing several issues, sends each issue to be reformatted. Args: issue_list(list of dict): A list of issues derived from the HTTP response response (dict):A raw HTTP response sent for 'Contents' field in context Returns: The issues are sent to Demisto """""" issue_table = [] for issue in issue_list: issue_table.append(issue_format(issue)) context_create_issue(response, issue_table) ","def create_issue_table(issue_list, response): """""" gets an HTTP response and a list containing several issues, sends each issue to be reformatted. Args: issue_list(list of dict): A list of issues derived from the HTTP response response (dict):A raw HTTP response sent for 'Contents' field in context Returns: The issues are sent to Demisto """""" issue_table = [] for issue in issue_list: issue_table.append(issue_format(issue)) context_create_issue(response, issue_table) " 17445,"def open_dataset( filename_or_obj, *args, engine=None, chunks=None, cache=None, decode_cf=None, mask_and_scale=None, decode_times=None, decode_timedelta=None, use_cftime=None, concat_characters=None, decode_coords=None, drop_variables=None, backend_kwargs=None, **kwargs, ): """"""Open and decode a dataset from a file or file-like object. Parameters ---------- filename_or_obj : str, Path, file-like or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF). engine : {""netcdf4"", ""scipy"", ""pydap"", ""h5netcdf"", ""pynio"", ""cfgrib"", \ ""pseudonetcdf"", ""zarr""} or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. 
If not provided, the default engine is chosen based on available dependencies, with a preference for ""netcdf4"". A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int or dict, optional If chunks is provided, it is used to load the new dataset into dask arrays. ``chunks=-1`` loads the dataset with dask using a single chunk for all arrays. `chunks={}`` loads the dataset with dask using engine preferred chunks if exposed by the backend, otherwise with a single chunk for all arrays. ``chunks='auto'`` will use dask ``auto`` chunking taking into account the engine preferred chunks. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. mask_and_scale defaults to True except for the pseudonetcdf backend. This keyword may not be supported by all the backends. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. This keyword may not be supported by all the backends. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {""days"", ""hours"", ""minutes"", ""seconds"", ""milliseconds"", ""microseconds""} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. This keyword may not be supported by all the backends. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar (e.g. ""gregorian"", ""proleptic_gregorian"", ""standard"", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. This keyword may not be supported by all the backends. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. This keyword may not be supported by all the backends. 
decode_coords : bool or {""coordinates"", ""all""}, optional Controls which variables are set as coordinate variables: - ""coordinates"" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - ""all"": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the netCDF4 group in the given file to open given as a str,supported by ""netcdf4"", ""h5netcdf"", ""zarr"". - 'lock': (Deprecated) resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by ""netcdf4"", ""h5netcdf"", ""pynio"", ""pseudonetcdf"", ""cfgrib"". See engine open function for kwargs accepted by each specific engine. Returns ------- dataset : Dataset The newly created dataset. Notes ----- ``open_dataset`` opens the file with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- open_mfdataset """""" if len(args) > 0: raise TypeError( ""open_dataset() takes only 1 positional argument starting from version 0.18.0, "" ""all other options must be passed as keyword arguments"" ) # TODO remove after v0.19 if kwargs.pop(""lock"", None): warnings.warn( ""The kwarg 'lock' has been deprecated, and is now"" ""ignored. In future (from v0.19) passing lock will "" ""raise an error."", DeprecationWarning, ) if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj) backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=backend.open_dataset_parameters, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop(""overwrite_encoded_chunks"", None) backend_ds = backend.open_dataset( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) ds = _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, drop_variables=drop_variables, **decoders, **kwargs, ) return ds ","def open_dataset( filename_or_obj, *args, engine=None, chunks=None, cache=None, decode_cf=None, mask_and_scale=None, decode_times=None, decode_timedelta=None, use_cftime=None, concat_characters=None, decode_coords=None, drop_variables=None, backend_kwargs=None, **kwargs, ): """"""Open and decode a dataset from a file or file-like object. 
Parameters ---------- filename_or_obj : str, Path, file-like or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF). engine : {""netcdf4"", ""scipy"", ""pydap"", ""h5netcdf"", ""pynio"", ""cfgrib"", \ ""pseudonetcdf"", ""zarr""} or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for ""netcdf4"". A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int or dict, optional If chunks is provided, it is used to load the new dataset into dask arrays. ``chunks=-1`` loads the dataset with dask using a single chunk for all arrays. `chunks={}`` loads the dataset with dask using engine preferred chunks if exposed by the backend, otherwise with a single chunk for all arrays. ``chunks='auto'`` will use dask ``auto`` chunking taking into account the engine preferred chunks. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. mask_and_scale defaults to True except for the pseudonetcdf backend. This keyword may not be supported by all the backends. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. This keyword may not be supported by all the backends. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {""days"", ""hours"", ""minutes"", ""seconds"", ""milliseconds"", ""microseconds""} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. This keyword may not be supported by all the backends. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar (e.g. ""gregorian"", ""proleptic_gregorian"", ""standard"", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. 
If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. This keyword may not be supported by all the backends. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. This keyword may not be supported by all the backends. decode_coords : bool or {""coordinates"", ""all""}, optional Controls which variables are set as coordinate variables: - ""coordinates"" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - ""all"": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the netCDF4 group in the given file to open given as a str,supported by ""netcdf4"", ""h5netcdf"", ""zarr"". - 'lock': (Deprecated) resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by ""netcdf4"", ""h5netcdf"", ""pynio"", ""pseudonetcdf"", ""cfgrib"". See engine open function for kwargs accepted by each specific engine. Returns ------- dataset : Dataset The newly created dataset. Notes ----- ``open_dataset`` opens the file with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- open_mfdataset """""" if len(args) > 0: raise TypeError( ""open_dataset() takes only 1 positional argument starting from version 0.18.0, "" ""all other options must be passed as keyword arguments"" ) # TODO remove after v0.19 if kwargs.pop(""lock"", None): warnings.warn( ""The kwarg 'lock' has been deprecated, and is now"" ""ignored. 
In the future passing lock will "" ""raise an error."", DeprecationWarning, ) if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj) backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=backend.open_dataset_parameters, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop(""overwrite_encoded_chunks"", None) backend_ds = backend.open_dataset( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) ds = _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, drop_variables=drop_variables, **decoders, **kwargs, ) return ds " 11487,"def format_samples(sdk_code_path) -> None: generate_sample_path = Path(sdk_code_path + '/generate_sample') if not os.path.exists(generate_sample_path): _LOGGER.info(f'not find generate_sample') return try: import black except Exception as e: check_call('pip install black', shell=True) import black _BLACK_MODE = black.Mode() _BLACK_MODE.line_length = 120 files = generate_sample_path.glob('**/*.py') for path in files: with open(path, 'r') as fr: file_content = fr.read() with suppress(black.NothingChanged): file_content = black.format_file_contents(file_content, fast=True, mode=_BLACK_MODE) with open(path, 'w') as fw: fw.write(file_content) _LOGGER.info(f'format generate_sample successfully') ","def format_samples(sdk_code_path) -> None: generate_sample_path = Path(sdk_code_path + '/generate_sample') if not os.path.exists(generate_sample_path): _LOGGER.info(f'not find generate_sample') return try: import black except Exception as e: check_call('pip install black', shell=True) import black _BLACK_MODE = black.Mode() _BLACK_MODE.line_length = 120 files = generate_sample_path.glob('**/*.py') for path in files: with open(path, 'r') as fr: file_content = fr.read() with suppress(black.NothingChanged): file_content = black.format_file_contents(file_content, fast=True, mode=_BLACK_MODE) with open(path, 'w') as fw: fw.write(file_content) _LOGGER.info(f'format generated_samples successfully') "