_id               stringlengths   2 – 7
title             stringlengths   1 – 88
partition         stringclasses   3 values
text              stringlengths   75 – 19.8k
language          stringclasses   1 value
meta_information  dict
q500
load_parcellation_coords
train
def load_parcellation_coords(parcellation_name): """ Loads coordinates of included parcellations. Parameters ---------- parcellation_name : str options: 'gordon2014_333', 'power2012_264', 'shen2013_278'. Returns ------- parc : array parcellation coordinates """ path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv' parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3]) return parc
python
{ "resource": "" }
q501
create_traj_ranges
train
def create_traj_ranges(start, stop, N): """ Fills in the trajectory range. # Adapted from https://stackoverflow.com/a/40624614 """ steps = (1.0/(N-1)) * (stop - start) if np.isscalar(steps): return steps*np.arange(N) + start else: return steps[:, None]*np.arange(N) + start[:, None]
python
{ "resource": "" }
q502
get_dimord
train
def get_dimord(measure, calc=None, community=None): """ Get the dimension order of a network measure. Parameters ---------- measure : str Name of function in teneto.networkmeasures. calc : str, default=None Calc parameter for the function community : bool, default=None If not None, the community variant of the measure is assumed. Returns ------- dimord : str Dimension order. So "node,node,time" would define the dimensions of the network measure. """ if not calc: calc = '' else: calc = '_' + calc if not community: community = '' else: community = 'community' if 'community' in calc and 'community' in community: community = '' if calc == 'community_avg' or calc == 'community_pairs': community = '' dimord_dict = { 'temporal_closeness_centrality': 'node', 'temporal_degree_centrality': 'node', 'temporal_degree_centrality_avg': 'node', 'temporal_degree_centrality_time': 'node,time', 'temporal_efficiency': 'global', 'temporal_efficiency_global': 'global', 'temporal_efficiency_node': 'node', 'temporal_efficiency_to': 'node', 'sid_global': 'global,time', 'community_pairs': 'community,community,time', 'community_avg': 'community,time', 'sid': 'community,community,time', 'reachability_latency_global': 'global', 'reachability_latency': 'global', 'reachability_latency_node': 'node', 'fluctuability': 'node', 'fluctuability_global': 'global', 'bursty_coeff': 'edge,edge', 'bursty_coeff_edge': 'edge,edge', 'bursty_coeff_node': 'node', 'bursty_coeff_meanEdgePerNode': 'node', 'volatility_global': 'time', } if measure + calc + community in dimord_dict: return dimord_dict[measure + calc + community] else: print('WARNING: get_dimord() returned unknown dimension labels') return 'unknown'
python
{ "resource": "" }
q503
create_supraadjacency_matrix
train
def create_supraadjacency_matrix(tnet, intersliceweight=1): """ Returns a supraadjacency matrix from a temporal network structure Parameters -------- tnet : TemporalNetwork Temporal network (any network type) intersliceweight : int Weight that links the same node from adjacent time-points Returns -------- supranet : dataframe Supraadjacency matrix """ newnetwork = tnet.network.copy() newnetwork['i'] = (tnet.network['i']) + \ ((tnet.netshape[0]) * (tnet.network['t'])) newnetwork['j'] = (tnet.network['j']) + \ ((tnet.netshape[0]) * (tnet.network['t'])) if 'weight' not in newnetwork.columns: newnetwork['weight'] = 1 newnetwork.drop('t', axis=1, inplace=True) timepointconns = pd.DataFrame() timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N) timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T)) timepointconns['weight'] = intersliceweight supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True) return supranet
python
{ "resource": "" }
q504
tnet_to_nx
train
def tnet_to_nx(df, t=None): """ Creates undirected networkx object """ if t is not None: df = get_network_when(df, t=t) if 'weight' in df.columns: nxobj = nx.from_pandas_edgelist( df, source='i', target='j', edge_attr='weight') else: nxobj = nx.from_pandas_edgelist(df, source='i', target='j') return nxobj
python
{ "resource": "" }
q505
temporal_louvain
train
def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1): r""" Louvain clustering for a temporal network. Parameters ----------- tnet : array, dict, TemporalNetwork Input network resolution : int resolution of Louvain clustering ($\gamma$) intersliceweight : int interslice weight of multilayer clustering ($\omega$). Must be positive. n_iter : int Number of iterations to run louvain for randomseed : int Set for reproducibility negativeedge : str If there are negative edges, what should be done with them. Options: 'ignore' (i.e. set to 0). More options to be added. consensus_threshold : float (default: 0.5) When creating consensus matrix to average over number of iterations, keep values when the consensus is this amount. temporal_consensus : bool (default: True) If True, community labels are matched across time-points with make_temporal_consensus. njobs : int Number of parallel jobs. Returns ------- communities : array (node,time) node,time array of community assignment Notes ------- References ---------- """ tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') # Divide resolution by the number of timepoints resolution = resolution / tnet.T supranet = create_supraadjacency_matrix( tnet, intersliceweight=intersliceweight) if negativeedge == 'ignore': supranet = supranet[supranet['weight'] > 0] nxsupra = tnet_to_nx(supranet) np.random.seed(randomseed) while True: comtmp = [] with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T) for n in range(n_iter)} for j in as_completed(job): comtmp.append(j.result()) comtmp = np.stack(comtmp) comtmp = comtmp.transpose() comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F') if n_iter == 1: break nxsupra_old = nxsupra nxsupra = make_consensus_matrix(comtmp, consensus_threshold) # If there was no consensus, there are no communities possible, return if nxsupra is None: break if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N*tnet.T)) == nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N*tnet.T))).all(): break communities = comtmp[:, :, 0] if temporal_consensus: communities = make_temporal_consensus(communities) return communities
python
{ "resource": "" }
q506
make_temporal_consensus
train
def make_temporal_consensus(com_membership): r""" Matches community labels across time-points. Jaccard matching is done in a greedy fashion, matching the largest community at t with the communities at t-1. Parameters ---------- com_membership : array Shape should be node, time. Returns ------- com_membership : array Community membership array where labels are temporally consistent (matched via Jaccard distance). """ com_membership = np.array(com_membership) # renumber the first time-point's community indices so they are consecutive, starting at 0 com_membership[:, 0] = clean_community_indexes(com_membership[:, 0]) # loop over all timepoints, get jaccard distance in greedy manner for largest community to time period before for t in range(1, com_membership.shape[1]): ct, counts_t = np.unique(com_membership[:, t], return_counts=True) ct = ct[np.argsort(counts_t)[::-1]] c1back = np.unique(com_membership[:, t-1]) new_index = np.zeros(com_membership.shape[0]) for n in ct: if len(c1back) > 0: d = np.ones(int(c1back.max())+1) for m in c1back: v1 = np.zeros(com_membership.shape[0]) v2 = np.zeros(com_membership.shape[0]) v1[com_membership[:, t] == n] = 1 v2[com_membership[:, t-1] == m] = 1 d[int(m)] = jaccard(v1, v2) bestval = np.argmin(d) else: bestval = new_index.max() + 1 new_index[com_membership[:, t] == n] = bestval c1back = np.array(np.delete(c1back, np.where(c1back == bestval))) com_membership[:, t] = new_index return com_membership
python
{ "resource": "" }
q507
flexibility
train
def flexibility(communities): """ Amount a node changes community Parameters ---------- communities : array Community array of shape (node,time) Returns -------- flex : array Array of size node with the flexibility of each node. Notes ----- Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time points are not arbitrary. References ----------- Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6. """ # Preallocate flex = np.zeros(communities.shape[0]) # Go from the second time point to last, compare with time-point before for t in range(1, communities.shape[1]): flex[communities[:, t] != communities[:, t-1]] += 1 # Normalize flex = flex / (communities.shape[1] - 1) return flex
python
{ "resource": "" }
q508
load_tabular_file
train
def load_tabular_file(fname, return_meta=False, header=True, index_col=True): """ Given a file name, loads it as a pandas data frame. Parameters ---------- fname : str file name and path. Must be tsv. return_meta : bool (default False) if True, also returns the accompanying json sidecar as meta information. header : bool (default True) if there is a header in the tsv file, true will use first row in file. index_col : bool (default True) if there is an index column in the csv or tsv file, true will use the first column in the file. Returns ------- df : pandas The loaded file meta : pandas, if return_meta=True Meta information in json file (if specified) """ if index_col: index_col = 0 else: index_col = None if header: header = 0 else: header = None df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t') if return_meta: json_fname = fname.replace('tsv', 'json') meta = pd.read_json(json_fname) return df, meta else: return df
python
{ "resource": "" }
q509
get_sidecar
train
def get_sidecar(fname, allowedfileformats='default'): """ Loads sidecar or creates one """ if allowedfileformats == 'default': allowedfileformats = ['.tsv', '.nii.gz'] for f in allowedfileformats: fname = fname.split(f)[0] fname += '.json' if os.path.exists(fname): with open(fname) as fs: sidecar = json.load(fs) else: sidecar = {} if 'filestatus' not in sidecar: sidecar['filestatus'] = {} sidecar['filestatus']['reject'] = False sidecar['filestatus']['reason'] = [] return sidecar
python
{ "resource": "" }
q510
process_exclusion_criteria
train
def process_exclusion_criteria(exclusion_criteria): """ Parses exclusion criteria strings to get the function and threshold. Parameters ---------- exclusion_criteria : list list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\' Returns ------- relfun : list list of numpy functions for the exclusion criteria threshold : list list of floats for threshold for each relfun """ relfun = [] threshold = [] for ec in exclusion_criteria: if ec[0:2] == '>=': relfun.append(np.greater_equal) threshold.append(float(ec[2:])) elif ec[0:2] == '<=': relfun.append(np.less_equal) threshold.append(float(ec[2:])) elif ec[0] == '>': relfun.append(np.greater) threshold.append(float(ec[1:])) elif ec[0] == '<': relfun.append(np.less) threshold.append(float(ec[1:])) else: raise ValueError('exclusion criteria must begin with >, <, >= or <=') return relfun, threshold
python
{ "resource": "" }
q511
reachability_latency
train
def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'): """ Reachability latency. This is the r-th longest temporal path. Parameters --------- tnet : array or dict Can either be a network (graphlet or contact), binary undirected only. Alternatively, paths can be given as a dataframe (output of teneto.networkmeasures.shortest_temporal_path) rratio: float (default: 1) reachability ratio that the latency is calculated in relation to. Value must be over 0 and up to 1. 1 (default) - all nodes must be reached. Other values (e.g. 0.5) imply that only that fraction of nodes (e.g. 50%) must be reached. This is rounded to the nearest whole number of nodes. calc : str what to calculate. Alternatives: 'global' entire network; 'nodes': for each node. Returns -------- reach_lat : array Reachability latency Notes ------ Reachability latency calculates the time it takes to reach a given fraction of nodes via temporal paths. """ if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate them if tnet is not None: paths = shortest_temporal_path(tnet) pathmat = np.zeros([paths[['from', 'to']].max().max( )+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan pathmat[paths['from'].values, paths['to'].values, paths['t_start'].values] = paths['temporal-distance'] netshape = pathmat.shape edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio)) reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan for t_ind in range(0, netshape[2]): paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1) reach_lat[:, t_ind] = paths_sort[:, edges_to_reach] if calc == 'global': reach_lat = np.nansum(reach_lat) reach_lat = reach_lat / ((netshape[0]) * netshape[2]) elif calc == 'nodes': reach_lat = np.nansum(reach_lat, axis=1) reach_lat = reach_lat / (netshape[2]) return reach_lat
python
{ "resource": "" }
q512
recruitment
train
def recruitment(temporalcommunities, staticcommunities): """ Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the same static communities being in the same temporal communities at other time-points or during different tasks. Parameters: ------------ temporalcommunities : array temporal communities vector (node,time) staticcommunities : array Static communities vector for each node Returns: ------- Rcoeff : array recruitment coefficient for each node References: ----------- Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton. Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51. Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533. """ # make sure the static and temporal communities have the same number of nodes if staticcommunities.shape[0] != temporalcommunities.shape[0]: raise ValueError( 'Temporal and static communities have different dimensions') alleg = allegiance(temporalcommunities) Rcoeff = np.zeros(len(staticcommunities)) for i, statcom in enumerate(staticcommunities): Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom]) return Rcoeff
python
{ "resource": "" }
q513
integration
train
def integration(temporalcommunities, staticcommunities): """ Calculates the integration coefficient for each node. Measures the average probability that a node is in the same community as nodes from other systems. Parameters: ------------ temporalcommunities : array temporal communities vector (node,time) staticcommunities : array Static communities vector for each node Returns: ------- Icoeff : array integration coefficient for each node References: ---------- Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton. Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51. Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533. """ # make sure the static and temporal communities have the same number of nodes if staticcommunities.shape[0] != temporalcommunities.shape[0]: raise ValueError( 'Temporal and static communities have different dimensions') alleg = allegiance(temporalcommunities) Icoeff = np.zeros(len(staticcommunities)) # calc integration for each node for i, statcom in enumerate(staticcommunities): Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom]) return Icoeff
python
{ "resource": "" }
q514
intercontacttimes
train
def intercontacttimes(tnet): """ Calculates the intercontacttimes of each edge in a network. Parameters ----------- tnet : array, dict Temporal network (graphlet or contact). Nettype: 'bu', 'bd' Returns --------- contacts : dict Intercontact times as numpy array in dictionary. contacts['intercontacttimes'] Notes ------ The inter-contact times is calculated by the time between consecutive "active" edges (where active means that the value is 1 in a binary network). Examples -------- This example goes through how inter-contact times are calculated. >>> import teneto >>> import numpy as np Make a network with 2 nodes and 10 time-points with 4 edges spaced out. >>> G = np.zeros([2,2,10]) >>> edge_on = [1,3,5,9] >>> G[0,1,edge_on] = 1 The network visualised below makes it clear what the inter-contact times are between the two nodes: .. plot:: import teneto import numpy as np import matplotlib.pyplot as plt G = np.zeros([2,2,10]) edge_on = [1,3,5,9] G[0,1,edge_on] = 1 fig, ax = plt.subplots(1, figsize=(4,2)) teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2') ax.set_ylim(-0.25, 1.25) plt.tight_layout() fig.show() Calculating the inter-contact times of these edges becomes: 2,2,4 between nodes 0 and 1. >>> ict = teneto.networkmeasures.intercontacttimes(G) The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN. So the icts between nodes 0 and 1 are found by: >>> ict['intercontacttimes'][0,1] array([2, 2, 4]) """ # Process input tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') if tnet.nettype[0] == 'w': print('WARNING: assuming connections to be binary when computing intercontacttimes') # For each edge, take the differences between consecutive active time-points, t[1:] - t[:-1]. # Then discard the noninformative ones (done automatically) # Finally return back as np array contacts = np.array([[None] * tnet.netshape[0]] * tnet.netshape[0]) if tnet.nettype[1] == 'u': for i in range(0, tnet.netshape[0]): for j in range(i + 1, tnet.netshape[0]): edge_on = tnet.get_network_when(i=i, j=j)['t'].values if len(edge_on) > 0: edge_on_diff = edge_on[1:] - edge_on[:-1] contacts[i, j] = np.array(edge_on_diff) contacts[j, i] = np.array(edge_on_diff) else: contacts[i, j] = [] contacts[j, i] = [] elif tnet.nettype[1] == 'd': for i in range(0, tnet.netshape[0]): for j in range(0, tnet.netshape[0]): edge_on = tnet.get_network_when(i=i, j=j)['t'].values if len(edge_on) > 0: edge_on_diff = edge_on[1:] - edge_on[:-1] contacts[i, j] = np.array(edge_on_diff) else: contacts[i, j] = [] out = {} out['intercontacttimes'] = contacts out['nettype'] = tnet.nettype return out
python
{ "resource": "" }
q515
gen_report
train
def gen_report(report, sdir='./', report_name='report.html'): """ Generates report of derivation and postprocess steps in teneto.derive """ # Create report directory if not os.path.exists(sdir): os.makedirs(sdir) # Add a slash to file directory if not included to avoid DirNameFleName # instead of DirName/FileName being creaated if sdir[-1] != '/': sdir += '/' report_html = '<html><body>' if 'method' in report.keys(): report_html += "<h1>Method: " + report['method'] + "</h1><p>" for i in report[report['method']]: if i == 'taper_window': fig, ax = plt.subplots(1) ax.plot(report[report['method']]['taper_window'], report[report['method']]['taper']) ax.set_xlabel('Window (time). 0 in middle of window.') ax.set_title( 'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).') fig.savefig(sdir + 'taper.png') report_html += "<img src='./taper.png' width=500>" + "<p>" else: report_html += "- <b>" + i + "</b>: " + \ str(report[report['method']][i]) + "<br>" if 'postprocess' in report.keys(): report_html += "<p><h2>Postprocessing:</h2><p>" report_html += "<b>Pipeline: </b>" for i in report['postprocess']: report_html += " " + i + "," for i in report['postprocess']: report_html += "<p><h3>" + i + "</h3><p>" for j in report[i]: if j == 'lambda': report_html += "- <b>" + j + "</b>: " + "<br>" lambda_val = np.array(report['boxcox']['lambda']) fig, ax = plt.subplots(1) ax.hist(lambda_val[:, -1]) ax.set_xlabel('lambda') ax.set_ylabel('frequency') ax.set_title('Histogram of lambda parameter') fig.savefig(sdir + 'boxcox_lambda.png') report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>" report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>" np.savetxt(sdir + "boxcox_lambda.csv", lambda_val, delimiter=",") else: report_html += "- <b>" + j + "</b>: " + \ str(report[i][j]) + "<br>" report_html += '</body></html>' with open(sdir + report_name, 'w') as file: file.write(report_html) file.close()
python
{ "resource": "" }
q516
TenetoBIDS.add_history
train
def add_history(self, fname, fargs, init=0): """ Adds a processing step to TenetoBIDS.history. """ if init == 1: self.history = [] self.history.append([fname, fargs])
python
{ "resource": "" }
q517
TenetoBIDS.derive_temporalnetwork
train
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True): """ Derive time-varying connectivity on the selected files. Parameters ---------- params : dict. See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS funcitons) update_pipeline : bool If true, the object updates the selected files with those derived here. njobs : int How many parallel jobs to run confound_corr_report : bool If true, histograms and summary statistics of TVC and confounds are plotted in a report directory. tag : str any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]' Returns ------- dfc : files saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) files = self.get_selected_files(quiet=1) confound_files = self.get_selected_files(quiet=1, pipeline='confound') if confound_files: confounds_exist = True else: confounds_exist = False if not confound_corr_report: confounds_exist = False if not tag: tag = '' else: tag = 'desc-' + tag with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params, confounds_exist, confound_files) for i, f in enumerate(files) if f} for j in as_completed(job): j.result() if update_pipeline == True: if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0: self.set_confound_pipeline = self.pipeline self.set_pipeline('teneto_' + teneto.__version__) self.set_pipeline_subdir('tvc') self.set_bids_suffix('tvcconn')
python
{ "resource": "" }
q518
TenetoBIDS.make_functional_connectivity
train
def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None): """ Makes connectivity matrix for each of the subjects. Parameters ---------- returngroup : bool, default=False If true, returns the group average connectivity matrix. njobs : int How many parallel jobs to run file_idx : bool Default False, true if to ignore index column in loaded file. file_hdr : bool Default False, true if to ignore header row in loaded file. Returns ------- Saves data in derivatives/teneto_<version>/.../fc/ R_group : array if returngroup is true, the average connectivity matrix is returned. """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) files = self.get_selected_files(quiet=1) R_group = [] with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files} for j in as_completed(job): R_group.append(j.result()) if returngroup: # Fisher tranform -> mean -> inverse fisher tranform R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0)) return np.array(R_group)
python
{ "resource": "" }
q519
TenetoBIDS._save_namepaths_bids_derivatives
train
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None): """ Creates output directory and output name Paramters --------- f : str input files, includes the file bids_suffix tag : str what should be added to f in the output file. save_directory : str additional directory that the output file should go in suffix : str add new suffix to data Returns ------- save_name : str previous filename with new tag save_dir : str directory where it will be saved base_dir : str subjective base directory (i.e. derivatives/teneto/func[/anythingelse/]) """ file_name = f.split('/')[-1].split('.')[0] if tag != '': tag = '_' + tag if suffix: file_name, _ = drop_bids_suffix(file_name) save_name = file_name + tag save_name += '_' + suffix else: save_name = file_name + tag paths_post_pipeline = f.split(self.pipeline) if self.pipeline_subdir: paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[ 0] else: paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0] base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \ teneto.__version__ + '/' + paths_post_pipeline + '/' save_dir = base_dir + '/' + save_directory + '/' if not os.path.exists(save_dir): # A case has happened where this has been done in parallel and an error was raised. So do try/except try: os.makedirs(save_dir) except: # Wait 2 seconds so that the error does not try and save something in the directory before it is created time.sleep(2) if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'): try: with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs: json.dump(self.tenetoinfo, fs) except: # Same as above, just in case parallel does duplicaiton time.sleep(2) return save_name, save_dir, base_dir
python
{ "resource": "" }
q520
TenetoBIDS.get_tags
train
def get_tags(self, tag, quiet=1): """ Returns which tag alternatives can be identified in the BIDS derivatives structure. """ if not self.pipeline: print('Please set pipeline first.') self.get_pipeline_alternatives(quiet) else: if tag == 'sub': datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/' tag_alternatives = [ f.split('sub-')[1] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and 'sub-' in f] elif tag == 'ses': tag_alternatives = [] for sub in self.bids_tags['sub']: tag_alternatives += [f.split('ses-')[1] for f in os.listdir( self.BIDS_dir + '/derivatives/' + self.pipeline + '/' + 'sub-' + sub) if 'ses' in f] tag_alternatives = set(tag_alternatives) else: files = self.get_selected_files(quiet=1) tag_alternatives = [] for f in files: f = f.split('.')[0] f = f.split('/')[-1] tag_alternatives += [t.split('-')[1] for t in f.split('_') if t.split('-')[0] == tag] tag_alternatives = set(tag_alternatives) if quiet == 0: print(tag + ' alternatives: ' + ', '.join(tag_alternatives)) return list(tag_alternatives)
python
{ "resource": "" }
q521
TenetoBIDS.set_exclusion_file
train
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'): """ Excludes subjects given a certain exclusion criteria. Parameters ---------- confound : str or list string or list of confound name(s) from confound files exclusion_criteria : str or list for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. E.g. '>0.2' entails that every subject whose average confound value is greater than 0.2 will be rejected. confound_stat : str or list Can be median, mean, std. How the confound data is aggregated (so if there is a measure per time-point, this is averaged over all time points. If multiple confounds specified, this has to be a list.). Returns -------- calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria. """ self.add_history(inspect.stack()[0][3], locals(), 1) if isinstance(confound, str): confound = [confound] if isinstance(exclusion_criteria, str): exclusion_criteria = [exclusion_criteria] if isinstance(confound_stat, str): confound_stat = [confound_stat] if len(exclusion_criteria) != len(confound): raise ValueError( 'Same number of confound names and exclusion criteria must be given') if len(confound_stat) != len(confound): raise ValueError( 'Same number of confound names and confound stats must be given') relex, crit = process_exclusion_criteria(exclusion_criteria) files = sorted(self.get_selected_files(quiet=1)) confound_files = sorted( self.get_selected_files(quiet=1, pipeline='confound')) files, confound_files = confound_matching(files, confound_files) bad_files = [] bs = 0 foundconfound = [] foundreason = [] for s, cfile in enumerate(confound_files): df = load_tabular_file(cfile, index_col=None) found_bad_subject = False for i, _ in enumerate(confound): if confound_stat[i] == 'median': if relex[i](df[confound[i]].median(), crit[i]): found_bad_subject = True elif confound_stat[i] == 'mean': if relex[i](df[confound[i]].mean(), crit[i]): found_bad_subject = True elif confound_stat[i] == 'std': if relex[i](df[confound[i]].std(), crit[i]): found_bad_subject = True if found_bad_subject: foundconfound.append(confound[i]) foundreason.append(exclusion_criteria[i]) if found_bad_subject: bad_files.append(files[s]) bs += 1 self.set_bad_files( bad_files, reason='excluded file (confound over specified stat threshold)') for i, f in enumerate(bad_files): sidecar = get_sidecar(f) sidecar['file_exclusion'] = {} sidecar['confound'] = foundconfound[i] sidecar['threshold'] = foundreason[i] for af in ['.tsv', '.nii.gz']: f = f.split(af)[0] f += '.json' with open(f, 'w') as fs: json.dump(sidecar, fs) print('Removed ' + str(bs) + ' files from inclusion.')
python
{ "resource": "" }
q522
TenetoBIDS.make_parcellation
train
def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None): """ Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end. Parameters ----------- parcellation : str specify which parcellation that you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278' parc_type : str can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used. parc_params : dict **kwargs for nilearn functions network : str if "defaults", it selects static parcellation, _if available_ (other options will be made available soon). removeconfounds : bool if true, regresses out confounds that are specfied in self.set_confounds with linear regression. update_pipeline : bool TenetoBIDS gets updated with the parcellated files being selected. tag : str or list any additional tag that must be in file name. After the tag there must either be a underscore or period (following bids). clean_params : dict **kwargs for nilearn function nilearn.signal.clean yeonetworkn : int (7 or 17) Only relevant for when parcellation is schaeffer2018. Use 7 or 17 template networks njobs : n number of processes to run. Overrides TenetoBIDS.njobs Returns ------- Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/. To load these files call TenetoBIDS.load_parcellation. NOTE ---- These functions make use of nilearn. Please cite nilearn if used in a publicaiton. """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) parc_name = parcellation.split('_')[0].lower() # Check confounds have been specified if not self.confounds and removeconfounds: raise ValueError( 'Specified confounds are not found. Make sure that you have run self.set_confunds([\'Confound1\',\'Confound2\']) first.') # Check confounds have been specified if update_pipeline == False and removeconfounds: raise ValueError( 'Pipeline must be updated in order to remove confounds within this funciton.') # In theory these should be the same. So at the moment, it goes through each element and checks they are matched. # A matching algorithem may be needed if cases arise where this isnt the case files = self.get_selected_files(quiet=1) # Load network communities, if possible. self.set_network_communities(parcellation, netn=yeonetworkn) if not tag: tag = '' else: tag = 'desc-' + tag if not parc_params: parc_params = {} with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation, parc_name, parc_type, parc_params) for i, f in enumerate(files)} for j in as_completed(job): j.result() if update_pipeline == True: if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0: self.set_confound_pipeline(self.pipeline) self.set_pipeline('teneto_' + teneto.__version__) self.set_pipeline_subdir('parcellation') if tag: self.set_bids_tags({'desc': tag.split('-')[1]}) self.set_bids_suffix('roi') if removeconfounds: self.removeconfounds( clean_params=clean_params, transpose=None, njobs=njobs)
python
{ "resource": "" }
q523
TenetoBIDS.communitydetection
train
def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None): """ Calls temporal_louvain_with_consensus on connectivity data Parameters ---------- community_detection_params : dict kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus community_type : str Either 'temporal' or 'static'. If temporal, community is made per time-point for each timepoint. file_idx : bool (default false) if true, index column present in data and this will be ignored file_hdr : bool (default false) if true, header row present in data and this will be ignored njobs : int number of processes to run. Overrides TenetoBIDS.njobs Note ---- All non-positive edges are made to zero. Returns ------- List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/ """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not tag: tag = '' else: tag = 'desc-' + tag if community_type == 'temporal': files = self.get_selected_files(quiet=True) # Run check to make sure files are tvc input for f in files: if 'tvc' not in f: raise ValueError( 'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).') elif community_type == 'static': files = self.get_selected_files( quiet=True, pipeline='functionalconnectivity') with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr, file_idx, tag) for i, f in enumerate(files) if all([t + '_' in f or t + '.' in f for t in tag])} for j in as_completed(job): j.result()
python
{ "resource": "" }
q524
TenetoBIDS.removeconfounds
train
def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None): """ Removes specified confounds using nilearn.signal.clean Parameters ---------- confounds : list List of confounds. Can be prespecified in set_confounds clean_params : dict Dictionary of kwargs to pass to nilearn.signal.clean transpose : bool (default False) Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal. njobs : int Number of jobs. Otherwise tenetoBIDS.njobs is run. update_pipeline : bool update pipeline with '_clean' tag for new files created overwrite : bool tag : str Returns ------- Saves all TenetoBIDS.get_selected_files with confounds removed with _rmconfounds at the end. Note ---- There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data. """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not self.confounds and not confounds: raise ValueError( 'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.') if not tag: tag = '' else: tag = 'desc-' + tag if confounds: self.set_confounds(confounds) files = sorted(self.get_selected_files(quiet=1)) confound_files = sorted( self.get_selected_files(quiet=1, pipeline='confound')) files, confound_files = confound_matching(files, confound_files) if not clean_params: clean_params = {} with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)} for j in as_completed(job): j.result() self.set_pipeline('teneto_' + teneto.__version__) self.set_bids_suffix('roi') if tag: self.set_bids_tags({'desc': tag.split('-')[1]})
python
{ "resource": "" }
q525
TenetoBIDS.networkmeasures
train
def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None): """ Calculates a network measure For available functions see: teneto.networkmeasures Parameters ---------- measure : str or list Name of function(s) from teneto.networkmeasures that will be run. measure_params : dict or list of dictionaries Containing kwargs for the argument in measure. See note regarding Communities key. tag : str Add additional tag to saved filenames. Note ---- In measure_params, the communities key can equal 'template', 'static', or 'temporal'. These options must be precalculated. If template, Teneto tries to load default for parcellation. If static, loads static communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads temporal communities in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy Returns ------- Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func/temporalnetwork/MEASURE/ Load the measure with tenetoBIDS.load_network_measure """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) # measure can be string or list if isinstance(measure, str): measure = [measure] # measure_params can be dictionary or list of dictionaries if isinstance(measure_params, dict): measure_params = [measure_params] if measure_params and len(measure) != len(measure_params): raise ValueError('Number of identified measure_params (' + str(len(measure_params)) + ') differs from number of identified measures (' + str(len(measure)) + '). Leave blank dictionary if default methods are wanted') files = self.get_selected_files(quiet=1) if not tag: tag = '' else: tag = 'desc-' + tag with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_networkmeasures, f, tag, measure, measure_params) for f in files} for j in as_completed(job): j.result()
python
{ "resource": "" }
q526
TenetoBIDS.set_bids_suffix
train
def set_bids_suffix(self, bids_suffix): """ The last analysis step is the final tag that is present in files. """ self.add_history(inspect.stack()[0][3], locals(), 1) self.bids_suffix = bids_suffix
python
{ "resource": "" }
q527
TenetoBIDS.set_pipeline
train
def set_pipeline(self, pipeline): """ Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string. """ self.add_history(inspect.stack()[0][3], locals(), 1) if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline): print('Specified derivatives directory not found.') self.get_pipeline_alternatives() else: # Todo: perform check that pipeline is valid self.pipeline = pipeline
python
{ "resource": "" }
q528
TenetoBIDS.load_frompickle
train
def load_frompickle(cls, fname, reload_object=False): """ Load a saved instance of TenetoBIDS from a pickle file. Parameters ---------- fname : str path to pickle object (output of TenetoBIDS.save_aspickle) reload_object : bool (default False) reloads object by calling teneto.TenetoBIDS (some information lost, for development) Returns ------- self : TenetoBIDS instance """ if fname[-4:] != '.pkl': fname += '.pkl' with open(fname, 'rb') as f: tnet = pickle.load(f) if reload_object: reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix, bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs) reloadnet.history = tnet.history tnet = reloadnet return tnet
python
{ "resource": "" }
q529
temporal_closeness_centrality
train
def temporal_closeness_centrality(tnet=None, paths=None): ''' Returns temporal closeness centrality per node. Parameters ----------- Input should be *either* tnet or paths. tnet : array or dict Temporal network input (graphlet or contact). nettype: 'bu', 'bd'. paths : pandas dataframe Output of teneto.networkmeasures.shortest_temporal_path Returns -------- close : array temporal closeness centrality (nodal measure) ''' if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate them if tnet is not None: paths = shortest_temporal_path(tnet) pathmat = np.zeros([paths[['from', 'to']].max().max( )+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan pathmat[paths['from'].values, paths['to'].values, paths['t_start'].values] = paths['temporal-distance'] closeness = np.nansum(1 / np.nanmean(pathmat, axis=2), axis=1) / (pathmat.shape[1] - 1) return closeness
python
{ "resource": "" }
q530
flatten
train
def flatten(d, reducer='tuple', inverse=False): """Flatten dict-like object. Parameters ---------- d: dict-like object The dict that will be flattened. reducer: {'tuple', 'path', function} (default: 'tuple') The key joining method. If a function is given, the function will be used to reduce. 'tuple': The resulting key will be tuple of the original keys 'path': Use ``os.path.join`` to join keys. inverse: bool (default: False) Whether you want invert the resulting key and value. Returns ------- flat_dict: dict """ if isinstance(reducer, str): reducer = REDUCER_DICT[reducer] flat_dict = {} def _flatten(d, parent=None): for key, value in six.viewitems(d): flat_key = reducer(parent, key) if isinstance(value, Mapping): _flatten(value, flat_key) else: if inverse: flat_key, value = value, flat_key if flat_key in flat_dict: raise ValueError("duplicated key '{}'".format(flat_key)) flat_dict[flat_key] = value _flatten(d) return flat_dict
python
{ "resource": "" }
q531
nested_set_dict
train
def nested_set_dict(d, keys, value): """Set a value to a sequence of nested keys Parameters ---------- d: Mapping keys: Sequence[str] value: Any """ assert keys key = keys[0] if len(keys) == 1: if key in d: raise ValueError("duplicated key '{}'".format(key)) d[key] = value return d = d.setdefault(key, {}) nested_set_dict(d, keys[1:], value)
python
{ "resource": "" }
q532
unflatten
train
def unflatten(d, splitter='tuple', inverse=False): """Unflatten dict-like object. Parameters ---------- d: dict-like object The dict that will be unflattened. splitter: {'tuple', 'path', function} (default: 'tuple') The key splitting method. If a function is given, the function will be used to split. 'tuple': Use each element in the tuple key as the key of the unflattened dict. 'path': Use ``pathlib.Path.parts`` to split keys. inverse: bool (default: False) Whether you want to invert the key and value before flattening. Returns ------- unflattened_dict: dict """ if isinstance(splitter, str): splitter = SPLITTER_DICT[splitter] unflattened_dict = {} for flat_key, value in six.viewitems(d): if inverse: flat_key, value = value, flat_key key_tuple = splitter(flat_key) nested_set_dict(unflattened_dict, key_tuple, value) return unflattened_dict
python
{ "resource": "" }
q533
plot_track
train
def plot_track(track, filename=None, beat_resolution=None, downbeats=None, preset='default', cmap='Blues', xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='both', grid_linestyle=':', grid_linewidth=.5): """ Plot the pianoroll or save a plot of the pianoroll. Parameters ---------- filename : The filename to which the plot is saved. If None, save nothing. beat_resolution : int The number of time steps used to represent a beat. Required and only effective when `xtick` is 'beat'. downbeats : list An array that indicates whether the time step contains a downbeat (i.e., the first time step of a bar). preset : {'default', 'plain', 'frame'} A string that indicates the preset theme to use. - In 'default' preset, the ticks, grid and labels are on. - In 'frame' preset, the ticks and grid are both off. - In 'plain' preset, the x- and y-axis are both off. cmap : `matplotlib.colors.Colormap` The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to 'Blues'. Only effective when `pianoroll` is 2D. xtick : {'auto', 'beat', 'step', 'off'} A string that indicates what to use as ticks along the x-axis. If 'auto' is given, automatically set to 'beat' if `beat_resolution` is also given and set to 'step', otherwise. Defaults to 'auto'. ytick : {'octave', 'pitch', 'off'} A string that indicates what to use as ticks along the y-axis. Defaults to 'octave'. xticklabel : bool Whether to add tick labels along the x-axis. Only effective when `xtick` is not 'off'. yticklabel : {'auto', 'name', 'number', 'off'} If 'name', use octave name and pitch name (key name when `is_drum` is True) as tick labels along the y-axis. If 'number', use pitch number. If 'auto', set to 'name' when `ytick` is 'octave' and 'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective when `ytick` is not 'off'. tick_loc : tuple or list The locations to put the ticks. Availables elements are 'bottom', 'top', 'left' and 'right'. Defaults to ('bottom', 'left'). tick_direction : {'in', 'out', 'inout'} A string that indicates where to put the ticks. Defaults to 'in'. Only effective when one of `xtick` and `ytick` is on. label : {'x', 'y', 'both', 'off'} A string that indicates whether to add labels to the x-axis and y-axis. Defaults to 'both'. grid : {'x', 'y', 'both', 'off'} A string that indicates whether to add grids to the x-axis, y-axis, both or neither. Defaults to 'both'. grid_linestyle : str Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle' argument. grid_linewidth : float Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth' argument. Returns ------- fig : `matplotlib.figure.Figure` object A :class:`matplotlib.figure.Figure` object. ax : `matplotlib.axes.Axes` object A :class:`matplotlib.axes.Axes` object. """ if not HAS_MATPLOTLIB: raise ImportError("matplotlib package is required for plotting " "supports.") fig, ax = plt.subplots() plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution, downbeats, preset=preset, cmap=cmap, xtick=xtick, ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) if filename is not None: plt.savefig(filename) return fig, ax
python
{ "resource": "" }
q534
Multitrack.append_track
train
def append_track(self, track=None, pianoroll=None, program=0, is_drum=False, name='unknown'): """ Append a multitrack.Track instance to the track list or create a new multitrack.Track object and append it to the track list. Parameters ---------- track : pianoroll.Track A :class:`pypianoroll.Track` instance to be appended to the track list. pianoroll : np.ndarray, shape=(n_time_steps, 128) A pianoroll matrix. The first and second dimension represents time and pitch, respectively. Available datatypes are bool, int and float. Only effective when `track` is None. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). Only effective when `track` is None. is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. Only effective when `track` is None. name : str The name of the track. Defaults to 'unknown'. Only effective when `track` is None. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if track is not None: if not isinstance(track, Track): raise TypeError("`track` must be a pypianoroll.Track instance.") track.check_validity() else: track = Track(pianoroll, program, is_drum, name) self.tracks.append(track)
python
{ "resource": "" }
q535
Multitrack.check_validity
train
def check_validity(self): """ Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type). """ # tracks for track in self.tracks: if not isinstance(track, Track): raise TypeError("`tracks` must be a list of " "`pypianoroll.Track` instances.") track.check_validity() # tempo if not isinstance(self.tempo, np.ndarray): raise TypeError("`tempo` must be int or a numpy array.") elif not np.issubdtype(self.tempo.dtype, np.number): raise TypeError("Data type of `tempo` must be a subdtype of " "np.number.") elif self.tempo.ndim != 1: raise ValueError("`tempo` must be a 1D numpy array.") if np.any(self.tempo <= 0.0): raise ValueError("`tempo` should contain only positive numbers.") # downbeat if self.downbeat is not None: if not isinstance(self.downbeat, np.ndarray): raise TypeError("`downbeat` must be a numpy array.") if not np.issubdtype(self.downbeat.dtype, np.bool_): raise TypeError("Data type of `downbeat` must be bool.") if self.downbeat.ndim != 1: raise ValueError("`downbeat` must be a 1D numpy array.") # beat_resolution if not isinstance(self.beat_resolution, int): raise TypeError("`beat_resolution` must be int.") if self.beat_resolution < 1: raise ValueError("`beat_resolution` must be a positive integer.") # name if not isinstance(self.name, string_types): raise TypeError("`name` must be a string.")
python
{ "resource": "" }
q536
Multitrack.clip
train
def clip(self, lower=0, upper=127): """ Clip the pianorolls of all tracks by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianorolls. Defaults to 0. upper : int or float The upper bound to clip the pianorolls. Defaults to 127. """ for track in self.tracks: track.clip(lower, upper)
python
{ "resource": "" }
q537
Multitrack.get_downbeat_steps
train
def get_downbeat_steps(self): """ Return the indices of time steps that contain downbeats. Returns ------- downbeat_steps : list The indices of time steps that contain downbeats. """ if self.downbeat is None: return [] downbeat_steps = np.nonzero(self.downbeat)[0].tolist() return downbeat_steps
python
{ "resource": "" }
q538
Multitrack.get_empty_tracks
train
def get_empty_tracks(self): """ Return the indices of tracks with empty pianorolls. Returns ------- empty_track_indices : list The indices of tracks with empty pianorolls. """ empty_track_indices = [idx for idx, track in enumerate(self.tracks) if not np.any(track.pianoroll)] return empty_track_indices
python
{ "resource": "" }
q539
Multitrack.get_merged_pianoroll
train
def get_merged_pianoroll(self, mode='sum'): """ Return the merged pianoroll. Parameters ---------- mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of all the pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among all the pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the pianorolls has nonzero value at that pixel; False if all pianorolls are inactive (zero-valued) at that pixel. Returns ------- merged : np.ndarray, shape=(n_time_steps, 128) The merged pianoroll. """ stacked = self.get_stacked_pianoroll() if mode == 'any': merged = np.any(stacked, axis=2) elif mode == 'sum': merged = np.sum(stacked, axis=2) elif mode == 'max': merged = np.max(stacked, axis=2) else: raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") return merged
python
{ "resource": "" }
q540
Multitrack.merge_tracks
train
def merge_tracks(self, track_indices=None, mode='sum', program=0, is_drum=False, name='merged', remove_merged=False): """ Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : list The indices of tracks to be merged. Defaults to all the tracks. mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of the collected pianorolls. Note that for binarized pianorolls, integer summation is performed. - In 'max' mode, for each pixel, the maximum value among the collected pianorolls is assigned to the merged pianoroll. - In 'any' mode, the value of a pixel in the merged pianoroll is True if any of the collected pianorolls has nonzero value at that pixel; False if all the collected pianorolls are inactive (zero-valued) at that pixel. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. name : str A name to be assigned to the merged track. Defaults to 'merged'. remove_merged : bool True to remove the source tracks from the track list. False to keep them. Defaults to False. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if mode not in ('max', 'sum', 'any'): raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") merged = self[track_indices].get_merged_pianoroll(mode) merged_track = Track(merged, program, is_drum, name) self.append_track(merged_track) if remove_merged: self.remove_tracks(track_indices)
python
{ "resource": "" }
q541
Multitrack.pad_to_same
train
def pad_to_same(self): """Pad shorter pianorolls with zeros at the end along the time axis to make the resulting pianoroll lengths the same as the maximum pianoroll length among all the tracks.""" max_length = self.get_max_length() for track in self.tracks: if track.pianoroll.shape[0] < max_length: track.pad(max_length - track.pianoroll.shape[0])
python
{ "resource": "" }
q542
Multitrack.remove_tracks
train
def remove_tracks(self, track_indices): """ Remove tracks specified by `track_indices`. Parameters ---------- track_indices : list The indices of the tracks to be removed. """ if isinstance(track_indices, int): track_indices = [track_indices] self.tracks = [track for idx, track in enumerate(self.tracks) if idx not in track_indices]
python
{ "resource": "" }
q543
Multitrack.transpose
train
def transpose(self, semitone): """ Transpose the pianorolls of all tracks by a number of semitones, where positive values are for higher key, while negative values are for lower key. The drum tracks are ignored. Parameters ---------- semitone : int The number of semitones to transpose the pianorolls. """ for track in self.tracks: if not track.is_drum: track.transpose(semitone)
python
{ "resource": "" }
q544
Multitrack.trim_trailing_silence
train
def trim_trailing_silence(self): """Trim the trailing silences of the pianorolls of all tracks. Trailing silences are considered globally.""" active_length = self.get_active_length() for track in self.tracks: track.pianoroll = track.pianoroll[:active_length]
python
{ "resource": "" }
q545
Multitrack.write
train
def write(self, filename): """ Write the multitrack pianoroll to a MIDI file. Parameters ---------- filename : str The name of the MIDI file to which the multitrack pianoroll is written. """ if not filename.endswith(('.mid', '.midi', '.MID', '.MIDI')): filename = filename + '.mid' pm = self.to_pretty_midi() pm.write(filename)
python
{ "resource": "" }
q546
check_pianoroll
train
def check_pianoroll(arr): """ Return True if the array is a standard piano-roll matrix. Otherwise, return False. Raise TypeError if the input object is not a numpy array. """ if not isinstance(arr, np.ndarray): raise TypeError("`arr` must be of np.ndarray type") if not (np.issubdtype(arr.dtype, np.bool_) or np.issubdtype(arr.dtype, np.number)): return False if arr.ndim != 2: return False if arr.shape[1] != 128: return False return True
python
{ "resource": "" }
q547
pad
train
def pad(obj, pad_length): """ Return a copy of the object with piano-roll padded with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad along the time axis with zeros. """ _check_supported(obj) copied = deepcopy(obj) copied.pad(pad_length) return copied
python
{ "resource": "" }
q548
pad_to_multiple
train
def pad_to_multiple(obj, factor): """ Return a copy of the object with its piano-roll padded with zeros at the end along the time axis with the minimal length that make the length of the resulting piano-roll a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting piano-roll will be a multiple of. """ _check_supported(obj) copied = deepcopy(obj) copied.pad_to_multiple(factor) return copied
python
{ "resource": "" }
q549
pad_to_same
train
def pad_to_same(obj):
    """
    Return a copy of the object with shorter piano-rolls padded with zeros at
    the end along the time axis to the length of the piano-roll with the
    maximal length.

    """
    if not isinstance(obj, Multitrack):
        raise TypeError("Support only `pypianoroll.Multitrack` class objects")
    copied = deepcopy(obj)
    copied.pad_to_same()
    return copied
python
{ "resource": "" }
q550
save
train
def save(filepath, obj, compressed=True):
    """
    Save the object to a .npz file.

    Parameters
    ----------
    filepath : str
        The path to save the file.
    obj: `pypianoroll.Multitrack` objects
        The object to be saved.

    """
    if not isinstance(obj, Multitrack):
        raise TypeError("Support only `pypianoroll.Multitrack` class objects")
    obj.save(filepath, compressed)
python
{ "resource": "" }
q551
write
train
def write(obj, filepath):
    """
    Write the object to a MIDI file.

    Parameters
    ----------
    filepath : str
        The path to write the MIDI file.

    """
    if not isinstance(obj, Multitrack):
        raise TypeError("Support only `pypianoroll.Multitrack` class objects")
    obj.write(filepath)
python
{ "resource": "" }
q552
_validate_pianoroll
train
def _validate_pianoroll(pianoroll):
    """Raise an error if the input array is not a standard pianoroll."""
    if not isinstance(pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be of np.ndarray type.")
    if not (np.issubdtype(pianoroll.dtype, np.bool_)
            or np.issubdtype(pianoroll.dtype, np.number)):
        raise TypeError("The data type of `pianoroll` must be np.bool_ or a "
                        "subdtype of np.number.")
    if pianoroll.ndim != 2:
        raise ValueError("`pianoroll` must have exactly two dimensions.")
    if pianoroll.shape[1] != 128:
        raise ValueError("The length of the second axis of `pianoroll` must be "
                         "128.")
python
{ "resource": "" }
q553
_to_chroma
train
def _to_chroma(pianoroll):
    """Return the unnormalized chroma features of a pianoroll."""
    _validate_pianoroll(pianoroll)
    reshaped = pianoroll[:, :120].reshape(-1, 12, 10)
    reshaped[..., :8] += pianoroll[:, 120:].reshape(-1, 1, 8)
    return np.sum(reshaped, 1)
python
{ "resource": "" }
q554
empty_beat_rate
train
def empty_beat_rate(pianoroll, beat_resolution):
    """Return the ratio of empty beats to the total number of beats in a
    pianoroll."""
    _validate_pianoroll(pianoroll)
    reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
    # count the beats that contain no active notes at all
    n_empty_beats = len(reshaped) - np.count_nonzero(reshaped.any(1))
    return n_empty_beats / len(reshaped)
python
{ "resource": "" }
q555
n_pitche_classes_used
train
def n_pitche_classes_used(pianoroll):
    """Return the number of unique pitch classes used in a pianoroll."""
    _validate_pianoroll(pianoroll)
    chroma = _to_chroma(pianoroll)
    return np.count_nonzero(np.any(chroma, 0))
python
{ "resource": "" }
q556
polyphonic_rate
train
def polyphonic_rate(pianoroll, threshold=2):
    """Return the ratio of the number of time steps where the number of pitches
    being played is larger than `threshold` to the total number of time steps
    in a pianoroll."""
    _validate_pianoroll(pianoroll)
    n_poly = np.count_nonzero(np.count_nonzero(pianoroll, 1) > threshold)
    return n_poly / len(pianoroll)
python
{ "resource": "" }
q557
in_scale_rate
train
def in_scale_rate(pianoroll, key=3, kind='major'):
    """Return the ratio of the number of nonzero entries that lie in a
    specific scale to the total number of nonzero entries in a pianoroll.
    Default to C major scale."""
    if not isinstance(key, int):
        raise TypeError("`key` must be an integer.")
    if key > 11 or key < 0:
        raise ValueError("`key` must be an integer between 0 and 11.")
    if kind not in ('major', 'minor'):
        raise ValueError("`kind` must be one of 'major' or 'minor'.")
    _validate_pianoroll(pianoroll)

    def _scale_mask(key, kind):
        """Return a scale mask for the given key. Default to C major scale."""
        if kind == 'major':
            a_scale_mask = np.array(
                [0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1], bool)
        else:
            a_scale_mask = np.array(
                [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], bool)
        return np.roll(a_scale_mask, key)

    chroma = _to_chroma(pianoroll)
    scale_mask = _scale_mask(key, kind)
    n_in_scale = np.sum(scale_mask.reshape(-1, 12) * chroma)
    return n_in_scale / np.count_nonzero(pianoroll)
python
{ "resource": "" }
q558
Track.assign_constant
train
def assign_constant(self, value, dtype=None):
    """
    Assign a constant value to all nonzeros in the pianoroll. If the
    pianoroll is not binarized, its data type will be preserved. If the
    pianoroll is binarized, it will be cast to the type of `value`.

    Arguments
    ---------
    value : int or float
        The constant value to be assigned to all the nonzeros in the
        pianoroll.

    """
    if not self.is_binarized():
        self.pianoroll[self.pianoroll.nonzero()] = value
        return
    if dtype is None:
        if isinstance(value, int):
            dtype = int
        elif isinstance(value, float):
            dtype = float
    nonzero = self.pianoroll.nonzero()
    self.pianoroll = np.zeros(self.pianoroll.shape, dtype)
    self.pianoroll[nonzero] = value
python
{ "resource": "" }
q559
Track.binarize
train
def binarize(self, threshold=0):
    """
    Binarize the pianoroll.

    Parameters
    ----------
    threshold : int or float
        A threshold used to binarize the pianorolls. Defaults to zero.

    """
    if not self.is_binarized():
        self.pianoroll = (self.pianoroll > threshold)
python
{ "resource": "" }
q560
Track.check_validity
train
def check_validity(self):
    """Raise error if any invalid attribute found."""
    # pianoroll
    if not isinstance(self.pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be a numpy array.")
    if not (np.issubdtype(self.pianoroll.dtype, np.bool_)
            or np.issubdtype(self.pianoroll.dtype, np.number)):
        raise TypeError("The data type of `pianoroll` must be np.bool_ or "
                        "a subdtype of np.number.")
    if self.pianoroll.ndim != 2:
        raise ValueError("`pianoroll` must have exactly two dimensions.")
    if self.pianoroll.shape[1] != 128:
        raise ValueError("The length of the second axis of `pianoroll` "
                         "must be 128.")
    # program
    if not isinstance(self.program, int):
        raise TypeError("`program` must be int.")
    if self.program < 0 or self.program > 127:
        raise ValueError("`program` must be in between 0 and 127.")
    # is_drum
    if not isinstance(self.is_drum, bool):
        raise TypeError("`is_drum` must be bool.")
    # name
    if not isinstance(self.name, string_types):
        raise TypeError("`name` must be a string.")
python
{ "resource": "" }
q561
Track.clip
train
def clip(self, lower=0, upper=127):
    """
    Clip the pianoroll by the given lower and upper bounds.

    Parameters
    ----------
    lower : int or float
        The lower bound to clip the pianoroll. Defaults to 0.
    upper : int or float
        The upper bound to clip the pianoroll. Defaults to 127.

    """
    self.pianoroll = self.pianoroll.clip(lower, upper)
python
{ "resource": "" }
q562
Track.is_binarized
train
def is_binarized(self):
    """
    Return True if the pianoroll is already binarized. Otherwise, return
    False.

    Returns
    -------
    is_binarized : bool
        True if the pianoroll is already binarized; otherwise, False.

    """
    is_binarized = np.issubdtype(self.pianoroll.dtype, np.bool_)
    return is_binarized
python
{ "resource": "" }
q563
Track.pad
train
def pad(self, pad_length):
    """
    Pad the pianoroll with zeros at the end along the time axis.

    Parameters
    ----------
    pad_length : int
        The length to pad with zeros along the time axis.

    """
    self.pianoroll = np.pad(
        self.pianoroll, ((0, pad_length), (0, 0)), 'constant')
python
{ "resource": "" }
q564
Track.pad_to_multiple
train
def pad_to_multiple(self, factor):
    """
    Pad the pianoroll with zeros at the end along the time axis with the
    minimum length that makes the resulting pianoroll length a multiple of
    `factor`.

    Parameters
    ----------
    factor : int
        The value which the length of the resulting pianoroll will be
        a multiple of.

    """
    remainder = self.pianoroll.shape[0] % factor
    if remainder:
        pad_width = ((0, (factor - remainder)), (0, 0))
        self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant')
python
{ "resource": "" }
q565
Track.transpose
train
def transpose(self, semitone):
    """
    Transpose the pianoroll by a number of semitones, where positive values
    are for higher key, while negative values are for lower key.

    Parameters
    ----------
    semitone : int
        The number of semitones to transpose the pianoroll.

    """
    if semitone > 0 and semitone < 128:
        self.pianoroll[:, semitone:] = self.pianoroll[:, :(128 - semitone)]
        self.pianoroll[:, :semitone] = 0
    elif semitone < 0 and semitone > -128:
        self.pianoroll[:, :(128 + semitone)] = self.pianoroll[:, -semitone:]
        self.pianoroll[:, (128 + semitone):] = 0
python
{ "resource": "" }
q566
Track.trim_trailing_silence
train
def trim_trailing_silence(self):
    """Trim the trailing silence of the pianoroll."""
    length = self.get_active_length()
    self.pianoroll = self.pianoroll[:length]
python
{ "resource": "" }
q567
plot_conv_weights
train
def plot_conv_weights(layer, figsize=(6, 6)):
    """Plot the weights of a specific layer.

    Only really makes sense with convolutional layers.

    Parameters
    ----------
    layer : lasagne.layers.Layer

    """
    W = layer.W.get_value()
    shape = W.shape
    nrows = np.ceil(np.sqrt(shape[0])).astype(int)
    ncols = nrows

    for feature_map in range(shape[1]):
        figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)

        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')

        for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
            if i >= shape[0]:
                break
            axes[r, c].imshow(W[i, feature_map], cmap='gray',
                              interpolation='none')
    return plt
python
{ "resource": "" }
q568
plot_conv_activity
train
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activities of a specific layer.

    Only really makes sense with layers that work with 2D data (2D
    convolutional layers, 2D pooling layers ...).

    Parameters
    ----------
    layer : lasagne.layers.Layer

    x : numpy.ndarray
        Only takes one sample at a time, i.e. x.shape[0] == 1.

    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")

    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))

    activity = get_activity(x)
    shape = activity.shape
    nrows = np.ceil(np.sqrt(shape[1])).astype(int)
    ncols = nrows

    figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
    axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
                               interpolation='none')
    axes[0, ncols // 2].set_title('original')

    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')

    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[1]:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
                              interpolation='none')
    return plt
python
{ "resource": "" }
q569
occlusion_heatmap
train
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.

    In this function, a square part of the image is occluded (i.e. set to 0)
    and then the net is tested for its propensity to predict the correct
    label. One should expect that this propensity shrinks if critical parts
    of the image are occluded. If not, this indicates overfitting.

    Depending on the depth of the net and the size of the image, this
    function may take a while to finish, since one prediction for each pixel
    of the image is made.

    Currently, all color channels are occluded at the same time. Also, this
    does not really work if images are randomly distorted by the batch
    iterator.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.

    x : np.array
        The input data, should be of shape (1, c, x, y). Only makes sense
        with image data.

    target : int
        The true value of the image. If the net makes several predictions,
        say 10 classes, this indicates which one to look at.

    square_length : int (default=7)
        The length of the side of the square that occludes the image. Must
        be an odd number.

    Returns
    -------
    heat_array : np.array (with same size as image)
        A 2D np.array that at each point (i, j) contains the predicted
        probability of the correct class if the image is occluded by a
        square with center (i, j).

    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))

    num_classes = get_output_shape(net.layers_[-1])[1]
    img = x[0].copy()
    bs, col, s0, s1 = x.shape

    heat_array = np.zeros((s0, s1))
    pad = square_length // 2 + 1
    x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
    probs = np.zeros((s0, s1, num_classes))

    # generate occluded images
    for i in range(s0):
        # batch s1 occluded images for faster prediction
        for j in range(s1):
            x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
            x_pad[:, i:i + square_length, j:j + square_length] = 0.
            x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
        y_proba = net.predict_proba(x_occluded)
        probs[i] = y_proba.reshape(s1, num_classes)

    # from predicted probabilities, pick only those of target class
    for i in range(s0):
        for j in range(s1):
            heat_array[i, j] = probs[i, j, target]
    return heat_array
python
{ "resource": "" }
q570
plot_occlusion
train
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the net
    to classify the image correctly.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.

    X : numpy.array
        The input data, should be of shape (b, c, 0, 1). Only makes sense
        with image data.

    target : list or numpy.array of ints
        The true values of the image. If the net makes several predictions,
        say 10 classes, this indicates which one to look at. If more than
        one sample is passed to X, each of them needs its own target.

    square_length : int (default=7)
        The length of the side of the square that occludes the image. Must
        be an odd number.

    figsize : tuple (int, int)
        Size of the figure.

    Plots
    -----
    Figure with 3 subplots: the original image, the occlusion heatmap, and
    both images super-imposed.

    """
    return _plot_heat_map(
        net, X, figsize, lambda net, X, n: occlusion_heatmap(
            net, X, target[n], square_length))
python
{ "resource": "" }
q571
multiclass_logloss
train
def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.

    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    # Convert 'actual' to a binary array if it's not already:
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
        for i, val in enumerate(actual):
            actual2[i, val] = 1
        actual = actual2

    clip = np.clip(predicted, eps, 1 - eps)
    rows = actual.shape[0]
    vsota = np.sum(actual * np.log(clip))
    return -1.0 / rows * vsota
python
{ "resource": "" }
q572
objective
train
def objective(layers,
              loss_function,
              target,
              aggregate=aggregate,
              deterministic=False,
              l1=0,
              l2=0,
              get_output_kw=None):
    """
    Default implementation of the NeuralNet objective.

    :param layers: The underlying layers of the NeuralNetwork
    :param loss_function: The callable loss function to use
    :param target: the expected output
    :param aggregate: the aggregation function to use
    :param deterministic: Whether or not to get a deterministic output
    :param l1: Optional l1 regularization parameter
    :param l2: Optional l2 regularization parameter
    :param get_output_kw: optional kwargs to pass to
                          :meth:`NeuralNetwork.get_output`
    :return: The total calculated loss
    """
    if get_output_kw is None:
        get_output_kw = {}
    output_layer = layers[-1]
    network_output = get_output(
        output_layer, deterministic=deterministic, **get_output_kw)
    loss = aggregate(loss_function(network_output, target))

    if l1:
        loss += regularization.regularize_layer_params(
            layers.values(), regularization.l1) * l1
    if l2:
        loss += regularization.regularize_layer_params(
            layers.values(), regularization.l2) * l2
    return loss
python
{ "resource": "" }
q573
NeuralNet.initialize
train
def initialize(self):
    """Initializes the network. Checks that no extra kwargs were passed to
    the constructor, and compiles the train, predict, and evaluation
    functions. Subsequent calls to this function will return without any
    action.
    """
    if getattr(self, '_initialized', False):
        return

    out = getattr(self, '_output_layers', None)
    if out is None:
        self.initialize_layers()
    self._check_for_unused_kwargs()

    iter_funcs = self._create_iter_funcs(
        self.layers_, self.objective, self.update,
        self.y_tensor_type,
        )
    self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
    self._initialized = True
python
{ "resource": "" }
q574
NeuralNet.fit
train
def fit(self, X, y, epochs=None):
    """
    Runs the training loop for a given number of epochs

    :param X: The input data
    :param y: The ground truth
    :param epochs: The number of epochs to run, if `None` runs for the
                   network's :attr:`max_epochs`
    :return: This instance
    """
    if self.check_input:
        X, y = self._check_good_input(X, y)

    if self.use_label_encoder:
        self.enc_ = LabelEncoder()
        y = self.enc_.fit_transform(y).astype(np.int32)
        self.classes_ = self.enc_.classes_
    self.initialize()

    try:
        self.train_loop(X, y, epochs=epochs)
    except KeyboardInterrupt:
        pass
    return self
python
{ "resource": "" }
q575
NeuralNet.partial_fit
train
def partial_fit(self, X, y, classes=None):
    """
    Runs a single epoch using the provided data

    :return: This instance
    """
    return self.fit(X, y, epochs=1)
python
{ "resource": "" }
q576
ByDateQuerySetMixin.narrow
train
def narrow(self, **kwargs):
    """Up-to including"""
    from_date = kwargs.pop('from_date', None)
    to_date = kwargs.pop('to_date', None)
    date = kwargs.pop('date', None)
    qs = self
    if from_date:
        qs = qs.filter(date__gte=from_date)
    if to_date:
        qs = qs.filter(date__lte=to_date)
    if date:
        qs = qs.filter(date=date)
    return super(ByDateQuerySetMixin, qs).narrow(**kwargs)
python
{ "resource": "" }
q577
set_environment_variables
train
def set_environment_variables(json_file_path):
    """
    Read and set environment variables from a flat json file.

    Bear in mind that env vars set this way and later on read using
    `os.getenv` function will be strings since after all env vars are just
    that - plain strings.

    Json file example:
    ```
    {
        "FOO": "bar",
        "BAZ": true
    }
    ```

    :param json_file_path: path to flat json file
    :type json_file_path: str
    """
    if json_file_path:
        with open(json_file_path) as json_file:
            env_vars = json.loads(json_file.read())
        export_variables(env_vars)
python
{ "resource": "" }
q578
millis_interval
train
def millis_interval(start, end):
    """start and end are datetime instances"""
    diff = end - start
    millis = diff.days * 24 * 60 * 60 * 1000
    millis += diff.seconds * 1000
    millis += diff.microseconds / 1000
    return millis
python
{ "resource": "" }
q579
Script._import_lua
train
def _import_lua(load_dependencies=True):
    """
    Import lua and dependencies.

    :param load_dependencies: should Lua library dependencies be loaded?
    :raises: RuntimeError if Lua is not available
    """
    try:
        import lua
    except ImportError:
        raise RuntimeError("Lua not installed")

    lua_globals = lua.globals()
    if load_dependencies:
        Script._import_lua_dependencies(lua, lua_globals)
    return lua, lua_globals
python
{ "resource": "" }
q580
Script._import_lua_dependencies
train
def _import_lua_dependencies(lua, lua_globals):
    """
    Imports lua dependencies that are supported by redis lua scripts.

    The current implementation is fragile to the target platform and lua
    version and may be disabled if these imports are not needed.

    Included:
        - cjson lib.
    Pending:
        - base lib.
        - table lib.
        - string lib.
        - math lib.
        - debug lib.
        - cmsgpack lib.
    """
    if sys.platform not in ('darwin', 'windows'):
        import ctypes
        ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL)

    try:
        lua_globals.cjson = lua.eval('require "cjson"')
    except RuntimeError:
        raise RuntimeError("cjson not installed")
python
{ "resource": "" }
q581
MockRedis.lock
train
def lock(self, key, timeout=0, sleep=0):
    """Emulate lock."""
    return MockRedisLock(self, key, timeout, sleep)
python
{ "resource": "" }
q582
MockRedis.keys
train
def keys(self, pattern='*'):
    """Emulate keys."""
    # making sure the pattern is unicode/str.
    try:
        pattern = pattern.decode('utf-8')
        # This throws an AttributeError in python 3, or an
        # UnicodeEncodeError in python 2
    except (AttributeError, UnicodeEncodeError):
        pass

    # Make regex out of glob styled pattern.
    regex = fnmatch.translate(pattern)
    regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex))

    # Find every key that matches the pattern
    return [key for key in self.redis.keys()
            if regex.match(key.decode('utf-8'))]
python
{ "resource": "" }
q583
MockRedis.delete
train
def delete(self, *keys):
    """Emulate delete."""
    key_counter = 0
    for key in map(self._encode, keys):
        if key in self.redis:
            del self.redis[key]
            key_counter += 1
        if key in self.timeouts:
            del self.timeouts[key]
    return key_counter
python
{ "resource": "" }
q584
MockRedis.do_expire
train
def do_expire(self):
    """
    Expire objects assuming now == time
    """
    # Deep copy to avoid RuntimeError: dictionary changed size during iteration
    _timeouts = deepcopy(self.timeouts)
    for key, value in _timeouts.items():
        if value - self.clock.now() < timedelta(0):
            del self.timeouts[key]
            # removing the expired key
            if key in self.redis:
                self.redis.pop(key, None)
python
{ "resource": "" }
q585
MockRedis.set
train
def set(self, key, value, ex=None, px=None, nx=False, xx=False):
    """
    Set the ``value`` for the ``key`` in the context of the provided kwargs.

    As per the behavior of the redis-py lib:
    If nx and xx are both set, the function does nothing and None is returned.
    If px and ex are both set, the preference is given to px.
    If the key is not set for some reason, the lib function returns None.
    """
    key = self._encode(key)
    value = self._encode(value)

    if nx and xx:
        return None
    mode = "nx" if nx else "xx" if xx else None
    if self._should_set(key, mode):
        expire = None
        if ex is not None:
            expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex)
        if px is not None:
            expire = px if isinstance(px, timedelta) else timedelta(milliseconds=px)

        if expire is not None and expire.total_seconds() <= 0:
            raise ResponseError("invalid expire time in SETEX")

        result = self._set(key, value)
        if expire:
            self._expire(key, expire)

        return result
python
{ "resource": "" }
q586
MockRedis._should_set
train
def _should_set(self, key, mode):
    """
    Determine if it is okay to set a key.

    If the mode is None, returns True, otherwise, returns True or False
    based on the value of ``key`` and the ``mode`` (nx | xx).
    """
    if mode is None or mode not in ["nx", "xx"]:
        return True

    if mode == "nx":
        if key in self.redis:
            # nx means set only if key is absent
            # false if the key already exists
            return False
    elif key not in self.redis:
        # at this point mode can only be xx
        # xx means set only if the key already exists
        # false if is absent
        return False
    # for all other cases, return true
    return True
python
{ "resource": "" }
q587
MockRedis.setex
train
def setex(self, name, time, value):
    """
    Set the value of ``name`` to ``value`` that expires in ``time``
    seconds. ``time`` can be represented by an integer or a Python
    timedelta object.
    """
    if not self.strict:
        # when not strict mode swap value and time args order
        time, value = value, time
    return self.set(name, value, ex=time)
python
{ "resource": "" }
q588
MockRedis.psetex
train
def psetex(self, key, time, value):
    """
    Set the value of ``key`` to ``value`` that expires in ``time``
    milliseconds. ``time`` can be represented by an integer or a Python
    timedelta object.
    """
    return self.set(key, value, px=time)
python
{ "resource": "" }
q589
MockRedis.setnx
train
def setnx(self, key, value):
    """Set the value of ``key`` to ``value`` if key doesn't exist"""
    return self.set(key, value, nx=True)
python
{ "resource": "" }
q590
MockRedis.setbit
train
def setbit(self, key, offset, value):
    """
    Set the bit at ``offset`` in ``key`` to ``value``.
    """
    key = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(key, offset)

    if index >= len(bits):
        bits.extend(b"\x00" * (index + 1 - len(bits)))

    prev_val = 1 if (bits[index] & mask) else 0

    if value:
        bits[index] |= mask
    else:
        bits[index] &= ~mask

    self.redis[key] = bytes(bits)

    return prev_val
python
{ "resource": "" }
q591
MockRedis.getbit
train
def getbit(self, key, offset):
    """
    Returns the bit value at ``offset`` in ``key``.
    """
    key = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(key, offset)

    if index >= len(bits):
        return 0

    return 1 if (bits[index] & mask) else 0
python
{ "resource": "" }
q592
MockRedis.hexists
train
def hexists(self, hashkey, attribute):
    """Emulate hexists."""
    redis_hash = self._get_hash(hashkey, 'HEXISTS')
    return self._encode(attribute) in redis_hash
python
{ "resource": "" }
q593
MockRedis.hget
train
def hget(self, hashkey, attribute):
    """Emulate hget."""
    redis_hash = self._get_hash(hashkey, 'HGET')
    return redis_hash.get(self._encode(attribute))
python
{ "resource": "" }
q594
MockRedis.hmset
train
def hmset(self, hashkey, value):
    """Emulate hmset."""
    redis_hash = self._get_hash(hashkey, 'HMSET', create=True)
    for key, value in value.items():
        attribute = self._encode(key)
        redis_hash[attribute] = self._encode(value)
    return True
python
{ "resource": "" }
q595
MockRedis.hmget
train
def hmget(self, hashkey, keys, *args):
    """Emulate hmget."""
    redis_hash = self._get_hash(hashkey, 'HMGET')
    attributes = self._list_or_args(keys, args)
    return [redis_hash.get(self._encode(attribute))
            for attribute in attributes]
python
{ "resource": "" }
q596
MockRedis.hset
train
def hset(self, hashkey, attribute, value):
    """Emulate hset."""
    redis_hash = self._get_hash(hashkey, 'HSET', create=True)
    attribute = self._encode(attribute)
    attribute_present = attribute in redis_hash
    redis_hash[attribute] = self._encode(value)
    return long(0) if attribute_present else long(1)
python
{ "resource": "" }
q597
MockRedis.hsetnx
train
def hsetnx(self, hashkey, attribute, value):
    """Emulate hsetnx."""
    redis_hash = self._get_hash(hashkey, 'HSETNX', create=True)
    attribute = self._encode(attribute)
    if attribute in redis_hash:
        return long(0)
    else:
        redis_hash[attribute] = self._encode(value)
        return long(1)
python
{ "resource": "" }
q598
MockRedis.hincrby
train
def hincrby(self, hashkey, attribute, increment=1):
    """Emulate hincrby."""
    return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
python
{ "resource": "" }
q599
MockRedis.hincrbyfloat
train
def hincrbyfloat(self, hashkey, attribute, increment=1.0):
    """Emulate hincrbyfloat."""
    return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)
python
{ "resource": "" }