signature: string (lengths 8 – 3.44k)
body: string (lengths 0 – 1.41M)
docstring: string (lengths 1 – 122k)
id: string (lengths 5 – 17)
def setParameterList(self, paramlist):
self._parameters = paramlist<EOL>
Clears and sets all parameters to *paramlist* :param paramlist: all parameters for this model to have :type paramlist: list<dict>
f10683:c0:m17
def insertRow(self, position):
if position == -<NUM_LIT:1>:<EOL><INDENT>position = self.nrows()<EOL><DEDENT>defaultparam = { '<STR_LIT:start>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>' : [],<EOL>}<EOL>self._parameters.insert(position, defaultparam)<EOL>
Inserts an empty parameter at index *position* :param position: index at which to insert the new parameter :type position: int
f10683:c0:m18
def removeRow(self, position):
p = self._parameters.pop(position)<EOL>return p<EOL>
Removes the parameter at index *position* :param position: the parameter index :type position: int :returns: dict -- the removed parameter
f10683:c0:m19
def selectedParameterTypes(self, row):
param = self._parameters[row]<EOL>return self._selectionParameters(param)<EOL>
Gets a list of the intersection of the editable properties in the parameter *param*'s component selection. E.g. ['frequency', 'intensity'] :param row: the ith parameter number :type row: int :returns: list<str> -- a list of AbstractStimulusComponent attribute names
f10683:c0:m20
def ranges(self):
steps = []<EOL>for p in self._parameters:<EOL><INDENT>if p['<STR_LIT>'] == '<STR_LIT:filename>':<EOL><INDENT>steps.append(p['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>if p['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>start = p['<STR_LIT:start>']<EOL>stop = p['<STR_LIT>']<EOL>if start > stop:<EOL><INDENT>step = p['<STR_LIT>']*-<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>step = p['<STR_LIT>']<EOL><DEDENT>nsteps = self.nStepsForParam(p)<EOL>step_tmp = np.linspace(start, start+step*(nsteps-<NUM_LIT:2>), nsteps-<NUM_LIT:1>)<EOL>step_tmp = np.append(step_tmp,stop)<EOL>steps.append(np.around(step_tmp,<NUM_LIT:4>))<EOL><DEDENT>else:<EOL><INDENT>assert p['<STR_LIT:start>'] == p['<STR_LIT>']<EOL>steps.append([p['<STR_LIT:start>']])<EOL><DEDENT><DEDENT><DEDENT>return steps<EOL>
The expanded lists of values generated from the parameter fields :returns: list<list> -- the outer list has one entry per parameter; each inner list holds that parameter's values to loop through
f10683:c0:m21
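For a numeric parameter the expansion behaves like this minimal stand-alone sketch (the start/stop/step values are illustrative, and it assumes the step count includes both endpoints):

import numpy as np

start, stop, step = 0, 10, 2
nsteps = int(abs(stop - start) / step) + 1                              # 6 values, endpoints included
values = np.append(np.linspace(start, start + step * (nsteps - 2), nsteps - 1), stop)
print(np.around(values, 4))                                             # [ 0.  2.  4.  6.  8. 10.]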
def _selectionParameters(self, param):
components = param['<STR_LIT>']<EOL>if len(components) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>editable_sets = []<EOL>for comp in components:<EOL><INDENT>details = comp.auto_details()<EOL>editable_sets.append(set(details.keys()))<EOL><DEDENT>editable_paramters = set.intersection(*editable_sets)<EOL>return list(editable_paramters)<EOL>
see docstring for selectedParameterTypes
f10683:c0:m22
def updateComponentStartVals(self):
for param in self._parameters:<EOL><INDENT>for component in param['<STR_LIT>']:<EOL><INDENT>if param['<STR_LIT>'] == '<STR_LIT:filename>':<EOL><INDENT>component.set(param['<STR_LIT>'], param['<STR_LIT>'][<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>component.set(param['<STR_LIT>'], param['<STR_LIT:start>'])<EOL><DEDENT><DEDENT><DEDENT>
Go through selected components for each auto parameter and set the start value
f10683:c0:m23
def fileParameter(self, comp):
for row in range(self.nrows()):<EOL><INDENT>p = self._parameters[row]<EOL>if p['<STR_LIT>'] == '<STR_LIT:filename>':<EOL><INDENT>if comp in p['<STR_LIT>']:<EOL><INDENT>return row<EOL><DEDENT><DEDENT><DEDENT>
Returns the row of the filename parameter whose selection contains component *comp* :returns: int -- the index of the (filename) parameter *comp* is a member of
f10683:c0:m24
def editableRow(self, row):
return self._parameters[row]['<STR_LIT>'] != '<STR_LIT:filename>'<EOL>
Returns whether parameter at index *row* is editable :returns: bool -- True if values can be manipulated
f10683:c0:m25
def verify(self):
for row in range(self.nrows()):<EOL><INDENT>result = self.verify_row(row)<EOL>if result != <NUM_LIT:0>:<EOL><INDENT>return result<EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>
Checks all parameters for invalidating conditions :returns: str -- message if error, 0 otherwise
f10683:c0:m26
def verify_row(self, row):
param = self._parameters[row]<EOL>if param['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>if len(param['<STR_LIT>']) == <NUM_LIT:0>:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>if param['<STR_LIT>'] not in self._selectionParameters(param):<EOL><INDENT>return '<STR_LIT>'.format(param['<STR_LIT>'])<EOL><DEDENT>if param['<STR_LIT>'] == '<STR_LIT:filename>':<EOL><INDENT>if len(param['<STR_LIT>']) < <NUM_LIT:1>:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if param['<STR_LIT>'] == <NUM_LIT:0> and param['<STR_LIT:start>'] != param['<STR_LIT>']:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>if abs(param['<STR_LIT>'] - param['<STR_LIT:start>']) < param['<STR_LIT>']:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>if not self.checkLimits(row, param['<STR_LIT:start>']):<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>if not self.checkLimits(row, param['<STR_LIT>']):<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>
Checks parameter at index *row* for invalidating conditions :returns: str -- message if error, 0 otherwise
f10683:c0:m27
def batlab2sparkle(experiment_data):
<EOL>nsdata = {}<EOL>for attr in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:title>', '<STR_LIT>', '<STR_LIT:date>', '<STR_LIT>']:<EOL><INDENT>nsdata[attr] = experiment_data[attr]<EOL><DEDENT>for itest, test in enumerate(experiment_data['<STR_LIT:test>']):<EOL><INDENT>setname = '<STR_LIT>'.format(itest+<NUM_LIT:1>)<EOL>nsdata[setname] = {}<EOL>nsdata[setname]['<STR_LIT>'] = test['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>nsdata[setname]['<STR_LIT>'] = test['<STR_LIT>']<EOL>nsdata[setname]['<STR_LIT:start>'] = test['<STR_LIT:time>']<EOL>nsdata[setname]['<STR_LIT>'] = '<STR_LIT>'<EOL>nsdata[setname]['<STR_LIT>'] = '<STR_LIT>'<EOL>if test['<STR_LIT>'] == '<STR_LIT>' and test['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>nsdata[setname]['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>nsdata[setname]['<STR_LIT>'] = test['<STR_LIT>']<EOL><DEDENT>stims = []<EOL>for itrace, trace in enumerate(test['<STR_LIT>']):<EOL><INDENT>try:<EOL><INDENT>stim = {'<STR_LIT>': trace['<STR_LIT>'],<EOL>'<STR_LIT>': <NUM_LIT:0>,}<EOL>components = []<EOL>for icomp, component in enumerate(trace['<STR_LIT>']):<EOL><INDENT>delay_comp = {'<STR_LIT:index>': [icomp, <NUM_LIT:0>], '<STR_LIT>': '<STR_LIT>', <EOL>'<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>': component['<STR_LIT>']/<NUM_LIT>, <EOL>'<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:0>}<EOL>components.append(delay_comp)<EOL>comp = {'<STR_LIT>' : component['<STR_LIT>']/<NUM_LIT>, <EOL>'<STR_LIT:index>': [icomp, <NUM_LIT:1>], <EOL>'<STR_LIT>': component['<STR_LIT>']/<NUM_LIT>,<EOL>'<STR_LIT>': component['<STR_LIT>']/<NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:100> - component['<STR_LIT>']}<EOL>if component['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>comp['<STR_LIT>'] = '<STR_LIT>'<EOL>comp['<STR_LIT:filename>'] = component['<STR_LIT>']<EOL>comp['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>elif component['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>comp['<STR_LIT>'] = '<STR_LIT>'<EOL>usweep = <NUM_LIT:1> if component['<STR_LIT>'] else -<NUM_LIT:1><EOL>comp['<STR_LIT>'] = component['<STR_LIT>'] - (component['<STR_LIT>']/<NUM_LIT:2>)*usweep<EOL>comp['<STR_LIT>'] = component['<STR_LIT>'] + (component['<STR_LIT>']/<NUM_LIT:2>)*usweep<EOL><DEDENT>elif component['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>comp['<STR_LIT>'] = '<STR_LIT>'<EOL>comp['<STR_LIT>'] = component['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>comp['<STR_LIT>'] = component['<STR_LIT>']<EOL><DEDENT>components.append(comp)<EOL><DEDENT>stim['<STR_LIT>'] = components<EOL>stims.append(stim)<EOL><DEDENT>except TypeError:<EOL><INDENT>print('<STR_LIT>', itest, itrace)<EOL>print('<STR_LIT>', component)<EOL>continue<EOL><DEDENT><DEDENT>nsdata[setname]['<STR_LIT>'] = stims<EOL><DEDENT>return nsdata<EOL>
Sparkle expects metadata to have a certain hierarchical organization; reformat batlab experiment data to fit.
f10685:m0
def calibration_list(self):
return []<EOL>
Batlab data does not include calibration information, so this will always return an empty list.
f10685:c0:m5
def copy_group(from_file, to_file, key):
if not key in to_file:<EOL><INDENT>from_file.copy(key, to_file, key)<EOL><DEDENT>else:<EOL><INDENT>for attr in from_file[key].attrs:<EOL><INDENT>to_file.attrs[attr] = from_file[key].attrs[attr]<EOL><DEDENT>if hasattr(from_file[key], '<STR_LIT>'):<EOL><INDENT>for subkey in list(from_file[key].keys()):<EOL><INDENT>copy_group(from_file, to_file, '<STR_LIT:/>'.join([key,subkey]))<EOL><DEDENT><DEDENT><DEDENT>
Recursively copy all groups/datasets/attributes from from_file[key] to to_file. Datasets are not overwritten, attributes are.
f10686:m6
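A usage sketch, assuming both archives are opened with h5py (the file names are hypothetical):

import h5py

with h5py.File('session_a.hdf5', 'r') as src, h5py.File('merged.hdf5', 'a') as dst:
    for key in src.keys():
        copy_group(src, dst, key)   # existing datasets in dst are kept, attributes are refreshed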
def _repack(h5file):
f1, opened = _openfile(h5file) <EOL>filename1 = f1.filename<EOL>filename2 = filename1 + '<STR_LIT>'<EOL>f2 = h5py.File(filename2, '<STR_LIT:w>')<EOL>for key in list(f1.keys()):<EOL><INDENT>f1.copy(key, f2)<EOL><DEDENT>f1.close()<EOL>f2.close()<EOL>filename_tmp = filename1 + '<STR_LIT>'<EOL>os.rename(filename1, filename_tmp)<EOL>os.rename(filename2, filename1) <EOL>if opened:<EOL><INDENT>f = None <EOL><DEDENT>else:<EOL><INDENT>f = h5py.File(filename1)<EOL><DEDENT>os.remove(filename_tmp)<EOL>return f<EOL>
Repack archive to remove free space. Returns ------- file : h5py File or None If the input is a h5py.File then a h5py File instance of the repacked archive is returned. The input File instance will no longer be usable.
f10686:m8
def _openfile(h5file):
if isinstance(h5file, h5py.File):<EOL><INDENT>f = h5file<EOL>opened = False<EOL><DEDENT>elif isinstance(h5file, str):<EOL><INDENT>f = h5py.File(h5file)<EOL>opened = True<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise TypeError(msg) <EOL><DEDENT>return f, opened<EOL>
Open an archive if input is a path. Parameters ---------- h5file : str or h5py.File Filename or h5py.File instance of the archive. Returns ------- f : h5py.File Returns a h5py.File instance. opened : bool True if `h5file` is a path; False if `h5file` is a h5py.File object.
f10686:m9
def trim(self, key):
current_index = self.meta[key]['<STR_LIT>']<EOL>self.hdf5[key].resize(current_index, axis=<NUM_LIT:0>)<EOL>
Removes empty (unfilled) trailing rows from the dataset :param key: the dataset to trim :type key: str
f10686:c0:m12
def consolidate(self, key):
if self.meta[key]['<STR_LIT>'] not in ['<STR_LIT>']:<EOL><INDENT>print("<STR_LIT>", self.meta[key]['<STR_LIT>'])<EOL>return<EOL><DEDENT>attr_tmp = list(self.hdf5[key].attrs.items())<EOL>del self.hdf5[key]<EOL>setnum = self.meta[key]['<STR_LIT>']<EOL>setnum -= <NUM_LIT:1> <EOL>current_index = self.meta[key]['<STR_LIT>']<EOL>total_samples = (self.chunk_size * setnum) + current_index<EOL>self.datasets[key] = self.hdf5.create_dataset(key, (total_samples,))<EOL>self.datasets[key].attrs['<STR_LIT>'] = '<STR_LIT>' <EOL>self.datasets[key].attrs['<STR_LIT:start>'] = self.meta[key]['<STR_LIT:start>']<EOL>self.datasets[key].attrs['<STR_LIT>'] = '<STR_LIT>'<EOL>for iset in range(<NUM_LIT:0>, setnum):<EOL><INDENT>self.datasets[key][iset*self.chunk_size:(iset+<NUM_LIT:1>)*self.chunk_size] = self.datasets[key+'<STR_LIT>'+str(iset+<NUM_LIT:1>)][:]<EOL>self.datasets[key].attrs['<STR_LIT>'] = self.datasets[key].attrs['<STR_LIT>'] + self.datasets[key+'<STR_LIT>'+str(iset+<NUM_LIT:1>)].attrs['<STR_LIT>']<EOL><DEDENT>if current_index != <NUM_LIT:0>:<EOL><INDENT>self.datasets[key][setnum*self.chunk_size:(setnum*self.chunk_size)+current_index] = self.datasets[key+'<STR_LIT>'+str(setnum+<NUM_LIT:1>)][:current_index]<EOL>self.datasets[key].attrs['<STR_LIT>'] = self.datasets[key].attrs['<STR_LIT>'] + self.datasets[key+'<STR_LIT>'+str(setnum+<NUM_LIT:1>)].attrs['<STR_LIT>']<EOL><DEDENT>if self.datasets[key].attrs['<STR_LIT>'][-<NUM_LIT:1>] != '<STR_LIT:]>':<EOL><INDENT>self.datasets[key].attrs['<STR_LIT>'] = self.datasets[key].attrs['<STR_LIT>'] + '<STR_LIT:]>'<EOL><DEDENT>for k, v in attr_tmp:<EOL><INDENT>self.datasets[key].attrs[k] = v<EOL><DEDENT>for iset in range(setnum+<NUM_LIT:1>):<EOL><INDENT>del self.datasets[key+'<STR_LIT>'+str(iset+<NUM_LIT:1>)]<EOL>del self.hdf5[key+'<STR_LIT>'+str(iset+<NUM_LIT:1>)]<EOL><DEDENT>print('<STR_LIT>', list(self.hdf5.keys()))<EOL>print('<STR_LIT>', self.datasets[key].attrs['<STR_LIT>'])<EOL>print()<EOL>self.needs_repack = True<EOL>
Collapses a 'continuous' acquisition into a single dataset. This must be performed before calling the *get* function for *key* acquired in this mode. :param key: name of the dataset to consolidate :type key: str
f10686:c0:m13
def increment(index, dims, data_shape):
<EOL>inc_to_match = data_shape[<NUM_LIT:1>:]<EOL>for dim_a, dim_b in zip(inc_to_match, dims[-<NUM_LIT:1>*(len(inc_to_match)):]):<EOL><INDENT>if dim_a != dim_b:<EOL><INDENT>raise DataIndexError()<EOL><DEDENT><DEDENT>inc_index = len(index) - len(data_shape)<EOL>inc_amount = data_shape[<NUM_LIT:0>]<EOL>index[inc_index] += inc_amount<EOL>if index[inc_index] > dims[inc_index]:<EOL><INDENT>raise DataIndexError()<EOL><DEDENT>while inc_index > <NUM_LIT:0> and index[inc_index] == dims[inc_index]:<EOL><INDENT>index[inc_index-<NUM_LIT:1>] +=<NUM_LIT:1><EOL>index[inc_index:] = [<NUM_LIT:0>]*len(index[inc_index:])<EOL>inc_index -=<NUM_LIT:1><EOL><DEDENT>return index<EOL>
Increments a given index according to the shape of the data added :param index: Current index to be incremented :type index: list :param dims: Shape of the data structure being incremented into :type dims: tuple :param data_shape: Shape of the data being added; this is checked to ensure the increment is valid :returns: list -- the incremented index
f10687:m0
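The index behaves like an odometer over the leading dimensions; a worked stand-alone sketch (the function name and error type are illustrative, not the project's API):

def increment_index(index, dims, data_shape):
    pos = len(index) - len(data_shape)
    index[pos] += data_shape[0]                    # advance by the number of rows added
    if index[pos] > dims[pos]:
        raise IndexError('increment past the end of the dataset')
    while pos > 0 and index[pos] == dims[pos]:
        index[pos - 1] += 1                        # carry into the next slower dimension
        index[pos:] = [0] * (len(index) - pos)
        pos -= 1
    return index

# filling a (2, 3, 10) dataset one (10,)-shaped trace at a time:
print(increment_index([0, 0, 0], (2, 3, 10), (10,)))   # [0, 1, 0]
print(increment_index([0, 2, 0], (2, 3, 10), (10,)))   # [1, 0, 0]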
def close(self):
raise NotImplementedError<EOL>
Closes the datafile, only one reference to a file may be open at one time. If there is no data in the file, it will delete itself
f10687:c0:m1
def init_group(self, key, mode='<STR_LIT>'):
raise NotImplementedError<EOL>
Create a group hierarchy level :param key: The name of the group, may be nested e.g. 'topgroup/subgroup' :type key: str :param mode: The type of acquisition this group is for. Options are: 'finite', 'calibration', 'open', 'continuous' :type mode: str
f10687:c0:m2
def init_data(self, key, dims=None, mode='<STR_LIT>', nested_name=None):
raise NotImplementedError<EOL>
Initializes a new dataset :param key: The dataset or group name. If finite, this will create a group (if none exists), and will sequentially name datasets under this group test_# :type key: str :type dims: tuple :param dims: Dimensions of dataset: * if mode == 'finite', this is the total size * if mode == 'open', this is the dimension of a single trace * if mode == 'continuous', this is ignored * if mode == 'calibration', this is the total size :param mode: The kind of acquisition taking place :type mode: str :param nested_name: If mode is calibration, then this will be the dataset name created under the group key. Ignored for other modes. :type nested_name: str
f10687:c0:m3
def append(self, key, data, nested_name=None):
raise NotImplementedError<EOL>
Inserts data sequentially to structure in repeated calls. Depending on how the dataset was initialized: * If mode == 'finite': If *nested_name* is ``None``, data is appended to the current automatically incremented *test_#* dataset under the given group. Otherwise data is appended to the group *key*, dataset *nested_name*. * If mode == 'calibration': Must provide a *nested_name* for a dataset to append data to under group *key* * If mode == 'open': Appends chunk to dataset *key* * If mode == 'continuous': Appends to dataset *key* forever For 'finite' and 'calibration' modes, an attempt to append past the initialized dataset size will result in an error :param key: name of the dataset/group to append to :type key: str :param data: data to add to file :type data: numpy.ndarray :param nested_name: If mode is 'calibration' or 'finite', then this will be the dataset name created under the group key. Ignored for other modes. :type nested_name: str
f10687:c0:m4
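A usage sketch of the finite-mode workflow described above, assuming `data` is an open instance of a concrete AcquisitionData subclass and the group name is illustrative:

import numpy as np

data.init_group('segment_1', mode='finite')
data.init_data('segment_1', dims=(10, 2000), mode='finite')   # 10 traces of 2000 samples each
for _ in range(10):
    data.append('segment_1', np.zeros(2000))                  # each call fills the next row of test_#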
def insert(self, key, index, data):
raise NotImplementedError<EOL>
Inserts data to index location. For 'finite' mode only. Does not affect appending location marker. Will overwrite existing data. :param key: Group name to insert to :type key: str :param index: location where the data should be inserted :type index: tuple :param data: data to add to file :type data: numpy.ndarray
f10687:c0:m5
def get_data(self, key, index=None):
raise NotImplementedError<EOL>
Returns data for key at specified index :param key: name of the dataset to retrieve, may be nested :type key: str :param index: slice of the data to retrieve, ``None`` gets whole data set. Numpy style indexing. :type index: tuple
f10687:c0:m6
def get_info(self, key, inherited=False):
raise NotImplementedError<EOL>
Retrieves all saved attributes for the group or dataset. :param key: The name of group or dataset to get info for :type key: str :param inherited: If data uses a hierarchical structure, includes inherited attributes. :type inherited: bool :returns: dict -- named attributes and values
f10687:c0:m7
def get_calibration(self, key, reffreq):
raise NotImplementedError<EOL>
Gets a saved calibration, in attenuation relative to a reference frequency point :param key: The name of the calibration to retrieve :type key: str :param reffreq: The frequency to treat as zero; all other frequencies are then given as attenuation differences from this frequency :type reffreq: int :returns: (numpy.ndarray, numpy.ndarray) -- frequencies of the attenuation vector, attenuation values
f10687:c0:m8
def calibration_list(self):
raise NotImplementedError<EOL>
Lists the calibrations present in this file :returns: list<str> -- the keys for the calibration groups
f10687:c0:m9
def delete_group(self, key):
raise NotImplementedError<EOL>
Removes the group from the file, deleting all data under it :param key: Name of group to remove :type key: str
f10687:c0:m10
def set_metadata(self, key, attrdict, signal=False):
raise NotImplementedError<EOL>
Sets attributes for a dataset or group :param key: name of group or dataset :type key: str :param attrdict: A collection of name:value pairs to save as metadata :type attrdict: dict
f10687:c0:m11
def append_trace_info(self, key, stim_data):
raise NotImplementedError<EOL>
Sets the stimulus documentation for the given dataset/groupname. If key is for a finite group, sets for current test :param key: Group or dataset name :type key: str :param stim_data: JSON formatted data to append to a list :type stim_data: str
f10687:c0:m12
def keys(self):
raise NotImplementedError<EOL>
The high-level keys for this file. This may be the names of groups, and/or datasets. :returns: list<str> -- list of the keys
f10687:c0:m13
def all_datasets(self):
raise NotImplementedError<EOL>
Returns a list containing all datasets anywhere within file. Warning: this will get all the data in the file
f10687:c0:m14
def dataset_names(self):
raise NotImplementedError<EOL>
Returns a list of the names of every dataset in this file. Each name is a valid key for get_info and get_data
f10687:c0:m15
def get_trace_stim(self, key):
raise NotImplementedError<EOL>
Gets a list of the stimulus metadata for the given dataset *key*. :param key: The name of group or dataset to get stimulus info for :type key: str :returns: list<dict> -- each dict in the list holds the stimulus info for each trace in the test. Therefore, the list should have a length equal to the number of traces in the given test.
f10687:c0:m16
def open_acqdata(filename, user='<STR_LIT>', filemode='<STR_LIT>'):
if filename.lower().endswith(("<STR_LIT>", "<STR_LIT>")):<EOL><INDENT>return HDF5Data(filename, user, filemode)<EOL><DEDENT>elif filename.lower().endswith(("<STR_LIT>", "<STR_LIT>")):<EOL><INDENT>return BatlabData(filename, user, filemode)<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>", filename)<EOL><DEDENT>
Opens and returns the correct AcquisitionData object according to filename extension. Supported extensions: * .hdf5, .h5 for sparkle data * .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function; see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>` examples (if data file already exists):: data = open_acqdata('myexperiment.hdf5', filemode='r') print data.dataset_names() for batlab data:: data = open_acqdata('mouse666.raw', filemode='r') print data.dataset_names()
f10688:m0
def cache_file(symbol, func, has_date, root, date_type='<STR_LIT:date>'):
cur_mod = sys.modules[func.__module__]<EOL>data_tz = getattr(cur_mod, '<STR_LIT>') if hasattr(cur_mod, '<STR_LIT>') else '<STR_LIT>'<EOL>cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False)<EOL>if has_date:<EOL><INDENT>if hasattr(cur_mod, '<STR_LIT>'):<EOL><INDENT>file_fmt = getattr(cur_mod, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>file_fmt = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if hasattr(cur_mod, '<STR_LIT>'):<EOL><INDENT>file_fmt = getattr(cur_mod, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>file_fmt = '<STR_LIT>'<EOL><DEDENT><DEDENT>return data_file(<EOL>file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol<EOL>)<EOL>
Data file Args: symbol: symbol func: use function to categorize data has_date: contains date in data file root: root path date_type: parameter passed to utils.cur_time, [date, time, time_path, ...] Returns: str: data file
f10694:m0
def update_data(func):
default = dict([<EOL>(param.name, param.default)<EOL>for param in inspect.signature(func).parameters.values()<EOL>if param.default != getattr(inspect, '<STR_LIT>')<EOL>])<EOL>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>default.update(kwargs)<EOL>kwargs.update(default)<EOL>cur_mod = sys.modules[func.__module__]<EOL>logger = logs.get_logger(name_or_func=f'<STR_LIT>', types='<STR_LIT>')<EOL>root_path = cur_mod.DATA_PATH<EOL>date_type = kwargs.pop('<STR_LIT>', '<STR_LIT:date>')<EOL>save_static = kwargs.pop('<STR_LIT>', True)<EOL>save_dynamic = kwargs.pop('<STR_LIT>', True)<EOL>symbol = kwargs.get('<STR_LIT>')<EOL>file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)<EOL>d_file = cache_file(has_date=True, **file_kw)<EOL>s_file = cache_file(has_date=False, **file_kw)<EOL>cached = kwargs.pop('<STR_LIT>', False)<EOL>if cached and save_static and files.exists(s_file):<EOL><INDENT>logger.info(f'<STR_LIT>')<EOL>return pd.read_parquet(s_file)<EOL><DEDENT>data = func(*args, **kwargs)<EOL>if save_static:<EOL><INDENT>files.create_folder(s_file, is_file=True)<EOL>save_data(data=data, file_fmt=s_file, append=False)<EOL>logger.info(f'<STR_LIT>')<EOL><DEDENT>if save_dynamic:<EOL><INDENT>drop_dups = kwargs.pop('<STR_LIT>', None)<EOL>files.create_folder(d_file, is_file=True)<EOL>save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)<EOL>logger.info(f'<STR_LIT>')<EOL><DEDENT>return data<EOL><DEDENT>return wrapper<EOL>
Decorator to save data more easily. Use parquet as data format Args: func: function to load data from data source Returns: wrapped function
f10694:m1
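A usage sketch, assuming a data module that defines DATA_PATH at module level (the loader name and the returned frame are illustrative, not part of the library):

import pandas as pd

DATA_PATH = 'data/prices'                     # module-level root the decorator reads

@update_data
def daily_price(symbol, **kwargs):
    # ...fetch from the actual data source here...
    return pd.DataFrame(dict(close=[1.0, 2.0]))

px = daily_price(symbol='BHP AU')             # fetches, then saves static and dated parquet copies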
def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):
d_file = data_file(file_fmt=file_fmt, info=info, **kwargs)<EOL>if append and files.exists(d_file):<EOL><INDENT>data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False))<EOL>if drop_dups is not None:<EOL><INDENT>data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)<EOL><DEDENT><DEDENT>if not data.empty: data.to_parquet(d_file)<EOL>return data<EOL>
Save data to file Args: data: pd.DataFrame file_fmt: data file format in terms of f-strings append: whether to append data to existing data drop_dups: list, drop duplicates in columns info: dict, information to be hashed and passed to f-strings **kwargs: additional parameters for f-strings Examples: >>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> # save_data( >>> # data, '{ROOT}/daily/{typ}.parq', >>> # ROOT='tests/data', typ='earnings' >>> # )
f10694:m2
def data_file(file_fmt, info=None, **kwargs):
if isinstance(info, dict):<EOL><INDENT>kwargs['<STR_LIT>'] = hashlib.sha256(json.dumps(info).encode('<STR_LIT:utf-8>')).hexdigest()<EOL>kwargs.update(info)<EOL><DEDENT>return utils.fstr(fmt=file_fmt, **kwargs)<EOL>
Data file name for given information Args: file_fmt: file format in terms of f-strings info: dict, to be hashed and then passed to f-string using 'hash_key'; this info will also be passed to f-strings **kwargs: arguments for f-strings Returns: str: data file name
f10694:m3
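For example, grounded in the docstring's 'hash_key' convention (the format string and info values are illustrative):

data_file('{ROOT}/cache/{hash_key}.parq', info=dict(symbol='BHP AU', freq='1m'), ROOT='data')
# -> 'data/cache/<sha256 of the info dict>.parq'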
def exists(path) -> bool:
return os.path.exists(path=path)<EOL>
Check path or file exists (use os.path.exists) Args: path: path or file Examples >>> exists(f'{abspath(__file__, 1)}/xone/tests/files/test_1.json') True >>> exists(f'{abspath(__file__)}/tests/files/notfound.yml') False
f10696:m0
def abspath(cur_file, parent=<NUM_LIT:0>) -> str:
file_path = os.path.abspath(cur_file).replace('<STR_LIT:\\>', '<STR_LIT:/>')<EOL>if os.path.isdir(file_path) and parent == <NUM_LIT:0>: return file_path<EOL>adj = <NUM_LIT:1> - os.path.isdir(file_path)<EOL>return '<STR_LIT:/>'.join(file_path.split('<STR_LIT:/>')[:-(parent + adj)])<EOL>
Absolute path Args: cur_file: __file__ or file or path str parent: level of parent to look for Returns: str
f10696:m1
def create_folder(path_name: str, is_file=False):
path_sep = path_name.replace('<STR_LIT:\\>', '<STR_LIT:/>').split('<STR_LIT:/>')<EOL>for i in range(<NUM_LIT:1>, len(path_sep) + (<NUM_LIT:0> if is_file else <NUM_LIT:1>)):<EOL><INDENT>cur_path = '<STR_LIT:/>'.join(path_sep[:i])<EOL>if not os.path.exists(cur_path): os.mkdir(cur_path)<EOL><DEDENT>
Make folder as well as all parent folders if they do not exist Args: path_name: full path name is_file: whether input is the name of a file
f10696:m2
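For example (paths are illustrative):

create_folder('output/2019-01/report.csv', is_file=True)   # creates output/ and output/2019-01/ only
create_folder('output/plots')                              # creates the full path including plots/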
def all_files(<EOL>path_name, keyword='<STR_LIT>', ext='<STR_LIT>', full_path=True,<EOL>has_date=False, date_fmt=DATE_FMT<EOL>) -> list:
if not os.path.exists(path=path_name): return []<EOL>path_name = path_name.replace('<STR_LIT:\\>', '<STR_LIT:/>')<EOL>if keyword or ext:<EOL><INDENT>keyword = f'<STR_LIT>' if keyword else '<STR_LIT:*>'<EOL>if not ext: ext = '<STR_LIT:*>'<EOL>files = sort_by_modified([<EOL>f.replace('<STR_LIT:\\>', '<STR_LIT:/>') for f in glob.iglob(f'<STR_LIT>')<EOL>if os.path.isfile(f) and (f.replace('<STR_LIT:\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>][<NUM_LIT:0>] != '<STR_LIT>')<EOL>])<EOL><DEDENT>else:<EOL><INDENT>files = sort_by_modified([<EOL>f'<STR_LIT>' for f in os.listdir(path=path_name)<EOL>if os.path.isfile(f'<STR_LIT>') and (f[<NUM_LIT:0>] != '<STR_LIT>')<EOL>])<EOL><DEDENT>if has_date:<EOL><INDENT>files = filter_by_dates(files, date_fmt=date_fmt)<EOL><DEDENT>return files if full_path else [f.split('<STR_LIT:/>')[-<NUM_LIT:1>] for f in files]<EOL>
Search all files with criteria Returned list will be sorted by last modified Args: path_name: full path name keyword: keyword to search ext: file extensions, split by ',' full_path: whether return full path (default True) has_date: whether has date in file name (default False) date_fmt: date format to check for has_date parameter Returns: list: all file names with criteria fulfilled Examples: >>> test_folder = f'{abspath(__file__)}/tests/files' >>> all_files(test_folder, keyword='test', full_path=False) ['test_2.json', 'test_1.json'] >>> all_files(test_folder, has_date=True, full_path=False) ['dates_2019-01-02.yml', 'dates_2019-01-01.yml']
f10696:m3
def all_folders(<EOL>path_name, keyword='<STR_LIT>', has_date=False, date_fmt=DATE_FMT<EOL>) -> list:
if not os.path.exists(path=path_name): return []<EOL>path_name = path_name.replace('<STR_LIT:\\>', '<STR_LIT:/>')<EOL>if keyword:<EOL><INDENT>folders = sort_by_modified([<EOL>f.replace('<STR_LIT:\\>', '<STR_LIT:/>') for f in glob.iglob(f'<STR_LIT>')<EOL>if os.path.isdir(f) and (f.replace('<STR_LIT:\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>][<NUM_LIT:0>] != '<STR_LIT>')<EOL>])<EOL><DEDENT>else:<EOL><INDENT>folders = sort_by_modified([<EOL>f'<STR_LIT>' for f in os.listdir(path=path_name)<EOL>if os.path.isdir(f'<STR_LIT>') and (f[<NUM_LIT:0>] != '<STR_LIT>')<EOL>])<EOL><DEDENT>if has_date:<EOL><INDENT>folders = filter_by_dates(folders, date_fmt=date_fmt)<EOL><DEDENT>return folders<EOL>
Search all folders with criteria Returned list will be sorted by last modified Args: path_name: full path name keyword: keyword to search has_date: whether has date in file name (default False) date_fmt: date format to check for has_date parameter Returns: list: all folder names fulfilled criteria Examples: >>> target_folder = f'{abspath(__file__)}/tests/folders' >>> for f in sorted(all_folders(target_folder, keyword='test')): ... print(f.split('/')[-1]) test_1 test_2 >>> for f in sorted(all_folders(target_folder, has_date=True)): ... print(f.split('/')[-1]) dates_2019-01-01 dates_2019-01-02_labeled dates_2019-01-03
f10696:m4
def sort_by_modified(files_or_folders: list) -> list:
return sorted(files_or_folders, key=os.path.getmtime, reverse=True)<EOL>
Sort files or folders by modified time Args: files_or_folders: list of files or folders Returns: list
f10696:m5
def filter_by_dates(files_or_folders: list, date_fmt=DATE_FMT) -> list:
r = re.compile(f'<STR_LIT>')<EOL>return list(filter(<EOL>lambda vv: r.match(vv.replace('<STR_LIT:\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>]) is not None,<EOL>files_or_folders,<EOL>))<EOL>
Filter files or folders by date patterns Args: files_or_folders: list of files or folders date_fmt: date format Returns: list Examples: >>> filter_by_dates([ ... 't1/dts_2019-01-01', 't2/dts_2019-01-02', 't3/nodts_2019-01' ... ]) ['t1/dts_2019-01-01', 't2/dts_2019-01-02']
f10696:m6
def file_modified_time(file_name) -> pd.Timestamp:
return pd.to_datetime(time.ctime(os.path.getmtime(file_name)))<EOL>
File modified time in python Args: file_name: file name Returns: pd.Timestamp
f10696:m8
def trading_dates(start, end, calendar='<STR_LIT>'):
kw = dict(start=pd.Timestamp(start, tz='<STR_LIT>').date(), end=pd.Timestamp(end, tz='<STR_LIT>').date())<EOL>us_cal = getattr(sys.modules[__name__], f'<STR_LIT>')()<EOL>return pd.bdate_range(**kw).drop(us_cal.holidays(**kw))<EOL>
Trading dates for given exchange Args: start: start date end: end date calendar: exchange as string Returns: pd.DatetimeIndex: datetime index Examples: >>> bus_dates = ['2018-12-24', '2018-12-26', '2018-12-27'] >>> trd_dates = trading_dates(start='2018-12-23', end='2018-12-27') >>> assert len(trd_dates) == len(bus_dates) >>> assert pd.Series(trd_dates == pd.DatetimeIndex(bus_dates)).all()
f10697:m0
def profile(func):
def inner(*args, **kwargs):<EOL><INDENT>pr = cProfile.Profile()<EOL>pr.enable()<EOL>res = func(*args, **kwargs)<EOL>pr.disable()<EOL>s = io.StringIO()<EOL>ps = pstats.Stats(pr, stream=s).sort_stats('<STR_LIT>')<EOL>ps.print_stats()<EOL>print(s.getvalue())<EOL>return res<EOL><DEDENT>return inner<EOL>
Decorator to profile functions with cProfile Args: func: python function Returns: profile report References: https://osf.io/upav8/
f10698:m0
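Typical usage, a minimal sketch:

@profile
def slow_sum(n):
    return sum(i * i for i in range(n))

slow_sum(1_000_000)   # prints the cProfile report, then returns the result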
def get_logger(<EOL>name_or_func, log_file='<STR_LIT>', level=logging.INFO, types='<STR_LIT>', **kwargs<EOL>):
if isinstance(level, str): level = getattr(logging, level.upper())<EOL>log_name = name_or_func if isinstance(name_or_func, str) else utils.func_scope(name_or_func)<EOL>logger = logging.getLogger(name=log_name)<EOL>logger.setLevel(level=level)<EOL>if not len(logger.handlers):<EOL><INDENT>formatter = logging.Formatter(fmt=kwargs.get('<STR_LIT>', LOG_FMT))<EOL>if '<STR_LIT:file>' in types:<EOL><INDENT>file_handler = logging.FileHandler(log_file)<EOL>file_handler.setFormatter(fmt=formatter)<EOL>logger.addHandler(file_handler)<EOL><DEDENT>if '<STR_LIT>' in types:<EOL><INDENT>stream_handler = logging.StreamHandler()<EOL>stream_handler.setFormatter(fmt=formatter)<EOL>logger.addHandler(stream_handler)<EOL><DEDENT><DEDENT>return logger<EOL>
Generate logger Args: name_or_func: logger name or current running function log_file: logger file level: level of logs - debug, info, error types: file or stream, or both Returns: logger Examples: >>> get_logger(name_or_func='download_data', level='debug', types='stream') <Logger download_data (DEBUG)> >>> get_logger(name_or_func='preprocess', log_file='pre.log', types='file|stream') <Logger preprocess (INFO)>
f10700:m0
def plot_multi(data, cols=None, spacing=<NUM_LIT>, color_map=None, plot_kw=None, **kwargs):
import matplotlib.pyplot as plt<EOL>from pandas import plotting<EOL>if cols is None: cols = data.columns<EOL>if plot_kw is None: plot_kw = [{}] * len(cols)<EOL>if len(cols) == <NUM_LIT:0>: return<EOL>num_colors = len(utils.flatten(cols))<EOL>colors = getattr(getattr(plotting, '<STR_LIT>'), '<STR_LIT>')(num_colors=num_colors)<EOL>if color_map is None: color_map = dict()<EOL>fig = plt.figure()<EOL>ax, lines, labels, c_idx = None, [], [], <NUM_LIT:0><EOL>for n, col in enumerate(cols):<EOL><INDENT>if isinstance(col, (list, tuple)):<EOL><INDENT>ylabel = '<STR_LIT>'.join(cols[n])<EOL>color = [<EOL>color_map.get(cols[n][_ - c_idx], colors[_ % len(colors)])<EOL>for _ in range(c_idx, c_idx + len(cols[n]))<EOL>]<EOL>c_idx += len(col)<EOL><DEDENT>else:<EOL><INDENT>ylabel = col<EOL>color = color_map.get(col, colors[c_idx % len(colors)])<EOL>c_idx += <NUM_LIT:1><EOL><DEDENT>if '<STR_LIT>' in plot_kw[n]: color = plot_kw[n].pop('<STR_LIT>')<EOL>if ax is None:<EOL><INDENT>legend = plot_kw[<NUM_LIT:0>].pop('<STR_LIT>', kwargs.pop('<STR_LIT>', False))<EOL>ax = data.loc[:, col].plot(<EOL>label=col, color=color, legend=legend, zorder=n, **plot_kw[<NUM_LIT:0>], **kwargs<EOL>)<EOL>ax.set_ylabel(ylabel=ylabel)<EOL>line, label = ax.get_legend_handles_labels()<EOL>ax.spines['<STR_LIT:left>'].set_edgecolor('<STR_LIT>')<EOL>ax.spines['<STR_LIT:left>'].set_alpha(<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>legend = plot_kw[n].pop('<STR_LIT>', False)<EOL>ax_new = ax.twinx()<EOL>ax_new.spines['<STR_LIT:right>'].set_position(('<STR_LIT>', <NUM_LIT:1> + spacing * (n - <NUM_LIT:1>)))<EOL>data.loc[:, col].plot(<EOL>ax=ax_new, label=col, color=color, legend=legend, zorder=n, **plot_kw[n]<EOL>)<EOL>ax_new.set_ylabel(ylabel=ylabel)<EOL>line, label = ax_new.get_legend_handles_labels()<EOL>ax_new.spines['<STR_LIT:right>'].set_edgecolor('<STR_LIT>')<EOL>ax_new.spines['<STR_LIT:right>'].set_alpha(<NUM_LIT>)<EOL>ax_new.grid(False)<EOL><DEDENT>lines += line<EOL>labels += label<EOL><DEDENT>fig.legend(lines, labels, loc=<NUM_LIT:8>, prop=dict(), ncol=num_colors).set_zorder(len(cols))<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>
Plot data with multiple scales together Args: data: DataFrame of data cols: columns to be plotted spacing: spacing between legends color_map: customized colors in map plot_kw: kwargs for each plot **kwargs: kwargs for the first plot Returns: ax for plot Examples: >>> import pandas as pd >>> import numpy as np >>> >>> idx = range(5) >>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx) >>> # plot_multi(data=data, cols=['a', 'b'], plot_kw=[dict(style='.-'), dict()])
f10701:m0
def plot_h(data, cols, wspace=<NUM_LIT>, plot_kw=None, **kwargs):
import matplotlib.pyplot as plt<EOL>if plot_kw is None: plot_kw = [dict()] * len(cols)<EOL>_, axes = plt.subplots(nrows=<NUM_LIT:1>, ncols=len(cols), **kwargs)<EOL>plt.subplots_adjust(wspace=wspace)<EOL>for n, col in enumerate(cols):<EOL><INDENT>data.loc[:, col].plot(ax=axes[n], **plot_kw[n])<EOL><DEDENT>return axes<EOL>
Plot horizontally Args: data: DataFrame of data cols: columns to be plotted wspace: spacing between plots plot_kw: kwargs for each plot **kwargs: kwargs for the whole plot Returns: axes for plots Examples: >>> import pandas as pd >>> import numpy as np >>> >>> idx = range(5) >>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx) >>> # plot_h(data=data, cols=['a', 'b'], wspace=.2, plot_kw=[dict(style='.-'), dict()])
f10701:m1
def tolist(iterable):
return pd.Series(iterable).drop_duplicates().tolist()<EOL>
Simpler implementation of flatten method Args: iterable: any array or value Returns: list: list of unique values Examples: >>> tolist('xyz') ['xyz'] >>> tolist(['ab', 'cd', 'xy', 'ab']) ['ab', 'cd', 'xy']
f10702:m0
def fmt_dt(dt, fmt='<STR_LIT>'):
return pd.Timestamp(dt).strftime(fmt)<EOL>
Format date string Args: dt: any date format fmt: output date format Returns: str: date format Examples: >>> fmt_dt(dt='2018-12') '2018-12-01' >>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d') '20181231'
f10702:m1
def trade_day(dt, cal='<STR_LIT>'):
from xone import calendar<EOL>dt = pd.Timestamp(dt).date()<EOL>return calendar.trading_dates(start=dt - pd.Timedelta('<STR_LIT>'), end=dt, calendar=cal)[-<NUM_LIT:1>]<EOL>
Latest trading day w.r.t given dt Args: dt: date of reference cal: trading calendar Returns: pd.Timestamp: last trading day Examples: >>> trade_day('2018-12-25').strftime('%Y-%m-%d') '2018-12-24'
f10702:m2
def cur_time(typ='<STR_LIT:date>', tz=DEFAULT_TZ, trading=True, cal='<STR_LIT>'):
dt = pd.Timestamp('<STR_LIT>', tz=tz)<EOL>if typ == '<STR_LIT:date>':<EOL><INDENT>if trading: return trade_day(dt=dt, cal=cal).strftime('<STR_LIT>')<EOL>else: return dt.strftime('<STR_LIT>')<EOL><DEDENT>if typ == '<STR_LIT:time>': return dt.strftime('<STR_LIT>')<EOL>if typ == '<STR_LIT>': return dt.strftime('<STR_LIT>')<EOL>if typ == '<STR_LIT>': return dt<EOL>return trade_day(dt).date() if trading else dt.date()<EOL>
Current time Args: typ: one of ['date', 'time', 'time_path', 'raw', ''] tz: timezone trading: check if current date is trading day cal: trading calendar Returns: relevant current time or date Examples: >>> cur_dt = pd.Timestamp('now') >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d') True >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S') True >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S') True >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp) True >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp) True >>> cur_time(typ='', trading=False) == cur_dt.date() True
f10702:m3
def align_data(*args):
res = pd.DataFrame(pd.concat([<EOL>d.loc[~d.index.duplicated(keep='<STR_LIT>')].rename(<EOL>columns=lambda vv: '<STR_LIT>' % (vv, i + <NUM_LIT:1>)<EOL>) for i, d in enumerate(args)<EOL>], axis=<NUM_LIT:1>))<EOL>data_cols = [col for col in res.columns if col[-<NUM_LIT:2>:] == '<STR_LIT>']<EOL>other_cols = [col for col in res.columns if col[-<NUM_LIT:2>:] != '<STR_LIT>']<EOL>res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='<STR_LIT>')<EOL>return res.dropna(subset=data_cols)<EOL>
Resample and align data for a defined frequency Args: *args: DataFrame of data to be aligned Returns: pd.DataFrame: aligned data with renamed columns Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> d1 price volume 2018-09-10 10:10:00+10:00 31.08 10166 2018-09-10 10:11:00+10:00 31.10 69981 2018-09-10 10:12:00+10:00 31.11 14343 2018-09-10 10:13:00+10:00 31.07 10096 2018-09-10 10:14:00+10:00 31.04 11506 2018-09-10 10:15:00+10:00 31.04 9718 >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> d2 price volume 2018-09-10 10:10:00+10:00 70.81 4749 2018-09-10 10:11:00+10:00 70.78 6762 2018-09-10 10:12:00+10:00 70.85 4908 2018-09-10 10:13:00+10:00 70.79 2002 2018-09-10 10:14:00+10:00 70.79 9170 2018-09-10 10:15:00+10:00 70.79 9791 >>> align_data(d1, d2) price_1 volume_1 price_2 volume_2 2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749 2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762 2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908 2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002 2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170 2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
f10702:m4
def cat_data(data_kw):
if len(data_kw) == <NUM_LIT:0>: return pd.DataFrame()<EOL>return pd.DataFrame(pd.concat([<EOL>data.assign(ticker=ticker).set_index('<STR_LIT>', append=True)<EOL>.unstack('<STR_LIT>').swaplevel(<NUM_LIT:0>, <NUM_LIT:1>, axis=<NUM_LIT:1>)<EOL>for ticker, data in data_kw.items()<EOL>], axis=<NUM_LIT:1>))<EOL>
Concatenate data with ticker as sub column index Args: data_kw: key = ticker, value = pd.DataFrame Returns: pd.DataFrame Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2}) >>> sample.columns MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['ticker', None]) >>> r = sample.transpose().iloc[:, :2] >>> r.index.names = (None, None) >>> r 2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00 BHP AU price 31.08 31.10 volume 10,166.00 69,981.00 RIO AU price 70.81 70.78 volume 4,749.00 6,762.00
f10702:m5
def flatten(iterable, maps=None, unique=False):
if iterable is None: return []<EOL>if maps is None: maps = dict()<EOL>if isinstance(iterable, (str, int, float)):<EOL><INDENT>return [maps.get(iterable, iterable)]<EOL><DEDENT>else:<EOL><INDENT>x = [maps.get(item, item) for item in _to_gen_(iterable)]<EOL>return list(set(x)) if unique else x<EOL><DEDENT>
Flatten any array of items to list Args: iterable: any array or value maps: map items to values unique: drop duplicates Returns: list: flattened list References: https://stackoverflow.com/a/40857703/1332656 Examples: >>> flatten('abc') ['abc'] >>> flatten(1) [1] >>> flatten(1.) [1.0] >>> flatten(['ab', 'cd', ['xy', 'zz']]) ['ab', 'cd', 'xy', 'zz'] >>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'}) ['ab', '0x', 'zz']
f10702:m6
def _to_gen_(iterable):
from collections import Iterable<EOL>for elm in iterable:<EOL><INDENT>if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):<EOL><INDENT>yield from flatten(elm)<EOL><DEDENT>else: yield elm<EOL><DEDENT>
Recursively iterate lists and tuples
f10702:m7
def to_frame(data_list, exc_cols=None, **kwargs):
from collections import OrderedDict<EOL>return pd.DataFrame(<EOL>pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs<EOL>).drop(columns=[] if exc_cols is None else exc_cols)<EOL>
Dict in Python 3.6 keeps insertion order, but this cannot be relied upon. This method keeps column names in order. In Python 3.7 this method is redundant Args: data_list: list of dict exc_cols: exclude columns Returns: pd.DataFrame Example: >>> d_list = [ ... dict(sid=1, symbol='1 HK', price=89), ... dict(sid=700, symbol='700 HK', price=350) ... ] >>> to_frame(d_list) sid symbol price 0 1 1 HK 89 1 700 700 HK 350 >>> to_frame(d_list, exc_cols=['price']) sid symbol 0 1 1 HK 1 700 700 HK
f10702:m8
def spline_curve(x, y, step, val_min=<NUM_LIT:0>, val_max=None, kind='<STR_LIT>', **kwargs):
from scipy.interpolate import interp1d<EOL>from collections import OrderedDict<EOL>if isinstance(y, pd.DataFrame):<EOL><INDENT>return pd.DataFrame(OrderedDict([(col, spline_curve(<EOL>x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind<EOL>)) for col in y.columns]))<EOL><DEDENT>fitted_curve = interp1d(x, y, kind=kind, **kwargs)<EOL>new_x = np.arange(x.min(), x.max() + step / <NUM_LIT>, step=step)<EOL>return pd.Series(<EOL>new_x, index=new_x, name=y.name if hasattr(y, '<STR_LIT:name>') else None<EOL>).apply(fitted_curve).clip(val_min, val_max)<EOL>
Fit spline curve for given x, y values Args: x: x-values y: y-values step: step size for interpolation val_min: minimum value of result val_max: maximum value of result kind: for scipy.interpolate.interp1d Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’. **kwargs: additional parameters for interp1d Returns: pd.Series: fitted curve Examples: >>> x = pd.Series([1, 2, 3]) >>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)]) >>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate') >>> r.round(2).index.tolist() [1.0, 1.5, 2.0, 2.5, 3.0] >>> r.round(2).tolist() [3.0, 4.05, 7.39, 12.73, 18.0] >>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4])) >>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate') >>> r_df.round(2) a b 1.00 3.00 3.00 1.50 4.05 3.00 2.00 7.39 3.00 2.50 12.73 3.50 3.00 20.09 4.00
f10702:m9
def func_scope(func):
cur_mod = sys.modules[func.__module__]<EOL>return f'<STR_LIT>'<EOL>
Function scope name Args: func: python function Returns: str: module_name.func_name Examples: >>> func_scope(flatten) 'xone.utils.flatten' >>> func_scope(json.dump) 'json.dump'
f10702:m10
def format_float(digit=<NUM_LIT:0>, is_pct=False):
if is_pct:<EOL><INDENT>space = '<STR_LIT:U+0020>' if digit < <NUM_LIT:0> else '<STR_LIT>'<EOL>fmt = f'<STR_LIT>'<EOL>return lambda vv: '<STR_LIT>' if np.isnan(vv) else fmt.format(vv)<EOL><DEDENT>else:<EOL><INDENT>return lambda vv: '<STR_LIT>' if np.isnan(vv) else (<EOL>f'<STR_LIT>'.format(vv) if vv else '<STR_LIT:->' + '<STR_LIT:U+0020>' * abs(digit)<EOL>)<EOL><DEDENT>
Number display format for pandas Args: digit: number of digits to keep; if negative, add one space in front of positive pct is_pct: % display Returns: lambda function to format floats Examples: >>> format_float(0)(1e5) '100,000' >>> format_float(1)(1e5) '100,000.0' >>> format_float(-1, True)(.2) ' 20.0%' >>> format_float(-1, True)(-.2) '-20.0%' >>> pd.options.display.float_format = format_float(2)
f10702:m11
def fstr(fmt, **kwargs):
locals().update(kwargs)<EOL>return f'<STR_LIT>'<EOL>
Delayed evaluation of f-strings Args: fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq' **kwargs: variables for f-strings, i.e., path, file = '/data', 'daily' Returns: str: evaluated f-string References: https://stackoverflow.com/a/42497694/1332656 https://stackoverflow.com/a/4014070/1332656 Examples: >>> fmt = '{data_path}/{data_file}.parq' >>> fstr(fmt, data_path='your/data/path', data_file='sample') 'your/data/path/sample.parq'
f10702:m12
def to_str(data: dict, fmt='<STR_LIT>', sep='<STR_LIT:U+002CU+0020>', public_only=True):
if public_only: keys = list(filter(lambda vv: vv[<NUM_LIT:0>] != '<STR_LIT:_>', data.keys()))<EOL>else: keys = list(data.keys())<EOL>return '<STR_LIT:{>' + sep.join([<EOL>to_str(data=v, fmt=fmt, sep=sep)<EOL>if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)<EOL>for k, v in data.items() if k in keys<EOL>]) + '<STR_LIT:}>'<EOL>
Convert dict to string Args: data: dict fmt: how key and value are represented sep: how pairs of key and value are separated public_only: whether to display public members only Returns: str: string representation of dict Examples: >>> test_dict = dict(b=1, a=0, c=2, _d=3) >>> to_str(test_dict) '{b=1, a=0, c=2}' >>> to_str(test_dict, sep='|') '{b=1|a=0|c=2}' >>> to_str(test_dict, public_only=False) '{b=1, a=0, c=2, _d=3}'
f10702:m13
def inst_repr(instance, fmt='<STR_LIT:str>', public_only=True):
if not hasattr(instance, '<STR_LIT>'): return '<STR_LIT>'<EOL>if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[<NUM_LIT:0>] != '<STR_LIT:_>'}<EOL>else: inst_dict = instance.__dict__<EOL>if fmt == '<STR_LIT>': return json.dumps(inst_dict, indent=<NUM_LIT:2>)<EOL>elif fmt == '<STR_LIT:str>': return to_str(inst_dict, public_only=public_only)<EOL>return '<STR_LIT>'<EOL>
Generate class instance signature from its __dict__. From Python 3.6, dict is ordered and the order of attributes is preserved automatically. Args: instance: class instance fmt: ['json', 'str'] public_only: if display public members only Returns: str: string or json representation of instance Examples: >>> inst_repr(1) '' >>> class SampleClass(object): ... def __init__(self): ... self.b = 3 ... self.a = 4 ... self._private_ = 'hidden' >>> >>> s = SampleClass() >>> inst_repr(s) '{b=3, a=4}' >>> inst_repr(s, public_only=False) '{b=3, a=4, _private_=hidden}' >>> json.loads(inst_repr(s, fmt='json')) {'b': 3, 'a': 4} >>> inst_repr(s, fmt='unknown') ''
f10702:m14
def load_module(full_path):
from importlib import util<EOL>file_name = full_path.replace('<STR_LIT:\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>if file_name[-<NUM_LIT:3>:] != '<STR_LIT>':<EOL><INDENT>raise ImportError(f'<STR_LIT>')<EOL><DEDENT>module_name = file_name[:-<NUM_LIT:3>]<EOL>spec = util.spec_from_file_location(name=module_name, location=full_path)<EOL>module = util.module_from_spec(spec=spec)<EOL>spec.loader.exec_module(module=module)<EOL>return module<EOL>
Load module from full path Args: full_path: module full path name Returns: python module References: https://stackoverflow.com/a/67692/1332656 Examples: >>> import os >>> >>> cur_file = os.path.abspath(__file__).replace('\\\\', '/') >>> cur_path = '/'.join(cur_file.split('/')[:-1]) >>> load_module(f'{cur_path}/files.py').__name__ 'files' >>> load_module(f'{cur_path}/files.pyc') Traceback (most recent call last): ImportError: not a python file: files.pyc
f10702:m15
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):
if max_procs is None: max_procs = cpu_count()<EOL>kw_arr = saturate_kwargs(keys=keys, **kwargs)<EOL>if len(kw_arr) == <NUM_LIT:0>: return<EOL>if isinstance(affinity, int):<EOL><INDENT>win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)<EOL><DEDENT>task_queue = queue.Queue()<EOL>while len(kw_arr) > <NUM_LIT:0>:<EOL><INDENT>for _ in range(max_procs):<EOL><INDENT>if len(kw_arr) == <NUM_LIT:0>: break<EOL>kw = kw_arr.pop(<NUM_LIT:0>)<EOL>p = Process(target=func, kwargs=kw)<EOL>p.start()<EOL>sys.stdout.flush()<EOL>task_queue.put(p)<EOL>if show_proc:<EOL><INDENT>signature = '<STR_LIT:U+002CU+0020>'.join([f'<STR_LIT>' for k, v in kw.items()])<EOL>print(f'<STR_LIT>')<EOL><DEDENT><DEDENT>while not task_queue.empty():<EOL><INDENT>p = task_queue.get()<EOL>p.join()<EOL><DEDENT><DEDENT>
Provide interface for multiprocessing Args: func: callable function keys: keys in kwargs to parallelize over max_procs: max number of processes show_proc: whether to show process info affinity: CPU affinity **kwargs: kwargs for func
f10703:m0
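A usage sketch: fan the symbol keyword out across worker processes (the function body and symbols are illustrative):

def download(symbol, start):
    print(f'downloading {symbol} since {start}')

run(download, keys='symbol', max_procs=4,
    symbol=['BHP AU', 'RIO AU', 'CBA AU'], start='2019-01-01')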
def saturate_kwargs(keys, **kwargs):
<EOL>if isinstance(keys, str): keys = [keys]<EOL>keys = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '<STR_LIT>')]<EOL>if len(keys) == <NUM_LIT:0>: return []<EOL>kw_corr = list(product(*(range(len(kwargs[k])) for k in keys)))<EOL>kw_arr = []<EOL>for corr in kw_corr: kw_arr.append(<EOL>dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))]))<EOL>)<EOL>for k in keys: kwargs.pop(k, None)<EOL>kw_arr = [{**k, **kwargs} for k in kw_arr]<EOL>return kw_arr<EOL>
Saturate all combinations of kwargs Args: keys: keys in kwargs to expand into all combinations **kwargs: kwargs for func
f10703:m1
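For example, assuming list-valued keys qualify for expansion:

saturate_kwargs(keys=['a', 'b'], a=[1, 2], b=['x', 'y'], c=0)
# -> [{'a': 1, 'b': 'x', 'c': 0}, {'a': 1, 'b': 'y', 'c': 0},
#     {'a': 2, 'b': 'x', 'c': 0}, {'a': 2, 'b': 'y', 'c': 0}]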
def parse_description():
readme_file = f'<STR_LIT>'<EOL>if path.exists(readme_file):<EOL><INDENT>with open(readme_file, '<STR_LIT:r>', encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>long_description = f.read()<EOL><DEDENT>return long_description<EOL><DEDENT>return '<STR_LIT>'<EOL>
Parse the description in the README file
f10704:m1
def __init__(self, input_file=None):
self.files = {}<EOL>self.template = templates.BasicTemplate()<EOL>if input_file is not None and os.path.isfile(input_file):<EOL><INDENT>self.load_file(input_file)<EOL><DEDENT>
Create Converter instance :param input_file Will be loaded as a base :type str or unicode
f10709:c0:m0
def load_file(self, input_file):
pyimg = imp.load_source('<STR_LIT>', input_file)<EOL>self.files = pyimg.data<EOL>self.set_template(templates.templateByName(pyimg.template))<EOL>
Loads data array from file (result of this converter) Tries to import, load and replace files' data. It will overwrite items previously added with #add_file or #load_file. :param input_file :type str or unicode
f10709:c0:m1
def set_template(self, template):
if isinstance(template, templates.BasicTemplate):<EOL><INDENT>self.template = template<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>
Sets template to be used when generating output :param template Template instance :type instance of BasicTemplate
f10709:c0:m2
def save(self, filename=None):
if filename is None:<EOL><INDENT>raise IOError('<STR_LIT>')<EOL><DEDENT>cnt = self.output()<EOL>with (open(filename, '<STR_LIT>')) as f:<EOL><INDENT>f.write(cnt.encode('<STR_LIT:utf-8>'))<EOL><DEDENT>
Generates output and saves to given file :param filename File name :type str or unicode
f10709:c0:m3
def add_file(self, filename):
with (open(filename, '<STR_LIT:rb>')) as f:<EOL><INDENT>data = f.read()<EOL><DEDENT>fname = os.path.basename(filename)<EOL>self.files[fname] = base64.b64encode(data)<EOL>
Reads the given file's content and adds it to the data array that will be used to generate output :param filename File name to add :type str or unicode
f10709:c0:m4
def remove_file(self, filename):
del self.files[os.path.basename(filename)]<EOL>
Removes item from data array associated with filename :param filename File name :type str or unicode
f10709:c0:m5
def output(self):
if len(self.files) < <NUM_LIT:1>:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return self.template.render(self.files)<EOL>
Generates output from data array :returns Generated Python file :rtype str or unicode
f10709:c0:m6
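A usage sketch of the Converter class these methods belong to (the file names are hypothetical):

conv = Converter()
conv.add_file('logo.png')      # content is base64-encoded into the data array
conv.add_file('style.css')
conv.remove_file('style.css')
conv.save('bundle.py')         # renders the active template and writes the output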
def sync(self):
self.sync_groups()<EOL>self.sync_users()<EOL>
Wrapper method to sync both groups and users.
f10713:c0:m2
def sync_groups(self):
if self.settings.GROUP_FILTER:<EOL><INDENT>ldap_groups = self.ldap.search(self.settings.GROUP_FILTER, self.settings.GROUP_ATTRIBUTES.keys())<EOL>self._sync_ldap_groups(ldap_groups)<EOL>logger.info("<STR_LIT>")<EOL><DEDENT>
Synchronize LDAP groups with local group model.
f10713:c0:m3
def sync_users(self):
if self.settings.USER_FILTER:<EOL><INDENT>user_attributes = self.settings.USER_ATTRIBUTES.keys() + self.settings.USER_EXTRA_ATTRIBUTES<EOL>ldap_users = self.ldap.search(self.settings.USER_FILTER, user_attributes)<EOL>self._sync_ldap_users(ldap_users)<EOL>logger.info("<STR_LIT>")<EOL><DEDENT>
Synchronize LDAP users with local user model.
f10713:c0:m4
def user_active_directory_enabled(user, attributes, created, updated):
try:<EOL><INDENT>user_account_control = int(attributes['<STR_LIT>'][<NUM_LIT:0>])<EOL>if user_account_control & <NUM_LIT:2>:<EOL><INDENT>user.is_active = False<EOL><DEDENT>else:<EOL><INDENT>user.is_active = True<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>
Activate/deactivate user accounts based on Active Directory's userAccountControl flags. Requires 'userAccountControl' to be included in LDAP_SYNC_USER_EXTRA_ATTRIBUTES.
f10717:m0
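The activation test is a bit check against the ACCOUNTDISABLE flag (0x2) of userAccountControl; for example (attribute value is illustrative):

attributes = {'userAccountControl': ['514']}                    # 514 = 0x202, a typical disabled account
disabled = bool(int(attributes['userAccountControl'][0]) & 2)   # True -> the account is disabled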
def removed_user_deactivate(user):
if user.is_active:<EOL><INDENT>user.is_active = False<EOL>user.save()<EOL><DEDENT>
Deactivate user accounts that no longer appear in the source LDAP server.
f10717:m1
def removed_user_delete(user):
user.delete()<EOL>
Delete user accounts that no longer appear in the source LDAP server.
f10717:m2
def search(self, filterstr, attrlist):
return self._paged_search_ext_s(self.settings.BASE, ldap.SCOPE_SUBTREE, filterstr=filterstr,<EOL>attrlist=attrlist, page_size=self.settings.PAGE_SIZE)<EOL>
Query the configured LDAP server.
f10718:c0:m5
def _paged_search_ext_s(self, base, scope, filterstr='<STR_LIT>', attrlist=None, attrsonly=<NUM_LIT:0>,<EOL>serverctrls=None, clientctrls=None, timeout=-<NUM_LIT:1>, sizelimit=<NUM_LIT:0>, page_size=<NUM_LIT:10>):
request_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='<STR_LIT>')<EOL>results = []<EOL>while True:<EOL><INDENT>msgid = self.conn.search_ext(base, scope, filterstr=filterstr, attrlist=attrlist, attrsonly=attrsonly,<EOL>serverctrls=(serverctrls or []) + [request_ctrl], clientctrls=clientctrls,<EOL>timeout=timeout, sizelimit=sizelimit)<EOL>result_type, result_data, result_msgid, result_ctrls = self.conn.result3(msgid)<EOL>results.extend(result_data)<EOL>paged_ctrls = [c for c in result_ctrls if c.controlType == SimplePagedResultsControl.controlType]<EOL>if paged_ctrls and paged_ctrls[<NUM_LIT:0>].cookie:<EOL><INDENT>request_ctrl.cookie = paged_ctrls[<NUM_LIT:0>].cookie<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return results<EOL>
Behaves similarly to LDAPObject.search_ext_s() but internally uses the simple paged results control to retrieve search results in chunks. Taken from the python-ldap paged_search_ext_s.py demo, showing how to use the paged results control: https://bitbucket.org/jaraco/python-ldap/
f10718:c0:m6
def __init__(self, prefix='<STR_LIT>'):
for name, default in self.defaults.items():<EOL><INDENT>value = getattr(settings, prefix + name, default)<EOL>setattr(self, name, value)<EOL><DEDENT>self.validate()<EOL>
Load settings from Django configuration.
f10721:c0:m0
def validate(self):
if self.GROUP_ATTRIBUTES and self.GROUPNAME_FIELD not in self.GROUP_ATTRIBUTES.values():<EOL><INDENT>raise ImproperlyConfigured("<STR_LIT>" % self.GROUPNAME_FIELD)<EOL><DEDENT>if not self.model._meta.get_field(self.USERNAME_FIELD).unique:<EOL><INDENT>raise ImproperlyConfigured("<STR_LIT>" % self.USERNAME_FIELD)<EOL><DEDENT>if self.USER_ATTRIBUTES and self.USERNAME_FIELD not in self.USER_ATTRIBUTES.values():<EOL><INDENT>raise ImproperlyConfigured("<STR_LIT>" % self.USERNAME_FIELD)<EOL><DEDENT>
Apply validation rules for loaded settings.
f10721:c0:m1
@task<EOL>def clean(docs=False, bytecode=False, extra='<STR_LIT>'):
patterns = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>for pattern in patterns:<EOL><INDENT>print('<STR_LIT>'.format(pattern))<EOL>lrun('<STR_LIT>'.format(pattern))<EOL><DEDENT>
Cleanup all build artifacts
f10738:m1
@task<EOL>def cover():
lrun('<STR_LIT>'<EOL>'<STR_LIT>', pty=True)<EOL>
Run tests suite with coverage
f10738:m3
@task<EOL>def tox():
lrun('<STR_LIT>', pty=True)<EOL>
Run test in all Python versions
f10738:m4
@task<EOL>def qa():
lrun('<STR_LIT>')<EOL>
Run a quality report
f10738:m5
@task<EOL>def doc():
lrun('<STR_LIT>', pty=True)<EOL>
Build the documentation
f10738:m6
@task<EOL>def completion():
lrun('<STR_LIT>', pty=True)<EOL>
Generate bash completion script
f10738:m7
@task<EOL>def dist():
lrun('<STR_LIT>', pty=True)<EOL>
Package for distribution
f10738:m8
@task(clean, tox, doc, qa, dist, default=True)<EOL>def all():
pass<EOL>
Run tests, reports and packaging
f10738:m9
def rst(filename):
content = codecs.open(filename, encoding='<STR_LIT:utf-8>').read()<EOL>for regex, replacement in PYPI_RST_FILTERS:<EOL><INDENT>content = re.sub(regex, replacement, content)<EOL><DEDENT>return content<EOL>
Load rst file and sanitize it for PyPI. Remove unsupported github tags: - code-block directive - all badges
f10739:m0
def resolve_pattern(pattern):
if os.path.exists(pattern) and os.path.isdir(pattern):<EOL><INDENT>pattern = os.path.join(pattern, '<STR_LIT>')<EOL><DEDENT>return recursive_glob(pattern)<EOL>
Resolve a glob pattern into a file list
f10740:m1
@click.command(context_settings=CONTEXT_SETTINGS)<EOL>@click.argument('<STR_LIT>', nargs=-<NUM_LIT:1>)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.INT, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=click.Path(), help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=click.Path(), help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=click.Path(), help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=click.Path(), help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.File('<STR_LIT:r>'), help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', default=DEFAULT_UNIT,<EOL>type=click.Choice(UNIT_PERCENTS + UNIT_SECONDS),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.INT, default=DEFAULT_PRECISION,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>def cli(patterns, times, json, csv, rst, md, ref, unit, precision, debug):
if ref:<EOL><INDENT>ref = JSON.load(ref)<EOL><DEDENT>filenames = []<EOL>reporters = [CliReporter(ref=ref, debug=debug, unit=unit, precision=precision)]<EOL>kwargs = {}<EOL>for pattern in patterns or ['<STR_LIT>']:<EOL><INDENT>filenames.extend(resolve_pattern(pattern))<EOL><DEDENT>if json:<EOL><INDENT>reporters.append(JsonReporter(json, precision=precision))<EOL><DEDENT>if csv:<EOL><INDENT>reporters.append(CsvReporter(csv, precision=precision))<EOL><DEDENT>if rst:<EOL><INDENT>reporters.append(RstReporter(rst, precision=precision))<EOL><DEDENT>if md:<EOL><INDENT>reporters.append(MarkdownReporter(md, precision=precision))<EOL><DEDENT>if times:<EOL><INDENT>kwargs['<STR_LIT>'] = times<EOL><DEDENT>runner = BenchmarkRunner(*filenames, reporters=reporters, debug=debug)<EOL>runner.run(**kwargs)<EOL>
Execute minibench benchmarks
f10740:m2