Apply standard deviation filter to remove anomalous values.
def noise_despike(sig, win=3, nlim=24., maxiter=4):
    """
    Apply standard deviation filter to remove anomalous values.

    Parameters
    ----------
    win : int
        The window used to calculate rolling statistics.
    nlim : float
        The number of standard deviations above the rolling
        mean above which data are considered outliers.

    Returns
    -------
    None
    """
    if win % 2 != 1:
        win += 1  # win must be odd

    kernel = np.ones(win) / win  # make convolution kernel
    over = np.ones(len(sig), dtype=bool)  # initialize bool array
    # pad edges to avoid edge-effects
    npad = int((win - 1) / 2)
    over[:npad] = False
    over[-npad:] = False

    # set up monitoring
    nloops = 0

    # do the despiking
    while any(over) and (nloops < maxiter):
        rmean = np.convolve(sig, kernel, 'valid')  # mean by convolution
        rstd = rmean**0.5  # std = sqrt(signal), because count statistics
        # identify where signal > mean + std * nlim (OR signal < mean - std * nlim)
        # | (sig[npad:-npad] < rmean - nlim * rstd)
        over[npad:-npad] = (sig[npad:-npad] > rmean + nlim * rstd)
        # if any are over, replace them with mean of neighbours
        if any(over):
            # replace with values either side
            # sig[over] = sig[np.roll(over, -1) | np.roll(over, 1)].reshape((sum(over), 2)).mean(1)
            # replace with mean
            sig[npad:-npad][over[npad:-npad]] = rmean[over[npad:-npad]]
        nloops += 1
    # repeat until no more removed.
    return sig
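A minimal usage sketch (not from the source; it assumes numpy is available as np, as the function itself does, and the spike position and sizes are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
sig = rng.poisson(lam=1000, size=200).astype(float)  # count-like baseline
sig[50] = 5000.0  # inject a hypothetical anomalous spike

clean = noise_despike(sig.copy(), win=3, nlim=24.)
print(sig[50], clean[50])  # the spike is replaced by the local rolling mean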
Apply exponential decay filter to remove physically impossible data based on instrumental washout.
def expdecay_despike(sig, expdecay_coef, tstep, maxiter=3):
    """
    Apply exponential decay filter to remove physically impossible data based on instrumental washout.

    The filter is re-applied until no more points are removed, or maxiter is reached.

    Parameters
    ----------
    expdecay_coef : float
        Exponent used in filter
    tstep : float
        The time increment between data points.
    maxiter : int
        The maximum number of times the filter should be applied.

    Returns
    -------
    None
    """
    # determine rms noise of data
    noise = np.std(sig[:5])  # initially, calculated based on first 5 points
    # expand the selection up to 50 points, unless it dramatically increases
    # the std (i.e. catches the 'laser on' region)
    for i in [10, 20, 30, 50]:
        inoise = np.std(sig[:i])
        if inoise < 1.5 * noise:
            noise = inoise
    rms_noise3 = 3 * noise

    i = 0
    f = True
    while (i < maxiter) and f:
        # calculate low and high possible values based on exponential decay
        siglo = np.roll(sig * np.exp(tstep * expdecay_coef), 1)
        sighi = np.roll(sig * np.exp(-tstep * expdecay_coef), -1)

        # identify points that are outside these limits, beyond what might be
        # explained by noise in the data
        loind = (sig < siglo - rms_noise3) & (sig < np.roll(sig, -1) - rms_noise3)
        hiind = (sig > sighi + rms_noise3) & (sig > np.roll(sig, 1) + rms_noise3)

        # replace all such values with their preceding value
        sig[loind] = sig[np.roll(loind, -1)]
        sig[hiind] = sig[np.roll(hiind, -1)]

        f = any(np.concatenate([loind, hiind]))
        i += 1

    return sig
**f** must return the same stack type as **self.value** has. Iterates over the effects, sequences the inner instance successively to the top and joins with the outer instance. Example: List(Right(Just(1))) => List(Right(Just(List(Right(Just(5)))))) => List(List(Right(Just(Right(Just(5)))))) => List(Right(Just(Right(Just(5))))) => List(Right(Right(Just(Just(5))))) => List(Right(Just(Just(5)))) => List(Right(Just(5))) Note: IO works only as outermost effect, as it cannot sequence
def _flat_map(self, f: Callable):
    '''**f** must return the same stack type as **self.value** has.
    Iterates over the effects, sequences the inner instance successively
    to the top and joins with the outer instance.
    Example:
    List(Right(Just(1))) =>
    List(Right(Just(List(Right(Just(5)))))) =>
    List(List(Right(Just(Right(Just(5)))))) =>
    List(Right(Just(Right(Just(5))))) =>
    List(Right(Right(Just(Just(5))))) =>
    List(Right(Just(Just(5)))) =>
    List(Right(Just(5)))
    Note: IO works only as outermost effect, as it cannot sequence
    '''
    index = List.range(self.depth + 1)
    g = index.fold_left(f)(lambda z, i: lambda a: a.map(z))
    nested = g(self.value)

    def sequence_level(z, depth, tpe):
        nesting = lambda z, i: lambda a: a.map(z).sequence(tpe)
        lifter = List.range(depth).fold_left(I)(nesting)
        return z // lifter

    def sequence_type(z, data):
        return lambda a: sequence_level(a, *data).map(z)

    h = self.all_effects.reversed.with_index.fold_left(I)(sequence_type)
    return h(nested)
Add filter.
def add(self, name, filt, info='', params=(), setn=None):
    """
    Add filter.

    Parameters
    ----------
    name : str
        filter name
    filt : array_like
        boolean filter array
    info : str
        informative description of the filter
    params : tuple
        parameters used to make the filter

    Returns
    -------
    None
    """
    iname = '{:.0f}_'.format(self.n) + name
    self.index[self.n] = iname

    if setn is None:
        setn = self.maxset + 1
    self.maxset = setn

    if setn not in self.sets.keys():
        self.sets[setn] = [iname]
    else:
        self.sets[setn].append(iname)

    # self.keys is not added to?
    self.components[iname] = filt
    self.info[iname] = info
    self.params[iname] = params
    for a in self.analytes:
        self.switches[a][iname] = False

    self.n += 1
    return
Remove filter.
def remove(self, name=None, setn=None):
    """
    Remove filter.

    Parameters
    ----------
    name : str
        name of the filter to remove
    setn : int or True
        int: number of set to remove
        True: remove all filters in set that 'name' belongs to

    Returns
    -------
    None
    """
    if isinstance(name, int):
        name = self.index[name]

    if setn is not None:
        name = self.sets[setn]
        del self.sets[setn]
    elif isinstance(name, (int, str)):
        name = [name]

    if setn is True:
        for n in name:
            for k, v in self.sets.items():
                if n in v:
                    name.append([m for m in v if m != n])

    for n in name:
        for k, v in self.sets.items():
            if n in v:
                self.sets[k] = [m for m in v if n != m]
        del self.components[n]
        del self.info[n]
        del self.params[n]
        del self.keys[n]
        for a in self.analytes:
            del self.switches[a][n]
    return
Clear all filters.
def clear(self):
    """
    Clear all filters.
    """
    self.components = {}
    self.info = {}
    self.params = {}
    self.switches = {}
    self.keys = {}
    self.index = {}
    self.sets = {}
    self.maxset = -1
    self.n = 0
    for a in self.analytes:
        self.switches[a] = {}
    return
Remove unused filters.
def clean(self):
    """
    Remove unused filters.
    """
    for f in sorted(self.components.keys()):
        unused = not any(self.switches[a][f] for a in self.analytes)
        if unused:
            self.remove(f)
Turn on specified filter(s) for specified analyte(s).
def on(self, analyte=None, filt=None):
    """
    Turn on specified filter(s) for specified analyte(s).

    Parameters
    ----------
    analyte : optional, str or array_like
        Name or list of names of analytes.
        Defaults to all analytes.
    filt : optional, int, str or array_like
        Name/number or iterable names/numbers of filters.

    Returns
    -------
    None
    """
    if isinstance(analyte, str):
        analyte = [analyte]
    if isinstance(filt, (int, float)):
        filt = [filt]
    elif isinstance(filt, str):
        filt = self.fuzzmatch(filt, multi=True)

    if analyte is None:
        analyte = self.analytes
    if filt is None:
        filt = list(self.index.values())

    for a in analyte:
        for f in filt:
            if isinstance(f, (int, float)):
                f = self.index[int(f)]
            try:
                self.switches[a][f] = True
            except KeyError:
                f = self.fuzzmatch(f, multi=False)
                self.switches[a][f] = True
            # for k in self.switches[a].keys():
            #     if f in k:
            #         self.switches[a][k] = True
    return
Make filter for specified analyte(s).
def make(self, analyte):
    """
    Make filter for specified analyte(s).

    Filter specified in filt.switches.

    Parameters
    ----------
    analyte : str or array_like
        Name or list of names of analytes.

    Returns
    -------
    array_like
        boolean filter
    """
    if analyte is None:
        analyte = self.analytes
    elif isinstance(analyte, str):
        analyte = [analyte]

    out = []
    for f in self.components.keys():
        for a in analyte:
            if self.switches[a][f]:
                out.append(f)

    key = ' & '.join(sorted(out))
    for a in analyte:
        self.keys[a] = key
    return self.make_fromkey(key)
Identify a filter by fuzzy string matching.
def fuzzmatch(self, fuzzkey, multi=False):
    """
    Identify a filter by fuzzy string matching.

    Partial ('fuzzy') matching performed by `fuzzywuzzy.fuzzy.ratio`

    Parameters
    ----------
    fuzzkey : str
        A string that partially matches one filter name more than the others.

    Returns
    -------
    The name of the most closely matched filter. : str
    """
    keys, ratios = np.array([(f, seqm(None, fuzzkey, f).ratio()) for f in self.components.keys()]).T
    mratio = max(ratios)

    if multi:
        return keys[ratios == mratio]
    else:
        if sum(ratios == mratio) == 1:
            return keys[ratios == mratio][0]
        else:
            raise ValueError("\nThe filter key provided ('{:}') matches two or more filter names equally well:\n".format(fuzzkey) +
                             ', '.join(keys[ratios == mratio]) + "\nPlease be more specific!")
Make filter from logical expression.
def make_fromkey(self, key):
    """
    Make filter from logical expression.

    Takes a logical expression as an input, and returns a filter. Used for advanced
    filtering, where combinations of nested and/or filters are desired. Filter names
    must exactly match the names listed by print(filt).

    Example:
        ``key = '(Filter_1 | Filter_2) & Filter_3'``
    is equivalent to:
        ``(Filter_1 OR Filter_2) AND Filter_3``
    statements in parentheses are evaluated first.

    Parameters
    ----------
    key : str
        logical expression describing filter construction.

    Returns
    -------
    array_like
        boolean filter
    """
    if key != '':
        def make_runable(match):
            return "self.components['" + self.fuzzmatch(match.group(0)) + "']"

        runable = re.sub('[^\(\)|& ]+', make_runable, key)
        return eval(runable)
    else:
        return ~np.zeros(self.size, dtype=bool)
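To make the eval step above concrete, here is a standalone sketch of the substitution it performs (the fuzzmatch lookup is replaced by an identity mapping, so the filter names are hypothetical):

import re

key = '(Filter_1 | Filter_2) & Filter_3'
runable = re.sub('[^\(\)|& ]+', lambda m: "self.components['%s']" % m.group(0), key)
print(runable)
# (self.components['Filter_1'] | self.components['Filter_2']) & self.components['Filter_3']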
Make logical expressions describing the filter(s) for specified analyte(s).
def make_keydict(self, analyte=None):
    """
    Make logical expressions describing the filter(s) for specified analyte(s).

    Parameters
    ----------
    analyte : optional, str or array_like
        Name or list of names of analytes.
        Defaults to all analytes.

    Returns
    -------
    dict
        containing the logical filter expression for each analyte.
    """
    if analyte is None:
        analyte = self.analytes
    elif isinstance(analyte, str):
        analyte = [analyte]

    out = {}
    for a in analyte:
        key = []
        for f in self.components.keys():
            if self.switches[a][f]:
                key.append(f)
        out[a] = ' & '.join(sorted(key))
    self.keydict = out
    return out
Flexible access to specific filter using any key format.
def grab_filt(self, filt, analyte=None):
    """
    Flexible access to specific filter using any key format.

    Parameters
    ----------
    filt : str, dict or bool
        either logical filter expression, dict of expressions, or a boolean
    analyte : str
        name of analyte the filter is for.

    Returns
    -------
    array_like
        boolean filter
    """
    if isinstance(filt, str):
        if filt in self.components:
            if analyte is None:
                return self.components[filt]
            else:
                if self.switches[analyte][filt]:
                    return self.components[filt]
        else:
            try:
                ind = self.make_fromkey(filt)
            except KeyError:
                print(("\n\n***Filter key invalid. Please consult "
                       "manual and try again."))
    elif isinstance(filt, dict):
        try:
            ind = self.make_fromkey(filt[analyte])
        except ValueError:
            print(("\n\n***Filter key invalid. Please consult manual "
                   "and try again.\nOR\nAnalyte missing from filter "
                   "key dict."))
    elif filt:
        ind = self.make(analyte)
    else:
        ind = ~np.zeros(self.size, dtype=bool)
    return ind
Extract filter components for specific analyte ( s ).
def get_components(self, key, analyte=None):
    """
    Extract filter components for specific analyte(s).

    Parameters
    ----------
    key : str
        string present in one or more filter names.
        e.g. 'Al27' will return all filters with 'Al27' in their names.
    analyte : str
        name of analyte the filter is for

    Returns
    -------
    boolean filter : array-like
    """
    out = {}
    for k, v in self.components.items():
        if key in k:
            if analyte is None:
                out[k] = v
            elif self.switches[analyte][k]:
                out[k] = v
    return out
Get info for all filters.
def get_info(self):
    """
    Get info for all filters.
    """
    out = ''
    for k in sorted(self.components.keys()):
        out += '{:s}: {:s}'.format(k, self.info[k]) + '\n'
    return out
Load data_file described by a dataformat dict.
def read_data(data_file, dataformat, name_mode):
    """
    Load data_file described by a dataformat dict.

    Parameters
    ----------
    data_file : str
        Path to data file, including extension.
    dataformat : dict
        A dataformat dict, see example below.
    name_mode : str
        How to identify sample names. If 'file_names' uses the input name of the
        file, stripped of the extension. If 'metadata_names' uses the 'name'
        attribute of the 'meta' sub-dictionary in dataformat. If any other str,
        uses this str as the sample name.

    Example
    -------
    >>> {'genfromtext_args': {'delimiter': ',',
                              'skip_header': 4},  # passed directly to np.genfromtxt
         'column_id': {'name_row': 3,  # which row contains the column names
                       'delimiter': ',',  # delimiter between column names
                       'timecolumn': 0,  # which column contains the 'time' variable
                       'pattern': '([A-z]{1,2}[0-9]{1,3})'},  # a regex pattern which captures the column names
         'meta_regex': {  # a dict of (line_no: ([descriptors], [regexs])) pairs
                        0: (['path'], '(.*)'),
                        2: (['date', 'method'],  # MUST include date
                            '([A-Z][a-z]+ [0-9]+ [0-9]{4}[ ]+[0-9:]+ [amp]+).* ([A-z0-9]+\.m)')
                       }
        }

    Returns
    -------
    sample, analytes, data, meta : tuple
    """
    with open(data_file) as f:
        lines = f.readlines()

    if 'meta_regex' in dataformat.keys():
        meta = Bunch()
        for k, v in dataformat['meta_regex'].items():
            try:
                out = re.search(v[-1], lines[int(k)]).groups()
            except:
                raise ValueError('Failed reading metadata when applying:\n regex: {}\nto\n line: {}'.format(v[-1], lines[int(k)]))
            for i in np.arange(len(v[0])):
                meta[v[0][i]] = out[i]
    else:
        meta = {}

    # sample name
    if name_mode == 'file_names':
        sample = os.path.basename(data_file).split('.')[0]
    elif name_mode == 'metadata_names':
        sample = meta['name']
    else:
        sample = name_mode

    # column and analyte names
    columns = np.array(lines[dataformat['column_id']['name_row']].strip().split(
        dataformat['column_id']['delimiter']))
    if 'pattern' in dataformat['column_id'].keys():
        pr = re.compile(dataformat['column_id']['pattern'])
        analytes = [pr.match(c).groups()[0] for c in columns if pr.match(c)]

    # do any required pre-formatting
    if 'preformat_replace' in dataformat.keys():
        with open(data_file) as f:
            fbuffer = f.read()
        for k, v in dataformat['preformat_replace'].items():
            fbuffer = re.sub(k, v, fbuffer)
        # read data
        read_data = np.genfromtxt(BytesIO(fbuffer.encode()),
                                  **dataformat['genfromtext_args']).T
    else:
        # read data
        read_data = np.genfromtxt(data_file,
                                  **dataformat['genfromtext_args']).T

    # data dict
    dind = np.zeros(read_data.shape[0], dtype=bool)
    for a in analytes:
        dind[columns == a] = True

    data = Bunch()
    data['Time'] = read_data[dataformat['column_id']['timecolumn']]

    # deal with time units
    if 'time_unit' in dataformat['column_id']:
        if isinstance(dataformat['column_id']['time_unit'], (float, int)):
            time_mult = dataformat['column_id']['time_unit']
        elif isinstance(dataformat['column_id']['time_unit'], str):
            unit_multipliers = {'ms': 1/1000,
                                'min': 60/1,
                                's': 1}
            try:
                time_mult = unit_multipliers[dataformat['column_id']['time_unit']]
            except:
                raise ValueError("In dataformat: time_unit must be a number, 'ms', 'min' or 's'")
        data['Time'] *= time_mult

    # convert raw data into counts
    # TODO: Is this correct? Should actually be per-analyte dwell?
    # if 'unit' in dataformat:
    #     if dataformat['unit'] == 'cps':
    #         tstep = data['Time'][1] - data['Time'][0]
    #         read_data[dind] *= tstep
    #     else:
    #         pass
    data['rawdata'] = Bunch(zip(analytes, read_data[dind]))
    data['total_counts'] = np.nansum(read_data[dind], 0)

    return sample, analytes, data, meta
Function for plotting Test User and LAtools data comparison.
def residual_plots(df, rep_stats=None, els=['Mg', 'Sr', 'Al', 'Mn', 'Fe', 'Cu', 'Zn', 'B']):
    """
    Function for plotting Test User and LAtools data comparison.

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe containing reference ('X/Ca_r'), test user ('X/Ca_t')
        and LAtools ('X123') data.
    rep_stats : dict
        Reproducibility stats of the reference data produced by
        `pairwise_reproducibility`
    els : list
        list of elements (names only) to plot.
    """
    # get corresponding analyte and ratio names
    As = []
    Rs = []
    analytes = [c for c in df.columns if ('/' not in c)]
    ratios = [c for c in df.columns if ('/' in c)]
    for e in els:
        if e == 'Sr':
            As.append('88Sr')
        elif e == 'Mg':
            As.append('24Mg')
        else:
            As.append([a for a in analytes if e in a][0])
        Rs.append([r for r in ratios if e in r][0])

    fig, axs = plt.subplots(len(els), 2, figsize=(5, len(els) * 2))

    for i, (e, a) in enumerate(zip(Rs, As)):
        lax, hax = axs[i]
        x = df.loc[:, e].values
        yl = df.loc[:, a].values
        c = element_colour(fmt_el(a))
        u = 'mmol/mol'

        # calculate residuals
        rl = yl - x

        # plot residuals
        lax.scatter(x, rl, c=c, s=15, lw=0.5, edgecolor='k', alpha=0.5)

        # plot PDFs
        rl = rl[~np.isnan(rl)]
        lims = np.percentile(rl, [99, 1])
        lims += lims.ptp() * np.array((-1.25, 1.25))
        bins = np.linspace(*lims, 100)
        kdl = stats.gaussian_kde(rl, .4)
        hax.fill_betweenx(bins, kdl(bins), facecolor=c, alpha=0.7,
                          edgecolor='k', lw=0.5, label='LAtools')
        hax.set_xlim([0, hax.get_xlim()[-1]])

        # axis labels, annotations and limits
        lax.set_ylabel(e + ' (' + u + ')')
        lax.text(.02, .02, fmt_RSS(rl), fontsize=8,
                 ha='left', va='bottom', transform=lax.transAxes)
        xlim = np.percentile(x[~np.isnan(x)], [0, 98])
        lax.set_xlim(xlim)

        for ax in axs[i]:
            ax.set_ylim(lims)
            # zero line and 2SD precision
            ax.axhline(0, c='k', ls='dashed', alpha=0.6)
            if rep_stats is not None:
                ax.axhspan(-rep_stats[e][0] * 2, rep_stats[e][0] * 2,
                           color=(0, 0, 0, 0.2), zorder=-1)
            if not ax.is_first_col():
                ax.set_yticklabels([])
            if ax.is_last_row():
                hax.set_xlabel('Density')
                lax.set_xlabel('Iolite User')
            if ax.is_first_row():
                lax.set_title('LAtools', loc='left')

    fig.tight_layout()
    return fig, axs
Compute comparison stats for test and LAtools data. Population-level similarity assessed by a Kolmogorov-Smirnov test. Individual similarity assessed by a pairwise Wilcoxon signed rank test. Trends in residuals assessed by regression analysis, where significance of the slope and intercept is determined by t-tests (both relative to zero). Parameters ---------- df : pandas.DataFrame A dataframe containing reference ('X/Ca_r'), test user ('X/Ca_t') and LAtools ('X123') data. els : list list of elements (names only) to plot. Returns ------- pandas.DataFrame
def comparison_stats(df, els=None):
    """
    Compute comparison stats for test and LAtools data.

    Population-level similarity assessed by a Kolmogorov-Smirnov test.
    Individual similarity assessed by a pairwise Wilcoxon signed rank test.
    Trends in residuals assessed by regression analysis, where significance of
    the slope and intercept is determined by t-tests (both relative to zero).

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe containing reference ('X/Ca_r'), test user ('X/Ca_t')
        and LAtools ('X123') data.
    els : list
        list of elements (names only) to plot.

    Returns
    -------
    pandas.DataFrame
    """
    if els is None:
        els = ['Li', 'Mg', 'Al', 'P', 'Ti', 'Y', 'La', 'Ce', 'Pr', 'Nd', 'Sm', 'Eu',
               'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Pb', 'Th', 'U']

    yl_stats = []
    for i, e in enumerate(els):
        x = df.loc[:, e + '_rd'].values
        yl = df.loc[:, e + '_la'].values
        yl_stats.append(summary_stats(x, yl, e))

    yl_stats = pd.concat(yl_stats).T
    return yl_stats.T
Function for logging method calls and parameters
def _log(func):
    """
    Function for logging method calls and parameters
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        a = func(self, *args, **kwargs)
        self.log.append(func.__name__ + ' :: args={} kwargs={}'.format(args, kwargs))
        return a
    return wrapper
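A small sketch of how the decorator records calls (the Analysis class here is hypothetical; all the wrapper needs is a self.log list to append to, and _log itself needs functools.wraps in scope):

from functools import wraps  # needed by _log itself

class Analysis:
    def __init__(self):
        self.log = []

    @_log
    def despike(self, win=3, nlim=24.):
        pass  # real work elided

a = Analysis()
a.despike(win=5)
print(a.log)  # ["despike :: args=() kwargs={'win': 5}"]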
Write an analysis log to a file.
def write_logfile(log, header, file_name):
    """
    Write an analysis log to a file.

    Parameters
    ----------
    log : list
        latools.analyse analysis log
    header : list
        File header lines.
    file_name : str
        Destination file. If no file extension specified, uses '.lalog'

    Returns
    -------
    str
        The path of the file written.
    """
    path, ext = os.path.splitext(file_name)
    if ext == '':
        ext = '.lalog'
    with open(path + ext, 'w') as f:
        f.write('\n'.join(header))
        f.write('\n'.join(log))
    return path + ext
Reads an latools analysis.log file, and returns dicts of arguments.
def read_logfile(log_file):
    """
    Reads an latools analysis.log file, and returns dicts of arguments.

    Parameters
    ----------
    log_file : str
        Path to an analysis.log file produced by latools.

    Returns
    -------
    runargs, paths : tuple
        Two dictionaries. runargs contains all the arguments required to run each
        step of analysis in the form (function_name, {'args': (), 'kwargs': {}}).
        paths contains the locations of the data directory and the SRM database
        used for analysis.
    """
    dirname = os.path.dirname(log_file) + '/'

    with open(log_file, 'r') as f:
        rlog = f.readlines()

    hashind = [i for i, n in enumerate(rlog) if '#' in n]

    pathread = re.compile('(.*) :: (.*)\n')
    paths = (pathread.match(l).groups() for l in rlog[hashind[0] + 1:hashind[-1]] if pathread.match(l))
    paths = {k: os.path.join(dirname, v) for k, v in paths}
    # paths = {k: os.path.abspath(v) for k, v in paths}

    logread = re.compile('([a-z_]+) :: args=(\(.*\)) kwargs=(\{.*\})')
    runargs = []
    for line in rlog[hashind[1] + 1:]:
        fname, args, kwargs = (logread.match(line).groups())
        runargs.append((fname, {'args': eval(args), 'kwargs': eval(kwargs)}))
        if fname == '__init__':
            runargs[-1][-1]['kwargs']['config'] = 'REPRODUCE'
            runargs[-1][-1]['kwargs']['dataformat'] = None
            runargs[-1][-1]['kwargs']['data_folder'] = paths['data_folder']
            if 'srm_table' in paths:
                runargs[-1][-1]['kwargs']['srm_file'] = paths['srm_table']

    return runargs, paths
Compresses the target directory, and saves it to ../name.zip
def zipdir(directory, name=None, delete=False):
    """
    Compresses the target directory, and saves it to ../name.zip

    Parameters
    ----------
    directory : str
        Path to the directory you want to compress.
        Compressed file will be saved at directory/../name.zip
    name : str (default=None)
        The name of the resulting zip file. If not specified, the
        name of the directory to be compressed is used.
    delete : bool
        If True, the uncompressed directory is deleted after the zip file
        has been created. Defaults to False.

    Returns
    -------
    None
    """
    if not os.path.isdir(directory) or not os.path.exists(directory):
        raise ValueError('Please provide a valid directory.')

    if name is None:
        name = directory.split('/')[-1]

    savepath = os.path.join(directory, os.path.pardir)

    # create zipfile
    with zipfile.ZipFile(os.path.join(savepath, name + '.zip'), 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, dirs, files in os.walk(directory):
            for f in files:
                zipf.write(os.path.join(root, f),
                           os.path.join(root.replace(directory, ''), f))

    if delete:
        shutil.rmtree(directory)

    return None
Extract contents of zip file into subfolder in parent directory. Parameters ---------- zip_file : str Path to zip file Returns ------- str : folder where the zip was extracted
def extract_zipdir(zip_file):
    """
    Extract contents of zip file into subfolder in parent directory.

    Parameters
    ----------
    zip_file : str
        Path to zip file

    Returns
    -------
    str : folder where the zip was extracted
    """
    if not os.path.exists(zip_file):
        raise ValueError('{} does not exist'.format(zip_file))
    directory = os.path.dirname(zip_file)
    filename = os.path.basename(zip_file)
    dirpath = os.path.join(directory, filename.replace('.zip', ''))

    with zipfile.ZipFile(zip_file, 'r', zipfile.ZIP_DEFLATED) as zipf:
        zipf.extractall(dirpath)

    return dirpath
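A hypothetical round trip with the two helpers above (the directory name is made up; zipdir writes the archive next to the directory and returns None, while extract_zipdir returns the destination folder):

zipdir('my_analysis/reports')                          # writes my_analysis/reports.zip
unpacked = extract_zipdir('my_analysis/reports.zip')   # extracts to my_analysis/reports
print(unpacked)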
Decorator that will try to login and redo an action before failing.
def autologin(function, timeout=TIMEOUT):
    """Decorator that will try to login and redo an action before failing."""
    @wraps(function)
    async def wrapper(self, *args, **kwargs):
        """Wrap a function with timeout."""
        try:
            async with async_timeout.timeout(timeout):
                return await function(self, *args, **kwargs)
        except (asyncio.TimeoutError, ClientError, Error):
            pass

        _LOGGER.debug("autologin")
        try:
            async with async_timeout.timeout(timeout):
                await self.login()
                return await function(self, *args, **kwargs)
        except (asyncio.TimeoutError, ClientError, Error):
            raise Error(str(function))

    return wrapper
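A sketch of how such a decorator is typically attached (this Modem stub is hypothetical; the real class lives in eternalegypt, and all the decorator needs is a login() coroutine on the instance for the retry to work):

class Modem:
    async def login(self):
        ...  # session setup elided

    @autologin
    async def information(self):
        ...  # the guarded request; retried once after a fresh login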
Example of printing the inbox.
async def get_information():
    """Example of printing the inbox."""
    jar = aiohttp.CookieJar(unsafe=True)
    websession = aiohttp.ClientSession(cookie_jar=jar)

    modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
    await modem.login(password=sys.argv[2])

    result = await modem.information()
    for sms in result.sms:
        pprint.pprint(sms)

    await modem.logout()
    await websession.close()
Example of sending a message.
async def send_message():
    """Example of sending a message."""
    jar = aiohttp.CookieJar(unsafe=True)
    websession = aiohttp.ClientSession(cookie_jar=jar)

    modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
    await modem.login(password=sys.argv[2])

    await modem.sms(phone=sys.argv[3], message=sys.argv[4])

    await modem.logout()
    await websession.close()
Example of printing the current upstream.
async def get_information():
    """Example of printing the current upstream."""
    jar = aiohttp.CookieJar(unsafe=True)
    websession = aiohttp.ClientSession(cookie_jar=jar)
    try:
        modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
        await modem.login(password=sys.argv[2])

        result = await modem.information()
        print("upstream: {}".format(result.upstream))
        print("serial_number: {}".format(result.serial_number))
        print("wire_connected: {}".format(result.wire_connected))
        print("mobile_connected: {}".format(result.mobile_connected))
        print("connection_text: {}".format(result.connection_text))
        print("connection_type: {}".format(result.connection_type))
        print("current_nw_service_type: {}".format(result.current_nw_service_type))
        print("current_ps_service_type: {}".format(result.current_ps_service_type))
        print("register_network_display: {}".format(result.register_network_display))
        print("roaming: {}".format(result.roaming))
        print("radio_quality: {}".format(result.radio_quality))
        print("rx_level: {}".format(result.rx_level))
        print("tx_level: {}".format(result.tx_level))
        print("current_band: {}".format(result.current_band))
        print("cell_id: {}".format(result.cell_id))

        await modem.logout()
    except eternalegypt.Error:
        print("Could not login")

    await websession.close()
Example of setting the failover mode.
async def set_failover_mode(mode):
    """Example of setting the failover mode."""
    jar = aiohttp.CookieJar(unsafe=True)
    websession = aiohttp.ClientSession(cookie_jar=jar)
    try:
        modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
        await modem.login(password=sys.argv[2])

        await modem.set_failover_mode(mode)

        await modem.logout()
    except eternalegypt.Error:
        print("Could not login")

    await websession.close()
Parse a file - like object or string.
def parse(file_or_string):
    """Parse a file-like object or string.

    Args:
        file_or_string (file, str): File-like object or string.

    Returns:
        ParseResults: instance of pyparsing parse results.
    """
    from mysqlparse.grammar.sql_file import sql_file_syntax

    if hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__'):
        return sql_file_syntax.parseString(file_or_string.read())
    elif isinstance(file_or_string, six.string_types):
        return sql_file_syntax.parseString(file_or_string)
    else:
        raise TypeError("Expected file-like or string object, but got '{type_name}' instead.".format(
            type_name=type(file_or_string).__name__,
        ))
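A usage sketch, assuming the surrounding mysqlparse package is installed (the SQL string is illustrative, and the statements/statement_type attribute names are an assumption about the grammar's naming):

import mysqlparse

sql = mysqlparse.parse("ALTER TABLE `django_user` ADD COLUMN `email` VARCHAR(255) NOT NULL;")
print(sql.statements[0].statement_type)  # e.g. 'ALTER'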
Return the link to the Jupyter nbviewer for the given notebook url
def nbviewer_link(url):
    """Return the link to the Jupyter nbviewer for the given notebook url"""
    if six.PY2:
        from urlparse import urlparse as urlsplit
    else:
        from urllib.parse import urlsplit
    info = urlsplit(url)
    domain = info.netloc
    url_type = 'github' if domain == 'github.com' else 'url'
    return 'https://nbviewer.jupyter.org/%s%s' % (url_type, info.path)
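For example (the notebook URL below is hypothetical): a GitHub URL keeps only its path under the github route, anything else goes through the url route:

print(nbviewer_link('https://github.com/psyplot/examples/blob/master/example.ipynb'))
# https://nbviewer.jupyter.org/github/psyplot/examples/blob/master/example.ipynb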
The string for creating the thumbnail of this example
def thumbnail_div(self):
    """The string for creating the thumbnail of this example"""
    return self.THUMBNAIL_TEMPLATE.format(
        snippet=self.get_description()[1],
        thumbnail=self.thumb_file,
        ref_name=self.reference)
The string for creating a code example for the gallery
def code_div(self):
    """The string for creating a code example for the gallery"""
    code_example = self.code_example
    if code_example is None:
        return None
    return self.CODE_TEMPLATE.format(
        snippet=self.get_description()[1],
        code=code_example,
        ref_name=self.reference)
The code example out of the notebook metadata
def code_example(self):
    """The code example out of the notebook metadata"""
    if self._code_example is not None:
        return self._code_example
    return getattr(self.nb.metadata, 'code_example', None)
The supplementary files of this notebook
def supplementary_files(self):
    """The supplementary files of this notebook"""
    if self._supplementary_files is not None:
        return self._supplementary_files
    return getattr(self.nb.metadata, 'supplementary_files', None)
The other supplementary files of this notebook
def other_supplementary_files(self):
    """The other supplementary files of this notebook"""
    if self._other_supplementary_files is not None:
        return self._other_supplementary_files
    return getattr(self.nb.metadata, 'other_supplementary_files', None)
The url on jupyter nbviewer for this notebook or None if unknown
def url(self):
    """The url on jupyter nbviewer for this notebook or None if unknown"""
    if self._url is not None:
        url = self._url
    else:
        url = getattr(self.nb.metadata, 'url', None)
    if url is not None:
        return nbviewer_link(url)
get the output file with the specified ending
def get_out_file(self, ending='rst'):
    """get the output file with the specified `ending`"""
    return os.path.splitext(self.outfile)[0] + os.path.extsep + ending
Process the notebook and create all the pictures and files
def process_notebook(self, disable_warnings=True):
    """Process the notebook and create all the pictures and files

    This method runs the notebook using the :mod:`nbconvert` and
    :mod:`nbformat` modules. It creates the :attr:`outfile` notebook,
    a python and a rst file"""
    infile = self.infile
    outfile = self.outfile
    in_dir = os.path.dirname(infile) + os.path.sep
    odir = os.path.dirname(outfile) + os.path.sep
    create_dirs(os.path.join(odir, 'images'))
    ep = nbconvert.preprocessors.ExecutePreprocessor(timeout=300)
    cp = nbconvert.preprocessors.ClearOutputPreprocessor(timeout=300)

    self.nb = nb = nbformat.read(infile, nbformat.current_nbformat)

    # disable warnings in the rst file
    if disable_warnings:
        for i, cell in enumerate(nb.cells):
            if cell['cell_type'] == 'code':
                cell = cell.copy()
                break
        cell = cell.copy()
        cell.source = """
import logging
logging.captureWarnings(True)
logging.getLogger('py.warnings').setLevel(logging.ERROR)
"""
        nb.cells.insert(i, cell)

    # write and process rst_file
    if self.preprocess:
        t = dt.datetime.now()
        logger.info('Processing %s', self.infile)
        try:
            ep.preprocess(nb, {'metadata': {'path': in_dir}})
        except nbconvert.preprocessors.execute.CellExecutionError:
            logger.critical('Error while processing %s!', self.infile, exc_info=True)
        else:
            logger.info('Done. Seconds needed: %i', (dt.datetime.now() - t).seconds)
        if disable_warnings:
            nb.cells.pop(i)

    self.py_file = self.get_out_file('py')

    if self.remove_tags:
        tp = nbconvert.preprocessors.TagRemovePreprocessor(timeout=300)
        for key, val in self.tag_options.items():
            setattr(tp, key, set(val))
        nb4rst = deepcopy(nb)
        tp.preprocess(nb4rst, {'metadata': {'path': in_dir}})
    else:
        nb4rst = nb

    self.create_rst(nb4rst, in_dir, odir)

    if self.clear:
        cp.preprocess(nb, {'metadata': {'path': in_dir}})
    # write notebook file
    nbformat.write(nb, outfile)
    self.create_py(nb)
Create the rst file from the notebook node
def create_rst(self, nb, in_dir, odir):
    """Create the rst file from the notebook node"""
    raw_rst, resources = nbconvert.export_by_name('rst', nb)
    # remove ipython magics
    rst_content = ''
    i0 = 0
    m = None
    # HACK: we insert the bokeh style sheets here as well, since for some
    # themes (e.g. the sphinx_rtd_theme) it is not sufficient to include
    # the style sheets only via app.add_stylesheet
    bokeh_str = ''
    if 'bokeh' in raw_rst and self.insert_bokeh:
        bokeh_str += self.BOKEH_TEMPLATE.format(version=self.insert_bokeh)
    if 'bokeh' in raw_rst and self.insert_bokeh_widgets:
        bokeh_str += self.BOKEH_WIDGETS_TEMPLATE.format(version=self.insert_bokeh_widgets)
    for m in code_blocks.finditer(raw_rst):
        lines = m.group().splitlines(True)
        header, content = lines[0], ''.join(lines[1:])
        no_magics = magic_patt.sub('\g<1>', content)
        # if the code cell only contained magic commands, we skip it
        if no_magics.strip():
            rst_content += (raw_rst[i0:m.start()] + bokeh_str + header + no_magics)
            bokeh_str = ''
            i0 = m.end()
        else:
            rst_content += raw_rst[i0:m.start()]
            i0 = m.end()
    if m is not None:
        rst_content += bokeh_str + raw_rst[m.end():]
    else:
        rst_content = raw_rst

    rst_content = '.. _%s:\n\n' % self.reference + rst_content

    url = self.url
    if url is not None:
        rst_content += self.CODE_DOWNLOAD_NBVIEWER.format(
            pyfile=os.path.basename(self.py_file),
            nbfile=os.path.basename(self.outfile),
            url=url)
    else:
        rst_content += self.CODE_DOWNLOAD.format(
            pyfile=os.path.basename(self.py_file),
            nbfile=os.path.basename(self.outfile))

    supplementary_files = self.supplementary_files
    other_supplementary_files = self.other_supplementary_files
    if supplementary_files or other_supplementary_files:
        for f in (supplementary_files or []) + (other_supplementary_files or []):
            if not os.path.exists(os.path.join(odir, f)):
                copyfile(os.path.join(in_dir, f), os.path.join(odir, f))
    if supplementary_files:
        rst_content += self.data_download(supplementary_files)

    rst_file = self.get_out_file()
    outputs = sorted(resources['outputs'], key=rst_content.find)
    base = os.path.join('images', os.path.splitext(
        os.path.basename(self.infile))[0] + '_%i.png')
    out_map = {os.path.basename(original): base % i
               for i, original in enumerate(outputs)}
    for original, final in six.iteritems(out_map):
        rst_content = rst_content.replace(original, final)

    with open(rst_file, 'w') as f:
        f.write(rst_content.rstrip() + '\n')

    pictures = []
    for original in outputs:
        fname = os.path.join(odir, out_map[os.path.basename(original)])
        pictures.append(fname)
        if six.PY3:
            f = open(fname, 'w+b')
        else:
            f = open(fname, 'w')
        f.write(resources['outputs'][original])
        f.close()
    self.pictures = pictures
Create the python script from the notebook node
def create_py(self, nb, force=False):
    """Create the python script from the notebook node"""
    # Although we would love to simply use ``nbconvert.export_python(nb)``
    # this causes troubles in other cells processed by the ipython
    # directive. Instead of getting something like ``Out [5]:``, we get
    # some weird like '[0;31mOut[5]: ' which look like
    # color information if we allow the call of nbconvert.export_python
    if list(map(int, re.findall('\d+', nbconvert.__version__))) >= [4, 2]:
        py_file = os.path.basename(self.py_file)
    else:
        py_file = self.py_file
    try:
        level = logger.logger.level
    except AttributeError:
        level = logger.level
    spr.call(['jupyter', 'nbconvert', '--to=python',
              '--output=' + py_file, '--log-level=%s' % level,
              self.outfile])
    with open(self.py_file) as f:
        py_content = f.read()
    # comment out ipython magics
    py_content = re.sub('^\s*get_ipython\(\).magic.*', '# \g<0>',
                        py_content, flags=re.MULTILINE)
    with open(self.py_file, 'w') as f:
        f.write(py_content)
Create the rst string to download supplementary data
def data_download(self, files):
    """Create the rst string to download supplementary data"""
    if len(files) > 1:
        return self.DATA_DOWNLOAD % (
            ('\n\n' + ' ' * 8) + ('\n' + ' ' * 8).join(
                '* :download:`%s`' % f for f in files))
    return self.DATA_DOWNLOAD % ':download:`%s`' % files[0]
Create the thumbnail for html output
def create_thumb(self):
    """Create the thumbnail for html output"""
    thumbnail_figure = self.copy_thumbnail_figure()
    if thumbnail_figure is not None:
        if isinstance(thumbnail_figure, six.string_types):
            pic = thumbnail_figure
        else:
            pic = self.pictures[thumbnail_figure]
        self.save_thumbnail(pic)
    else:
        for pic in self.pictures[::-1]:
            if pic.endswith('png'):
                self.save_thumbnail(pic)
                return
Get summary and description of this notebook
def get_description(self):
    """Get summary and description of this notebook"""

    def split_header(s, get_header=True):
        s = s.lstrip().rstrip()
        parts = s.splitlines()
        if parts[0].startswith('#'):
            if get_header:
                header = re.sub('#+\s*', '', parts.pop(0))
                if not parts:
                    return header, ''
            else:
                header = ''
            rest = '\n'.join(parts).lstrip().split('\n\n')
            desc = rest[0].replace('\n', ' ')
            return header, desc
        else:
            if get_header:
                if parts[0].startswith(('=', '-')):
                    parts = parts[1:]
                header = parts.pop(0)
                if parts and parts[0].startswith(('=', '-')):
                    parts.pop(0)
                if not parts:
                    return header, ''
            else:
                header = ''
            rest = '\n'.join(parts).lstrip().split('\n\n')
            desc = rest[0].replace('\n', ' ')
            return header, desc

    first_cell = self.nb['cells'][0]
    if not first_cell['cell_type'] == 'markdown':
        return '', ''
    header, desc = split_header(first_cell['source'])
    if not desc and len(self.nb['cells']) > 1:
        second_cell = self.nb['cells'][1]
        if second_cell['cell_type'] == 'markdown':
            _, desc = split_header(second_cell['source'], False)
    return header, desc
Scales an image with the same aspect ratio, centered in an image with a given max_width and max_height. If in_fname == out_fname the image can only be scaled down.
def scale_image(self, in_fname, out_fname, max_width, max_height):
    """Scales an image with the same aspect ratio centered in an
    image with a given max_width and max_height.

    If in_fname == out_fname the image can only be scaled down.
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = max_width / float(width_in)
    scale_h = max_height / float(height_in)

    if height_in * scale_w <= max_height:
        scale = scale_w
    else:
        scale = scale_h

    if scale >= 1.0 and in_fname == out_fname:
        return

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # resize the image
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)

    # insert centered
    thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
    pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
    thumb.paste(img, pos_insert)

    thumb.save(out_fname)
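A worked example of the scale selection above (pure arithmetic, no PIL required; the sizes are made up):

width_in, height_in = 800, 600
max_width, max_height = 400, 280
scale_w = max_width / float(width_in)    # 0.5
scale_h = max_height / float(height_in)  # ~0.467
scale = scale_w if height_in * scale_w <= max_height else scale_h
print(scale)  # ~0.467: a 373x280 thumbnail, centered on a 400x280 canvas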
Save the thumbnail image
def save_thumbnail(self, image_path):
    """Save the thumbnail image"""
    thumb_dir = os.path.join(os.path.dirname(image_path), 'thumb')
    create_dirs(thumb_dir)
    thumb_file = os.path.join(thumb_dir, '%s_thumb.png' % self.reference)
    if os.path.exists(image_path):
        logger.info('Scaling %s to thumbnail %s', image_path, thumb_file)
        self.scale_image(image_path, thumb_file, 400, 280)
    self.thumb_file = thumb_file
The integer of the thumbnail figure
def copy_thumbnail_figure(self):
    """The integer of the thumbnail figure"""
    ret = None
    if self._thumbnail_figure is not None:
        if not isstring(self._thumbnail_figure):
            ret = self._thumbnail_figure
        else:
            ret = osp.join(osp.dirname(self.outfile),
                           osp.basename(self._thumbnail_figure))
            copyfile(self._thumbnail_figure, ret)
        return ret
    elif hasattr(self.nb.metadata, 'thumbnail_figure'):
        if not isstring(self.nb.metadata.thumbnail_figure):
            ret = self.nb.metadata.thumbnail_figure
        else:
            ret = osp.join(osp.dirname(self.outfile), 'images',
                           osp.basename(self.nb.metadata.thumbnail_figure))
            copyfile(osp.join(osp.dirname(self.infile),
                              self.nb.metadata.thumbnail_figure),
                     ret)
        return ret
Create the rst files from the input directories in the :attr:`in_dir` attribute
def process_directories(self):
    """Create the rst files from the input directories in the
    :attr:`in_dir` attribute"""
    for i, (base_dir, target_dir, paths) in enumerate(zip(
            self.in_dir, self.out_dir, map(os.walk, self.in_dir))):
        self._in_dir_count = i
        self.recursive_processing(base_dir, target_dir, paths)
Method to recursively process the notebooks in the `base_dir`
def recursive_processing(self, base_dir, target_dir, it):
    """Method to recursively process the notebooks in the `base_dir`

    Parameters
    ----------
    base_dir: str
        Path to the base example directory (see the `examples_dir`
        parameter for the :class:`Gallery` class)
    target_dir: str
        Path to the output directory for the rst files (see the
        `gallery_dirs` parameter for the :class:`Gallery` class)
    it: iterable
        The iterator over the subdirectories and files in `base_dir`
        generated by the :func:`os.walk` function"""
    try:
        file_dir, dirs, files = next(it)
    except StopIteration:
        return '', []
    readme_files = {'README.md', 'README.rst', 'README.txt'}
    if readme_files.intersection(files):
        foutdir = file_dir.replace(base_dir, target_dir)
        create_dirs(foutdir)
        this_nbps = [
            NotebookProcessor(
                infile=f,
                outfile=os.path.join(foutdir, os.path.basename(f)),
                disable_warnings=self.disable_warnings,
                preprocess=(
                    (self.preprocess is True or f in self.preprocess) and
                    not (self.dont_preprocess is True or
                         f in self.dont_preprocess)),
                clear=((self.clear is True or f in self.clear) and not
                       (self.dont_clear is True or f in self.dont_clear)),
                code_example=self.code_examples.get(f),
                supplementary_files=self.supplementary_files.get(f),
                other_supplementary_files=self.osf.get(f),
                thumbnail_figure=self.thumbnail_figures.get(f),
                url=self.get_url(f.replace(base_dir, '')),
                **self._nbp_kws)
            for f in map(lambda f: os.path.join(file_dir, f),
                         filter(self.pattern.match, files))]
        readme_file = next(iter(readme_files.intersection(files)))
    else:
        return '', []
    labels = OrderedDict()
    this_label = 'gallery_' + foutdir.replace(os.path.sep, '_')
    if this_label.endswith('_'):
        this_label = this_label[:-1]
    for d in dirs:
        label, nbps = self.recursive_processing(base_dir, target_dir, it)
        if label:
            labels[label] = nbps
    s = ".. _%s:\n\n" % this_label
    with open(os.path.join(file_dir, readme_file)) as f:
        s += f.read().rstrip() + '\n\n'
    s += "\n\n.. toctree::\n\n"
    s += ''.join('    %s\n' % os.path.splitext(os.path.basename(
        nbp.get_out_file()))[0] for nbp in this_nbps)
    for d in dirs:
        findex = os.path.join(d, 'index.rst')
        if os.path.exists(os.path.join(foutdir, findex)):
            s += '    %s\n' % os.path.splitext(findex)[0]
    s += '\n'
    for nbp in this_nbps:
        code_div = nbp.code_div
        if code_div is not None:
            s += code_div + '\n'
        else:
            s += nbp.thumbnail_div + '\n'
    s += "\n.. raw:: html\n\n    <div style='clear:both'></div>\n"
    for label, nbps in labels.items():
        s += '\n.. only:: html\n\n    .. rubric:: :ref:`%s`\n\n' % label
        for nbp in nbps:
            code_div = nbp.code_div
            if code_div is not None:
                s += code_div + '\n'
            else:
                s += nbp.thumbnail_div + '\n'
        s += "\n.. raw:: html\n\n    <div style='clear:both'></div>\n"
    s += '\n'
    with open(os.path.join(foutdir, 'index.rst'), 'w') as f:
        f.write(s)
    return this_label, list(chain(this_nbps, *labels.values()))
Class method to create a :class:`Gallery` instance from the configuration of a sphinx application
def from_sphinx(cls, app):
    """Class method to create a :class:`Gallery` instance from the
    configuration of a sphinx application"""
    app.config.html_static_path.append(os.path.join(
        os.path.dirname(__file__), '_static'))
    config = app.config.example_gallery_config
    insert_bokeh = config.get('insert_bokeh')
    if insert_bokeh:
        if not isstring(insert_bokeh):
            import bokeh
            insert_bokeh = bokeh.__version__
        app.add_stylesheet(
            NotebookProcessor.BOKEH_STYLE_SHEET.format(version=insert_bokeh))
        app.add_javascript(
            NotebookProcessor.BOKEH_JS.format(version=insert_bokeh))
    insert_bokeh_widgets = config.get('insert_bokeh_widgets')
    if insert_bokeh_widgets:
        if not isstring(insert_bokeh_widgets):
            import bokeh
            insert_bokeh_widgets = bokeh.__version__
        app.add_stylesheet(
            NotebookProcessor.BOKEH_WIDGETS_STYLE_SHEET.format(
                version=insert_bokeh_widgets))
        app.add_javascript(
            NotebookProcessor.BOKEH_WIDGETS_JS.format(
                version=insert_bokeh_widgets))
    if not app.config.process_examples:
        return
    cls(**app.config.example_gallery_config).process_directories()
Return the url corresponding to the given notebook file
def get_url(self, nbfile):
    """Return the url corresponding to the given notebook file

    Parameters
    ----------
    nbfile: str
        The path of the notebook relative to the corresponding
        :attr:`in_dir`

    Returns
    -------
    str or None
        The url or None if no url has been specified"""
    urls = self.urls
    if isinstance(urls, dict):
        return urls.get(nbfile)
    elif isstring(urls):
        if not urls.endswith('/'):
            urls += '/'
        return urls + nbfile
command execution
def handle(self, *args, **options):
    """ command execution """
    assume_yes = options.get('assume_yes', False)
    default_language = options.get('default_language', None)

    # set manual transaction management
    transaction.commit_unless_managed()
    transaction.enter_transaction_management()
    transaction.managed(True)

    self.cursor = connection.cursor()
    self.introspection = connection.introspection

    self.default_lang = default_language or mandatory_language()

    all_models = get_models()
    found_db_change_fields = False
    for model in all_models:
        if hasattr(model._meta, 'translatable_fields'):
            model_full_name = '%s.%s' % (model._meta.app_label, model._meta.module_name)
            translatable_fields = get_all_translatable_fields(model, column_in_current_table=True)
            db_table = model._meta.db_table
            for field_name in translatable_fields:
                db_table_fields = self.get_table_fields(db_table)
                db_change_langs = list(set(list(self.get_db_change_languages(field_name, db_table_fields)) + [self.default_lang]))
                if db_change_langs:
                    sql_sentences = self.get_sync_sql(field_name, db_change_langs, model, db_table_fields)
                    if sql_sentences:
                        found_db_change_fields = True
                        print_db_change_langs(db_change_langs, field_name, model_full_name)
                        execute_sql = ask_for_confirmation(sql_sentences, model_full_name, assume_yes)
                        if execute_sql:
                            print('Executing SQL...')
                            for sentence in sql_sentences:
                                self.cursor.execute(sentence)
                            # commit
                            transaction.commit()
                            print('Done')
                        else:
                            print('SQL not executed')

    if transaction.is_dirty():
        transaction.commit()
    transaction.leave_transaction_management()

    if not found_db_change_fields:
        print('\nNo new translatable fields detected')

    if default_language:
        variable = 'TRANSMETA_DEFAULT_LANGUAGE'
        has_transmeta_default_language = getattr(settings, variable, False)
        if not has_transmeta_default_language:
            variable = 'LANGUAGE_CODE'
        if getattr(settings, variable) != default_language:
            print(('\n\nYou should change in your settings '
                   'the %s variable to "%s"' % (variable, default_language)))
get only db changed fields
def get_db_change_languages(self, field_name, db_table_fields):
    """ get only db changed fields """
    for lang_code, lang_name in get_languages():
        if get_real_fieldname(field_name, lang_code) not in db_table_fields:
            yield lang_code
    for db_table_field in db_table_fields:
        pattern = re.compile('^%s_(?P<lang>\w{2})$' % field_name)
        m = pattern.match(db_table_field)
        if not m:
            continue
        lang = m.group('lang')
        yield lang
returns the SQL needed to sync the schema for a new translatable field
def get_sync_sql(self, field_name, db_change_langs, model, db_table_fields):
    """ returns the SQL needed to sync the schema for a new translatable field """
    qn = connection.ops.quote_name
    style = no_style()
    sql_output = []
    db_table = model._meta.db_table
    was_translatable_before = self.was_translatable_before(field_name, db_table_fields)
    default_f = self.get_default_field(field_name, model)
    default_f_required = default_f and self.get_field_required_in_db(
        db_table, default_f.name, value_not_implemented=False)
    for lang in db_change_langs:
        new_field = get_real_fieldname(field_name, lang)
        try:
            f = model._meta.get_field(new_field)
            col_type = self.get_type_of_db_field(field_name, model)
            field_column = f.column
        except FieldDoesNotExist:  # columns in db, removed the settings.LANGUAGES
            field_column = new_field
            col_type = self.get_type_of_db_field(field_name, model)
        field_sql = [style.SQL_FIELD(qn(field_column)), style.SQL_COLTYPE(col_type)]
        alter_colum_set = 'ALTER COLUMN %s SET' % qn(field_column)
        if default_f:
            alter_colum_drop = 'ALTER COLUMN %s DROP' % qn(field_column)
        not_null = style.SQL_KEYWORD('NOT NULL')
        if 'mysql' in backend.__name__:
            alter_colum_set = 'MODIFY %s %s' % (qn(field_column), col_type)
            not_null = style.SQL_KEYWORD('NULL')
            if default_f:
                alter_colum_drop = 'MODIFY %s %s' % (qn(field_column), col_type)
        # column creation
        if new_field not in db_table_fields:
            sql_output.append("ALTER TABLE %s ADD COLUMN %s" % (qn(db_table), ' '.join(field_sql)))
        if lang == self.default_lang and not was_translatable_before:
            # data copy from old field (only for default language)
            sql_output.append("UPDATE %s SET %s = %s" % (qn(db_table),
                                                         qn(field_column), qn(field_name)))
            if not f.null:
                # changing to NOT NULL after having data copied
                sql_output.append("ALTER TABLE %s %s %s" %
                                  (qn(db_table), alter_colum_set,
                                   style.SQL_KEYWORD('NOT NULL')))
        elif default_f and not default_f.null:
            if lang == self.default_lang:
                f_required = self.get_field_required_in_db(db_table, field_column, value_not_implemented=False)
                if default_f.name == new_field and default_f_required:
                    continue
                if not f_required:
                    # data copy from old field (only for default language)
                    sql_output.append(("UPDATE %(db_table)s SET %(f_colum)s = '%(value_default)s' "
                                       "WHERE %(f_colum)s is %(null)s or %(f_colum)s = '' " %
                                       {'db_table': qn(db_table),
                                        'f_colum': qn(field_column),
                                        'value_default': self.get_value_default(),
                                        'null': style.SQL_KEYWORD('NULL'),
                                        }))
                    # changing to NOT NULL after having data copied
                    sql_output.append("ALTER TABLE %s %s %s" %
                                      (qn(db_table), alter_colum_set,
                                       style.SQL_KEYWORD('NOT NULL')))
            else:
                f_required = self.get_field_required_in_db(db_table, field_column, value_not_implemented=True)
                if f_required:
                    sql_output.append(("ALTER TABLE %s %s %s" % (qn(db_table), alter_colum_drop, not_null)))
    if not was_translatable_before:
        # we drop field only if field was no translatable before
        sql_output.append("ALTER TABLE %s DROP COLUMN %s" % (qn(db_table), qn(field_name)))
    return sql_output
returns all translatable fields in a model (including superclass ones)
def get_all_translatable_fields(model, model_trans_fields=None, column_in_current_table=False):
    """ returns all translatable fields in a model (including superclass ones) """
    if model_trans_fields is None:
        model_trans_fields = set()
    model_trans_fields.update(set(getattr(model._meta, 'translatable_fields', [])))
    for parent in model.__bases__:
        if getattr(parent, '_meta', None) and (not column_in_current_table or parent._meta.abstract):
            get_all_translatable_fields(parent, model_trans_fields, column_in_current_table)
    return tuple(model_trans_fields)
When accessing the name of the field itself, the value in the current language will be returned. Unless it's set, the value in the default language will be returned.
def default_value(field):
    '''
    When accessing the name of the field itself, the value
    in the current language will be returned. Unless it's set,
    the value in the default language will be returned.
    '''
    def default_value_func(self):
        attname = lambda x: get_real_fieldname(field, x)

        if getattr(self, attname(get_language()), None):
            result = getattr(self, attname(get_language()))
        elif getattr(self, attname(get_language()[:2]), None):
            result = getattr(self, attname(get_language()[:2]))
        else:
            default_language = fallback_language()
            if getattr(self, attname(default_language), None):
                result = getattr(self, attname(default_language), None)
            else:
                result = getattr(self, attname(settings.LANGUAGE_CODE), None)
        return result

    return default_value_func
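A sketch of how such a descriptor factory is typically wired up (this model is hypothetical; transmeta generates the per-language columns itself via its metaclass, the explicit fields here just make the idea visible):

from django.db import models

class Article(models.Model):
    title_en = models.CharField(max_length=100)
    title_es = models.CharField(max_length=100)
    # reading article.title picks title_<active language>,
    # falling back to the default language
    title = property(default_value('title'))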
Post processors are functions that receive file objects, perform necessary operations and return the results as file objects.
def process(thumbnail_file, size, **kwargs):
    """
    Post processors are functions that receive file objects,
    perform necessary operations and return the results as
    file objects.
    """
    from . import conf
    size_dict = conf.SIZES[size]
    for processor in size_dict['POST_PROCESSORS']:
        processor['processor'](thumbnail_file, **processor['kwargs'])
    return thumbnail_file
A post processing function to optimize file size. Accepts commands to optimize JPG, PNG and GIF images as arguments. Example:
def optimize(thumbnail_file, jpg_command=None, png_command=None, gif_command=None):
    """
    A post processing function to optimize file size. Accepts commands
    to optimize JPG, PNG and GIF images as arguments. Example:

    THUMBNAILS = {
        # Other options...
        'POST_PROCESSORS': [
            {
                'processor': 'thumbnails.post_processors.optimize',
                'png_command': 'optipng -force -o3 "%(filename)s"',
                'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
            },
        ],
    }

    Note: using output redirection in commands may cause unpredictable results.
    For example 'optipng -force -o3 "%(filename)s" &> /dev/null' may cause
    optimize command to fail on some systems.
    """
    temp_dir = get_or_create_temp_dir()
    thumbnail_filename = os.path.join(temp_dir, "%s" % shortuuid.uuid())
    f = open(thumbnail_filename, 'wb')
    f.write(thumbnail_file.read())
    f.close()

    # Detect filetype
    filetype = imghdr.what(thumbnail_filename)

    # Construct command to optimize image based on filetype
    command = None
    if filetype == "jpg" or filetype == "jpeg":
        command = jpg_command
    elif filetype == "png":
        command = png_command
    elif filetype == "gif":
        command = gif_command

    # Run command
    if command:
        command = command % {'filename': thumbnail_filename}
        call(command, shell=True)

    optimized_file = File(open(thumbnail_filename, 'rb'))
    os.remove(thumbnail_filename)
    return optimized_file
Return an attribute from a dotted path name (e.g. "path.to.func"). Copied from nvie's rq https://github.com/nvie/rq/blob/master/rq/utils.py
def import_attribute(name):
    """
    Return an attribute from a dotted path name (e.g. "path.to.func").
    Copied from nvie's rq
    https://github.com/nvie/rq/blob/master/rq/utils.py
    """
    if hasattr(name, '__call__'):
        return name
    module_name, attribute = name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, attribute)
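Usage is straightforward; note the early return means already-callable objects pass through untouched:

join = import_attribute('os.path.join')   # same object as os.path.join
print(join('a', 'b'))                     # a/b
print(import_attribute(print) is print)   # True: callables pass through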
Returns a list of dictionaries that contain the imported processors and kwargs. For example, passing in:
def parse_processors(processor_definition):
    """
    Returns a list of dictionaries that contain the imported processors and kwargs.
    For example, passing in:

        processors = [
            {'processor': 'thumbnails.processors.resize', 'width': 10, 'height': 10},
            {'processor': 'thumbnails.processors.crop', 'width': 10, 'height': 10},
        ]

    Would return:

        [
            {'processor': resize_function, 'kwargs': {'width': 10, 'height': 10}},
            {'processor': crop_function, 'kwargs': {'width': 10, 'height': 10}},
        ]
    """
    parsed_processors = []
    for processor in processor_definition:
        processor_function = import_attribute(processor['PATH'])
        kwargs = deepcopy(processor)
        kwargs.pop('PATH')
        parsed_processors.append({
            'processor': processor_function,
            'kwargs': kwargs,
        })
    return parsed_processors
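A usage sketch (the 'PATH' entry below points at a real stdlib callable so the snippet runs standalone; in the app it would be a dotted path to a processor function):

definition = [
    {'PATH': 'os.path.join', 'width': 10, 'height': 10},
]
parsed = parse_processors(definition)
print(parsed[0]['kwargs'])  # {'width': 10, 'height': 10}
# callers then apply: parsed[0]['processor'](image, **parsed[0]['kwargs'])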
Process an image through its defined processors. params :file: filename or file-like object params :size: string for size defined in settings return a ContentFile
def process(file, size):
    """
    Process an image through its defined processors

    params :file: filename or file-like object
    params :size: string for size defined in settings

    return a ContentFile
    """
    from . import conf

    # open image in piccaso
    raw_image = images.from_file(file)

    # run through all processors, if defined
    size_dict = conf.SIZES[size]
    for processor in size_dict['PROCESSORS']:
        raw_image = processor['processor'](raw_image, **processor['kwargs'])

    # write to ContentFile
    image_io = io.BytesIO()
    raw_image.save(file=image_io)
    image_file = ContentFile(image_io.getvalue())
    return image_file
Process the source image through the defined processors.
def pre_save(self, model_instance, add):
    """
    Process the source image through the defined processors.
    """
    file = getattr(model_instance, self.attname)

    if file and not file._committed:
        image_file = file
        if self.resize_source_to:
            file.seek(0)
            image_file = processors.process(file, self.resize_source_to)
            image_file = post_processors.process(image_file, self.resize_source_to)
        filename = str(shortuuid.uuid()) + os.path.splitext(file.name)[1]
        file.save(filename, image_file, save=False)
    return file
Populate self._thumbnails.
def _refresh_cache(self):
    """Populate self._thumbnails."""
    self._thumbnails = {}
    metadatas = self.metadata_backend.get_thumbnails(self.source_image.name)
    for metadata in metadatas:
        self._thumbnails[metadata.size] = Thumbnail(metadata=metadata, storage=self.storage)
Return all thumbnails in a dict format.
def all(self):
    """
    Return all thumbnails in a dict format.
    """
    if self._thumbnails is not None:
        return self._thumbnails
    self._refresh_cache()
    return self._thumbnails
Returns a Thumbnail instance. First check whether thumbnail is already cached. If it doesn't: 1. Try to fetch the thumbnail 2. Create thumbnail if it's not present 3. Cache the thumbnail for future use
def get(self, size, create=True):
    """
    Returns a Thumbnail instance.
    First check whether thumbnail is already cached. If it doesn't:
    1. Try to fetch the thumbnail
    2. Create thumbnail if it's not present
    3. Cache the thumbnail for future use
    """
    if self._thumbnails is None:
        self._refresh_cache()

    thumbnail = self._thumbnails.get(size)
    if thumbnail is None:
        thumbnail = images.get(self.source_image.name, size,
                               self.metadata_backend, self.storage)
        if thumbnail is None:
            thumbnail = self.create(size)
        self._thumbnails[size] = thumbnail
    return thumbnail
Creates and returns a thumbnail of a given size.
def create(self, size):
    """ Creates and returns a thumbnail of a given size. """
    thumbnail = images.create(self.source_image.name, size,
                              self.metadata_backend, self.storage)
    return thumbnail
Deletes a thumbnail of a given size
def delete(self, size):
    """ Deletes a thumbnail of a given size. """
    images.delete(self.source_image.name, size,
                  self.metadata_backend, self.storage)
    del self._thumbnails[size]
Creates a thumbnail file and its relevant metadata. Returns a Thumbnail instance.
def create(source_name, size, metadata_backend=None, storage_backend=None): """ Creates a thumbnail file and its relevant metadata. Returns a Thumbnail instance. """ if storage_backend is None: storage_backend = backends.storage.get_backend() if metadata_backend is None: metadata_backend = backends.metadata.get_backend() thumbnail_file = processors.process(storage_backend.open(source_name), size) thumbnail_file = post_processors.process(thumbnail_file, size) name = get_thumbnail_name(source_name, size) name = storage_backend.save(name, thumbnail_file) metadata = metadata_backend.add_thumbnail(source_name, size, name) return Thumbnail(metadata=metadata, storage=storage_backend)
Returns a Thumbnail instance or None if thumbnail does not yet exist.
def get(source_name, size, metadata_backend=None, storage_backend=None): """ Returns a Thumbnail instance, or None if thumbnail does not yet exist. """ if storage_backend is None: storage_backend = backends.storage.get_backend() if metadata_backend is None: metadata_backend = backends.metadata.get_backend() metadata = metadata_backend.get_thumbnail(source_name, size) if metadata is None: return None else: return Thumbnail(metadata=metadata, storage=storage_backend)
Deletes a thumbnail file and its relevant metadata.
def delete(source_name, size, metadata_backend=None, storage_backend=None): """ Deletes a thumbnail file and its relevant metadata. """ if storage_backend is None: storage_backend = backends.storage.get_backend() if metadata_backend is None: metadata_backend = backends.metadata.get_backend() storage_backend.delete(get_thumbnail_name(source_name, size)) metadata_backend.delete_thumbnail(source_name, size)
Simulate an incoming message
def received(self, src, body):
    """ Simulate an incoming message

    :type src: str
    :param src: Message source
    :type body: str | unicode
    :param body: Message body
    :rtype: IncomingMessage
    """
    # Create the message
    self._msgid += 1
    message = IncomingMessage(src, body, self._msgid)

    # Log traffic
    self._traffic.append(message)

    # Handle it
    self._receive_message(message)

    # Finish
    return message
Register a virtual subscriber which receives messages to the matching number.
def subscribe(self, number, callback):
    """ Register a virtual subscriber which receives messages to the matching number.

    :type number: str
    :param number: Subscriber phone number
    :type callback: callable
    :param callback: A callback(OutgoingMessage) which handles the messages directed to the subscriber.
        The message object is augmented with a .reply(str) method which makes it easy to send a reply!
    :rtype: LoopbackProvider
    """
    self._subscribers[digits_only(number)] = callback
    return self
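A small sketch of a virtual subscriber; `lp` is assumed to be a LoopbackProvider instance and the phone number is a placeholder:

def autoresponder(message):
    # `message` is an OutgoingMessage augmented with a .reply(str) helper
    message.reply('Got: ' + message.body)

lp.subscribe('+1 (555) 010-0000', autoresponder)  # the number is reduced to digits internally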
Get the set of states. Mostly used for pretty printing
def states(self): """ Get the set of states. Mostly used for pretty printing :rtype: set :returns: Set of 'accepted', 'delivered', 'expired', 'error' """ ret = set() if self.accepted: ret.add('accepted') if self.delivered: ret.add('delivered') if self.expired: ret.add('expired') if self.error: ret.add('error') return ret
Register a provider on the gateway
def add_provider(self, name, Provider, **config): """ Register a provider on the gateway The first provider defined becomes the default one: used in case the routing function has no better idea. :type name: str :param name: Provider name that will be used to uniquely identify it :type Provider: type :param Provider: Provider class that inherits from `smsframework.IProvider` :param config: Provider configuration. Please refer to the Provider documentation. :rtype: IProvider :returns: The created provider """ assert issubclass(Provider, IProvider), 'Provider does not implement IProvider' assert isinstance(name, str), 'Provider name must be a string' # Configure provider = Provider(self, name, **config) # Register assert name not in self._providers, 'Provider is already registered' self._providers[name] = provider # If first - set default if self.default_provider is None: self.default_provider = name # Finish return provider
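A minimal registration sketch, assuming the LoopbackProvider bundled with smsframework; the provider name 'lo' is arbitrary:

from smsframework import Gateway
from smsframework.providers import LoopbackProvider

gateway = Gateway()
lp = gateway.add_provider('lo', LoopbackProvider)  # the first registered provider becomes the default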
Send a message object
def send(self, message):
    """ Send a message object

    :type message: data.OutgoingMessage
    :param message: The message to send
    :rtype: data.OutgoingMessage
    :returns: The sent message with populated fields
    :raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
    :raises MessageSendError: generic errors
    :raises AuthError: provider authentication failed
    :raises LimitsError: sending limits exceeded
    :raises CreditError: not enough money on the account
    """
    # Which provider to use?
    provider_name = self._default_provider  # default
    if message.provider is not None:
        assert message.provider in self._providers, \
            'Unknown provider specified in OutgoingMessage.provider: {}'.format(message.provider)
        provider = self.get_provider(message.provider)
    else:
        # Apply routing
        if message.routing_values is not None:
            # Routing values are present; the router may return None to fall back to the default provider
            provider_name = self.router(message, *message.routing_values) or self._default_provider
            assert provider_name in self._providers, \
                'Routing function returned an unknown provider name: {}'.format(provider_name)
        provider = self.get_provider(provider_name)

    # Set message provider name
    message.provider = provider.name

    # Send the message using the provider
    message = provider.send(message)

    # Emit the send event
    self.onSend(message)

    # Finish
    return message
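A brief routing sketch showing how `send` picks a provider; the provider name, routing value, and the assumption that `.route()` attaches routing values to the message are all illustrative:

from smsframework.data import OutgoingMessage

def router(message, *routing_values):
    # Return a provider name, or None to fall back to the default provider
    if routing_values and routing_values[0] == 'notify':
        return 'secondary'  # hypothetical provider name
    return None

gateway.router = router
gateway.send(OutgoingMessage('15550100000', 'hello').route('notify'))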
Get a Flask blueprint for the named provider that handles incoming messages & status reports
def receiver_blueprint_for(self, name): """ Get a Flask blueprint for the named provider that handles incoming messages & status reports Note: this requires Flask microframework. :rtype: flask.blueprints.Blueprint :returns: Flask Blueprint, fully functional :raises KeyError: provider not found :raises NotImplementedError: Provider does not implement a receiver """ # Get the provider & blueprint provider = self.get_provider(name) bp = provider.make_receiver_blueprint() # Register a Flask handler that initializes `g.provider` # This is the only way for the blueprint to get the current IProvider instance from flask.globals import g # local import as the user is not required to use receivers at all @bp.before_request def init_g(): g.provider = provider # Finish return bp
Get Flask blueprints for every provider that supports it
def receiver_blueprints(self):
    """ Get Flask blueprints for every provider that supports it

    Note: this requires Flask microframework.

    :rtype: dict
    :returns: A dict { provider-name: Blueprint }
    """
    blueprints = {}
    for name in self._providers:
        try:
            blueprints[name] = self.receiver_blueprint_for(name)
        except NotImplementedError:
            pass  # Ignore providers that do not support receivers
    return blueprints
Register all provider receivers on the provided Flask application under /{prefix}/provider-name
def receiver_blueprints_register(self, app, prefix='/'): """ Register all provider receivers on the provided Flask application under '/{prefix}/provider-name' Note: this requires Flask microframework. :type app: flask.Flask :param app: Flask app to register the blueprints on :type prefix: str :param prefix: URL prefix to hide the receivers under. You likely want some random stuff here so no stranger can simulate incoming messages. :rtype: flask.Flask """ # Register for name, bp in self.receiver_blueprints().items(): app.register_blueprint( bp, url_prefix='{prefix}{name}'.format( prefix='/'+prefix.strip('/')+'/' if prefix else '/', name=name ) ) # Finish return app
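A short wiring sketch; the random-looking prefix is made up, as the docstring itself suggests, and `gateway` is assumed to be a configured Gateway:

from flask import Flask

app = Flask(__name__)
gateway.receiver_blueprints_register(app, prefix='/sms/a8F3k1')
# each provider's receiver now lives under /sms/a8F3k1/<provider-name>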
Incoming message callback
def _receive_message(self, message): """ Incoming message callback Calls Gateway.onReceive event hook Providers are required to: * Cast phone numbers to digits-only * Support both ASCII and Unicode messages * Populate `message.msgid` and `message.meta` fields * If this method fails with an exception, the provider is required to respond with an error to the service :type message: IncomingMessage :param message: The received message :rtype: IncomingMessage """ # Populate fields message.provider = self.name # Fire the event hook self.gateway.onReceive(message) # Finish return message
Incoming status callback
def _receive_status(self, status): """ Incoming status callback Calls Gateway.onStatus event hook Providers are required to: * Cast phone numbers to digits-only * Use proper MessageStatus subclasses * Populate `status.msgid` and `status.meta` fields * If this method fails with an exception, the provider is required to respond with an error to the service :type status: MessageStatus :param status: The received status :rtype: MessageStatus """ # Populate fields status.provider = self.name # Fire the event hook self.gateway.onStatus(status) # Finish return status
Incoming message handler: forwarded by ForwardServerProvider
def im(): """ Incoming message handler: forwarded by ForwardServerProvider """ req = jsonex_loads(request.get_data()) message = g.provider._receive_message(req['message']) return {'message': message}
Incoming status handler: forwarded by ForwardServerProvider
def status(): """ Incoming status handler: forwarded by ForwardServerProvider """ req = jsonex_loads(request.get_data()) status = g.provider._receive_status(req['status']) return {'status': status}
Unserialize with JsonEx. :rtype: dict
def jsonex_loads(s): """ Unserialize with JsonEx :rtype: dict """ return json.loads(s.decode('utf-8'), cls=JsonExDecoder, classes=classes, exceptions=exceptions)
View wrapper for JsonEx responses. Catches exceptions as well
def jsonex_api(f): """ View wrapper for JsonEx responses. Catches exceptions as well """ @wraps(f) def wrapper(*args, **kwargs): # Call, catch exceptions try: code, res = 200, f(*args, **kwargs) except HTTPException as e: code, res = e.code, {'error': e} except Exception as e: code, res = 500, {'error': e} logger.exception('Method error') # Response response = make_response(jsonex_dumps(res), code) response.headers['Content-Type'] = 'application/json' return response return wrapper
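A hedged sketch of the decorator on a Flask view; the route and payload are illustrative and `app` is assumed to exist:

@app.route('/ping', methods=['POST'])
@jsonex_api
def ping():
    # the returned dict is serialized with JsonEx; raised exceptions become {'error': e}
    return {'pong': True}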
Parse authentication data from the URL and put it in the `headers` dict, with caching behavior. :param url: URL :type url: str :return: (URL without authentication info, headers dict) :rtype: str, dict
def _parse_authentication(url): """ Parse authentication data from the URL and put it in the `headers` dict. With caching behavior :param url: URL :type url: str :return: (URL without authentication info, headers dict) :rtype: str, dict """ u = url h = {} # New headers # Cache? if url in _parse_authentication._memoize: u, h = _parse_authentication._memoize[url] else: # Parse p = urlsplit(url, 'http') if p.username and p.password: # Prepare header h['Authorization'] = b'Basic ' + base64.b64encode(p.username.encode() + b':' + p.password.encode()) # Remove authentication info since urllib2.Request() does not understand it u = urlunsplit((p.scheme, p.netloc.split('@', 1)[1], p.path, p.query, p.fragment)) # Cache _parse_authentication._memoize[url] = (u, h) # Finish return u, h
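A worked example of the parse step; the credentials are placeholders, and the memoization dict is assumed to be initialized once alongside the function definition:

_parse_authentication._memoize = {}  # cache attribute, assumed to be set up next to the def

url, headers = _parse_authentication('http://user:pass@api.example.com/send')
# url     == 'http://api.example.com/send'
# headers == {'Authorization': b'Basic dXNlcjpwYXNz'}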
Make a request with JsonEx. :param url: URL :type url: str :param data: Data to POST :type data: dict :return: Response :rtype: dict :raises exc.ConnectionError: Connection error :raises exc.ServerError: Remote server error (unknown) :raises exc.ProviderError: any errors reported by the remote
def jsonex_request(url, data, headers=None): """ Make a request with JsonEx :param url: URL :type url: str :param data: Data to POST :type data: dict :return: Response :rtype: dict :raises exc.ConnectionError: Connection error :raises exc.ServerError: Remote server error (unknown) :raises exc.ProviderError: any errors reported by the remote """ # Authentication? url, headers = _parse_authentication(url) headers['Content-Type'] = 'application/json' # Request try: req = Request(url, headers=headers) response = urlopen(req, jsonex_dumps(data)) res_str = response.read() res = jsonex_loads(res_str) except HTTPError as e: if 'Content-Type' in e.headers and e.headers['Content-Type'] == 'application/json': res = jsonex_loads(e.read()) else: raise exc.ServerError('Server at "{}" failed: {}'.format(url, e)) except URLError as e: raise exc.ConnectionError('Connection to "{}" failed: {}'.format(url, e)) # Errors? if 'error' in res: # Exception object raise res['error'] # Error raised by the remote side return res
Send a message by forwarding it to the server. :param message: Message :type message: smsframework.data.OutgoingMessage :rtype: smsframework.data.OutgoingMessage :raise Exception: any exception reported by the other side :raise urllib2.URLError: Connection error
def send(self, message):
    """ Send a message by forwarding it to the server

    :param message: Message
    :type message: smsframework.data.OutgoingMessage
    :rtype: smsframework.data.OutgoingMessage
    :raise Exception: any exception reported by the other side
    :raise urllib2.URLError: Connection error
    """
    res = jsonex_request(self.server_url.rstrip('/') + '/im', {'message': message})
    msg = res['message']  # OutgoingMessage object

    # Replace properties in the original object (so it's the same object, like with other providers)
    for k, v in msg.__dict__.items():
        setattr(message, k, v)
    return message
Forward an object to a client. :type client: str :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus :rtype: smsframework.data.IncomingMessage|smsframework.data.MessageStatus :raise Exception: any exception reported by the other side
def _forward_object_to_client(self, client, obj): """ Forward an object to client :type client: str :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus :rtype: smsframework.data.IncomingMessage|smsframework.data.MessageStatus :raise Exception: any exception reported by the other side """ url, name = ('/im', 'message') if isinstance(obj, IncomingMessage) else ('/status', 'status') res = jsonex_request(client.rstrip('/') + '/' + url.lstrip('/'), {name: obj}) return res[name]
Forward an object to clients.
def forward(self, obj): """ Forward an object to clients. :param obj: The object to be forwarded :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus :raises Exception: if any of the clients failed """ assert isinstance(obj, (IncomingMessage, MessageStatus)), 'Tried to forward an object of an unsupported type: {}'.format(obj) clients = self.choose_clients(obj) if Parallel: pll = Parallel(self._forward_object_to_client) for client in clients: pll(client, obj) results, errors = pll.join() if errors: raise errors[0] else: for client in clients: self._forward_object_to_client(client, obj)
Returns a dictionary of dictionaries that contains critical information about the transport and protocol behavior, such as: * amount of received frames * amount of badly delimited frames * amount of correctly delimited but still corrupted frames * etc
def stats(self):
    """ Returns a dictionary of dictionaries that contains critical information
    about the transport and protocol behavior, such as:

    * amount of received frames
    * amount of badly delimited frames
    * amount of correctly delimited but still corrupted frames
    * etc
    """
    d = dict()
    d['framing'] = self.api.delimiter.stats()
    d['protocol'] = self.api.stats()
    return d
Get balance of address for `erc20_address`. :param address: owner address :param erc20_address: erc20 token address :return: balance
def get_balance(self, address: str, erc20_address: str) -> int: """ Get balance of address for `erc20_address` :param address: owner address :param erc20_address: erc20 token address :return: balance """ return get_erc20_contract(self.w3, erc20_address).functions.balanceOf(address).call()
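A hedged call sketch; the manager instance and owner address are placeholders (the token shown is the mainnet GNO address, used only as an example):

GNO = '0x6810e776880C02933D47DB1b9fc05908e5386b96'       # example erc20 token address
balance = erc20_manager.get_balance(owner_address, GNO)  # raw units, not adjusted by decimals()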
Get erc20 information (`name`, `symbol` and `decimals`). :param erc20_address: erc20 token address :return: Erc20_Info
def get_info(self, erc20_address: str) -> Erc20_Info: """ Get erc20 information (`name`, `symbol` and `decimals`) :param erc20_address: :return: Erc20_Info """ # We use the `example erc20` as the `erc20 interface` doesn't have `name`, `symbol` nor `decimals` erc20 = get_example_erc20_contract(self.w3, erc20_address) name = erc20.functions.name().call() symbol = erc20.functions.symbol().call() decimals = erc20.functions.decimals().call() return Erc20_Info(name, symbol, decimals)
Get events for erc20 transfers. At least one of `from_address`, `to_address` or `token_address` must be defined. An example of a returned event is reproduced in the docstring below. :param from_block: Block to start querying from :param to_block: Block to stop querying from :param from_address: Address sending the erc20 transfer :param to_address: Address receiving the erc20 transfer :param token_address: Address of the token :return: List of events :throws: ReadTimeout
def get_transfer_history(self, from_block: int, to_block: Optional[int] = None, from_address: Optional[str] = None, to_address: Optional[str] = None, token_address: Optional[str] = None) -> List[Dict[str, any]]: """ Get events for erc20 transfers. At least one of `from_address`, `to_address` or `token_address` must be defined An example of event: { "args": { "from": "0x1Ce67Ea59377A163D47DFFc9BaAB99423BE6EcF1", "to": "0xaE9E15896fd32E59C7d89ce7a95a9352D6ebD70E", "value": 15000000000000000 }, "event": "Transfer", "logIndex": 42, "transactionIndex": 60, "transactionHash": "0x71d6d83fef3347bad848e83dfa0ab28296e2953de946ee152ea81c6dfb42d2b3", "address": "0xfecA834E7da9D437645b474450688DA9327112a5", "blockHash": "0x054de9a496fc7d10303068cbc7ee3e25181a3b26640497859a5e49f0342e7db2", "blockNumber": 7265022 } :param from_block: Block to start querying from :param to_block: Block to stop querying from :param from_address: Address sending the erc20 transfer :param to_address: Address receiving the erc20 transfer :param token_address: Address of the token :return: List of events :throws: ReadTimeout """ assert from_address or to_address or token_address, 'At least one parameter must be provided' erc20 = get_erc20_contract(self.w3) argument_filters = {} if from_address: argument_filters['from'] = from_address if to_address: argument_filters['to'] = to_address return erc20.events.Transfer.createFilter(fromBlock=from_block, toBlock=to_block, address=token_address, argument_filters=argument_filters).get_all_entries()
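A hedged usage sketch; the manager instance, block number, and address are illustrative:

events = erc20_manager.get_transfer_history(
    from_block=7265000,
    to_address='0xaE9E15896fd32E59C7d89ce7a95a9352D6ebD70E',
)
for event in events:
    print(event['args']['from'], '->', event['args']['to'], event['args']['value'])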
Send tokens to address. :param to: :param amount: :param erc20_address: :param private_key: :return: tx_hash
def send_tokens(self, to: str, amount: int, erc20_address: str, private_key: str) -> bytes: """ Send tokens to address :param to: :param amount: :param erc20_address: :param private_key: :return: tx_hash """ erc20 = get_erc20_contract(self.w3, erc20_address) account = Account.privateKeyToAccount(private_key) tx = erc20.functions.transfer(to, amount).buildTransaction({'from': account.address}) return self.ethereum_client.send_unsigned_transaction(tx, private_key=private_key)
:param from_block: Quantity or Tag - (optional) From this block. `0` is not working, it needs to be `>= 1` :param to_block: Quantity or Tag - (optional) To this block. :param from_address: Array - (optional) Sent from these addresses. :param to_address: Address - (optional) Sent to these addresses. :param after: Quantity - (optional) The offset trace number :param count: Quantity - (optional) Integer number of traces to display in a batch. :return: a list of trace dictionaries; full 'call' and 'create' examples (including the complete init code and deployed bytecode) are reproduced in the docstring below.
def trace_filter(self, from_block: int = 1, to_block: Optional[int] = None, from_address: Optional[List[str]] = None, to_address: Optional[List[str]] = None, after: Optional[int] = None, count: Optional[int] = None) -> List[Dict[str, any]]: """ :param from_block: Quantity or Tag - (optional) From this block. `0` is not working, it needs to be `>= 1` :param to_block: Quantity or Tag - (optional) To this block. :param from_address: Array - (optional) Sent from these addresses. :param to_address: Address - (optional) Sent to these addresses. :param after: Quantity - (optional) The offset trace number :param count: Quantity - (optional) Integer number of traces to display in a batch. :return: [ { "action": { "callType": "call", "from": "0x32be343b94f860124dc4fee278fdcbd38c102d88", "gas": "0x4c40d", "input": "0x", "to": "0x8bbb73bcb5d553b5a556358d27625323fd781d37", "value": "0x3f0650ec47fd240000" }, "blockHash": "0x86df301bcdd8248d982dbf039f09faf792684e1aeee99d5b58b77d620008b80f", "blockNumber": 3068183, "result": { "gasUsed": "0x0", "output": "0x" }, "subtraces": 0, "traceAddress": [], "transactionHash": "0x3321a7708b1083130bd78da0d62ead9f6683033231617c9d268e2c7e3fa6c104", "transactionPosition": 3, "type": "call" }, { "action": { "from": "0x3b169a0fb55ea0b6bafe54c272b1fe4983742bf7", "gas": "0x49b0b", "init": "0x608060405234801561001057600080fd5b5060405161060a38038061060a833981018060405281019080805190602001909291908051820192919060200180519060200190929190805190602001909291908051906020019092919050505084848160008173ffffffffffffffffffffffffffffffffffffffff1614151515610116576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260248152602001807f496e76616c6964206d617374657220636f707920616464726573732070726f7681526020017f696465640000000000000000000000000000000000000000000000000000000081525060400191505060405180910390fd5b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550506000815111156101a35773ffffffffffffffffffffffffffffffffffffffff60005416600080835160208501846127105a03f46040513d6000823e600082141561019f573d81fd5b5050505b5050600081111561036d57600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156102b7578273ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f1935050505015156102b2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001807f436f756c64206e6f74207061792073616665206372656174696f6e207769746881526020017f206574686572000000000000000000000000000000000000000000000000000081525060400191505060405180910390fd5b61036c565b6102d1828483610377640100000000026401000000009004565b151561036b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001807f436f756c64206e6f74207061792073616665206372656174696f6e207769746881526020017f20746f6b656e000000000000000000000000000000000000000000000000000081525060400191505060405180910390fd5b5b5b5050505050610490565b600060608383604051602401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001828152602001925050506040516020818303038152906040527fa9059cbb000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090506000808251602084016000896127105a03f16040513d600082
3e3d60008114610473576020811461047b5760009450610485565b829450610485565b8151158315171594505b505050509392505050565b61016b8061049f6000396000f30060806040526004361061004c576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680634555d5c91461008b5780635c60da1b146100b6575b73ffffffffffffffffffffffffffffffffffffffff600054163660008037600080366000845af43d6000803e6000811415610086573d6000fd5b3d6000f35b34801561009757600080fd5b506100a061010d565b6040518082815260200191505060405180910390f35b3480156100c257600080fd5b506100cb610116565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60006002905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050905600a165627a7a7230582007fffd557dfc8c4d2fdf56ba6381a6ce5b65b6260e1492d87f26c6d4f1d0410800290000000000000000000000008942595a2dc5181df0465af0d7be08c8f23c93af00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000d9e09beaeb338d81a7c5688358df0071d498811500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b15f91a8c35300000000000000000000000000000000000000000000000000000000000001640ec78d9e00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000004000000000000000000000000f763ea5fbb191d47dc4b083dcdc3cdfb586468f8000000000000000000000000ad25c9717d04c0a12086a1d352c1ccf4bf5fcbf80000000000000000000000000da7155692446c80a4e7ad72018e586f20fa3bfe000000000000000000000000bce0cc48ce44e0ac9ee38df4d586afbacef191fa0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "value": "0x0" }, "blockHash": "0x03f9f64dfeb7807b5df608e6957dd4d521fd71685aac5533451d27f0abe03660", "blockNumber": 3793534, "result": { "address": "0x61a7cc907c47c133d5ff5b685407201951fcbd08", "code": "0x60806040526004361061004c576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680634555d5c91461008b5780635c60da1b146100b6575b73ffffffffffffffffffffffffffffffffffffffff600054163660008037600080366000845af43d6000803e6000811415610086573d6000fd5b3d6000f35b34801561009757600080fd5b506100a061010d565b6040518082815260200191505060405180910390f35b3480156100c257600080fd5b506100cb610116565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60006002905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050905600a165627a7a7230582007fffd557dfc8c4d2fdf56ba6381a6ce5b65b6260e1492d87f26c6d4f1d041080029", "gasUsed": "0x4683f" }, "subtraces": 2, "traceAddress": [], "transactionHash": "0x6c7e8f8778d33d81b29c4bd7526ee50a4cea340d69eed6c89ada4e6fab731789", "transactionPosition": 1, "type": "create" }, ... 
] """ assert from_address or to_address, 'You must provide at least `from_address` or `to_address`' parameters = {} if from_block: parameters['fromBlock'] = '0x%x' % from_block if to_block: parameters['toBlock'] = '0x%x' % to_block if from_address: parameters['fromAddress'] = from_address if to_address: parameters['toAddress'] = to_address if after: parameters['after'] = after if count: parameters['count'] = count try: return self._decode_traces(self.slow_w3.parity.traceFilter(parameters)) except ParityTraceDecodeException as exc: logger.warning('Problem decoding trace: %s - Retrying', exc) return self._decode_traces(self.slow_w3.parity.traceFilter(parameters))
Get a web3 provider for slow queries. The default `HTTPProvider` times out after 10 seconds. :param timeout: Timeout to configure for internal requests :return: A new web3 provider configured with the given timeout
def get_slow_provider(self, timeout: int):
    """ Get a web3 provider for slow queries. The default `HTTPProvider` times out after 10 seconds

    :param timeout: Timeout to configure for internal requests
    :return: A new web3 provider configured with the given timeout
    """
    if isinstance(self.w3_provider, AutoProvider):
        return HTTPProvider(endpoint_uri='http://localhost:8545',
                            request_kwargs={'timeout': timeout})
    elif isinstance(self.w3_provider, HTTPProvider):
        return HTTPProvider(endpoint_uri=self.w3_provider.endpoint_uri,
                            request_kwargs={'timeout': timeout})
    else:
        return self.w3_provider
Send a tx using an unlocked public key in the node or a private key. At least one of `public_key` or `private_key` must be provided. :param tx: :param private_key: :param public_key: :param retry: Retry if a problem with nonce is found :param block_identifier: :return: tx hash
def send_unsigned_transaction(self, tx: Dict[str, any], private_key: Optional[str] = None,
                              public_key: Optional[str] = None, retry: bool = False,
                              block_identifier: Optional[str] = None) -> bytes:
    """ Send a tx using an unlocked public key in the node or a private key.
    At least one of `public_key` or `private_key` must be provided

    :param tx:
    :param private_key:
    :param public_key:
    :param retry: Retry if a problem with nonce is found
    :param block_identifier:
    :return: tx hash
    """
    if private_key:
        address = self.private_key_to_address(private_key)
    elif public_key:
        address = public_key
    else:
        logger.error('No ethereum account provided. Need a public_key or private_key')
        raise ValueError("Ethereum account was not configured or unlocked in the node")

    if tx.get('nonce') is None:
        tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)

    number_errors = 5
    while number_errors >= 0:
        try:
            if private_key:
                signed_tx = self.w3.eth.account.signTransaction(tx, private_key=private_key)
                logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])
                try:
                    return self.send_raw_transaction(signed_tx.rawTransaction)
                except TransactionAlreadyImported as e:
                    # Sometimes Parity 2.2.11 fails with "Transaction already imported", even if it's not,
                    # but the tx was processed
                    tx_hash = signed_tx.hash
                    logger.error('Transaction with tx-hash=%s already imported: %s' % (tx_hash.hex(), str(e)))
                    return tx_hash
            elif public_key:
                tx['from'] = address
                return self.send_transaction(tx)
        except ReplacementTransactionUnderpriced as e:
            if not retry or not number_errors:
                raise e
            logger.error('address=%s Tx with nonce=%d was already sent, retrying with nonce + 1',
                         address, tx['nonce'])
            tx['nonce'] += 1
        except InvalidNonce as e:
            if not retry or not number_errors:
                raise e
            logger.error('address=%s Tx with invalid nonce=%d, retrying recovering nonce again',
                         address, tx['nonce'])
            tx['nonce'] = self.get_nonce_for_account(address, block_identifier=block_identifier)
        number_errors -= 1
Send ether using the configured account. :param private_key: private key of the sender account :param to: to :param gas_price: gas_price :param value: value (wei) :param gas: gas, defaults to 22000 :param retry: Retry if a problem is found :param block_identifier: None default, 'pending' for not-confirmed txs :param max_eth_to_send: safety limit (in ether); 0 disables the check :return: tx_hash
def send_eth_to(self, private_key: str, to: str, gas_price: int, value: int, gas: int = 22000,
                retry: bool = False, block_identifier=None, max_eth_to_send: int = 0) -> bytes:
    """ Send ether using the configured account

    :param private_key: private key of the sender account
    :param to: to
    :param gas_price: gas_price
    :param value: value (wei)
    :param gas: gas, defaults to 22000
    :param retry: Retry if a problem is found
    :param block_identifier: None default, 'pending' for not-confirmed txs
    :param max_eth_to_send: safety limit (in ether); 0 disables the check
    :return: tx_hash
    """
    assert check_checksum(to)
    if max_eth_to_send and value > self.w3.toWei(max_eth_to_send, 'ether'):
        raise EtherLimitExceeded('%d is bigger than %f' % (value, max_eth_to_send))

    tx = {
        'to': to,
        'value': value,
        'gas': gas,
        'gasPrice': gas_price,
    }

    return self.send_unsigned_transaction(tx, private_key=private_key, retry=retry,
                                          block_identifier=block_identifier)
Check tx hash and make sure it has the confirmations required. :param tx_hash: Hash of the tx :param confirmations: Minimum number of confirmations required :return: True if tx was mined with the number of confirmations required, False otherwise
def check_tx_with_confirmations(self, tx_hash: str, confirmations: int) -> bool:
    """ Check tx hash and make sure it has the confirmations required

    :param tx_hash: Hash of the tx
    :param confirmations: Minimum number of confirmations required
    :return: True if tx was mined with the number of confirmations required, False otherwise
    """
    tx_receipt = self.w3.eth.getTransactionReceipt(tx_hash)
    if not tx_receipt or tx_receipt['blockNumber'] is None:
        # If tx_receipt exists but blockNumber is None, the tx is still pending (Parity only)
        return False
    else:
        return (self.w3.eth.blockNumber - tx_receipt['blockNumber']) >= confirmations
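A bounded polling sketch built on this check; the client instance, confirmation count, and sleep interval are arbitrary choices for illustration:

import time

# wait up to ~5 minutes for 6 confirmations of a hypothetical tx_hash
for _ in range(30):
    if ethereum_client.check_tx_with_confirmations(tx_hash, 6):
        break
    time.sleep(10)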
:return: checksum encoded address starting with 0x, for example `0x568c93675A8dEb121700A6FAdDdfE7DFAb66Ae4A` :rtype: str
def get_signing_address(hash: Union[bytes, str], v: int, r: int, s: int) -> str:
    """ Recover the address that signed the given hash

    :return: checksum encoded address starting with 0x, for example `0x568c93675A8dEb121700A6FAdDdfE7DFAb66Ae4A`
    :rtype: str
    """
    encoded_64_address = ecrecover_to_pub(hash, v, r, s)
    address_bytes = sha3(encoded_64_address)[-20:]
    return checksum_encode(address_bytes)
Generates an address for a contract created using CREATE2. :param from_: The address which is creating this new address (needs to be 20 bytes) :param salt: A salt (32 bytes) :param init_code: The init code of the contract being created :return: Address of the new contract
def generate_address_2(from_: Union[str, bytes], salt: Union[str, bytes],
                       init_code: Union[str, bytes]) -> str:
    """ Generates an address for a contract created using CREATE2.

    :param from_: The address which is creating this new address (needs to be 20 bytes)
    :param salt: A salt (32 bytes)
    :param init_code: The init code of the contract being created
    :return: Address of the new contract
    """
    from_ = HexBytes(from_)
    salt = HexBytes(salt)
    init_code = HexBytes(init_code)

    assert len(from_) == 20, "Address %s is not valid. Must be 20 bytes" % from_
    assert len(salt) == 32, "Salt %s is not valid. Must be 32 bytes" % salt
    assert len(init_code) > 0, "Init code %s is not valid" % init_code

    init_code_hash = Web3.sha3(init_code)
    contract_address = Web3.sha3(HexBytes('ff') + from_ + salt + init_code_hash)
    return Web3.toChecksumAddress(contract_address[12:])
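A self-contained CREATE2 sketch; the deployer, salt, and init code below are placeholders, not values from the source:

deployer = '0x' + 'ab' * 20            # 20-byte deployer address (placeholder)
salt = '0x' + '00' * 32                # 32-byte salt (placeholder)
init_code = '0x602a60005260206000f3'   # tiny illustrative init code

predicted = generate_address_2(deployer, salt, init_code)
# `predicted` is the checksummed address a CREATE2 deployment with these inputs would produce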