code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def encode_time(o):
    """Encode a ``datetime.time`` as an ECMA-262 compliant time string.

    Microseconds are truncated to millisecond precision, and a UTC
    offset of ``+00:00`` is rewritten as the ``Z`` suffix.
    """
    text = o.isoformat()
    if o.microsecond:
        # 'HH:MM:SS.ffffff' -> keep only 'HH:MM:SS.fff' (milliseconds).
        # NOTE(review): this also drops any trailing UTC offset when
        # microseconds are present — presumably inputs are naive times.
        text = text[:12]
    if text.endswith('+00:00'):
        text = text[:-6] + 'Z'
    return text
Encodes a Python datetime.time object as an ECMA-262 compliant time string.
def iplot(figure,validate=True,sharing=None,filename='',
          online=None,asImage=False,asUrl=False,asPlot=False,
          dimensions=None,display_image=True,**kwargs):
    """Plots a figure in IPython, creates an HTML file, or generates an image.

    figure : figure
        Plotly figure to be charted
    validate : bool
        If True then all values are validated before it is charted
    sharing : string
        Sets the sharing level permission
            public - anyone can see this chart
            private - only you can see this chart
            secret - only people with the link can see the chart
    filename : string
        Name to be used to save the file in the server, or as an image
    online : bool
        If True then the chart/image is rendered on the server
        even when running in offline mode.
    asImage : bool
        If True it returns an Image (png)
        In ONLINE mode:
            Image file is saved in the working directory
            Accepts: filename, dimensions, scale, display_image
        In OFFLINE mode:
            Image file is downloaded (downloads folder) and a
            regular plotly chart is displayed in Jupyter
            Accepts: filename, dimensions
    asUrl : bool
        If True the chart url/path is returned. No chart is displayed.
            If Online : the URL is returned
            If Offline : the local path is returned
    asPlot : bool
        If True the chart opens in browser
    dimensions : tuple(int,int)
        Dimensions for image (width,height)
    display_image : bool
        If True, then the image is displayed after it has been saved.
        Requires Jupyter Notebook. Only valid when asImage=True

    Other Kwargs
    ============
        legend : bool
            If False then the legend will not be shown
        scale : integer
            Increase the resolution of the image by `scale` amount
            Only valid when asImage=True
    """
    # Reject any keyword that is not explicitly supported.
    valid_kwargs=['world_readable','legend','scale']
    for key in list(kwargs.keys()):
        if key not in valid_kwargs:
            raise Exception("Invalid keyword : '{0}'".format(key))
    # 'legend' is applied directly onto the figure layout.
    if 'legend' in kwargs:
        if 'layout' in figure:
            figure['layout'].update(showlegend=kwargs['legend'])

    ## Sharing Values
    # Legacy 'world_readable' boolean maps onto the 'sharing' string,
    # then any boolean sharing value is normalized to public/private.
    if all(['world_readable' in kwargs,sharing is None]):
        sharing=kwargs['world_readable']
    if isinstance(sharing,bool):
        if sharing:
            sharing='public'
        else:
            sharing='private'
    if sharing is None:
        sharing=auth.get_config_file()['sharing']

    ## Filename Handling
    # Fall back to the figure title, then to a timestamped default.
    if not filename:
        try:
            filename=figure['layout']['title']
        except:
            filename='Plotly Playground {0}'.format(
                time.strftime("%Y-%m-%d %H:%M:%S"))

    ## Dimensions
    # Explicit argument wins, then the config file, then (800, 500).
    if not dimensions:
        dimensions=(800,500) if not auth.get_config_file()['dimensions'] \
            else auth.get_config_file()['dimensions']

    ## Offline Links
    show_link = auth.get_config_file()['offline_show_link']
    link_text = auth.get_config_file()['offline_link_text']
    config = auth.get_config_file()['offline_config']

    ## Remove validation if shapes are present
    if 'layout' in figure:
        validate = False if 'shapes' in figure['layout'] else validate

    ## asURL
    # asUrl is implemented as a non-opening plot: the plot call returns
    # the URL (online) or local path (offline) without opening a browser.
    auto_open=True
    if asUrl:
        asPlot=True
        auto_open=False

    ## Exports
    if asImage:
        if offline.is_offline() and not online:
            return offline.py_offline.iplot(
                figure,validate=validate, filename=filename,
                show_link=show_link,link_text=link_text,
                image='png', image_width=dimensions[0],
                image_height=dimensions[1], config=config)
        else:
            # Prefer saving under img/; fall back to the working
            # directory when that fails (e.g. the folder is missing).
            try:
                py.image.save_as(figure,filename='img/'+filename,format='png',
                                 width=dimensions[0],height=dimensions[1],
                                 scale=kwargs.get('scale',None))
                path='img/'+filename+'.png'
            except:
                py.image.save_as(figure,filename=filename,format='png',
                                 width=dimensions[0],height=dimensions[1],
                                 scale=kwargs.get('scale',None))
                path=filename+'.png'
            if display_image:
                return display(Image(path))
            else:
                print('Image saved : {0}'.format(path))
                return None

    ## asPlot and asUrl
    if asPlot:
        filename+='.html'
        if offline.is_offline() and not online:
            return offline.py_offline.plot(
                figure, filename=filename, validate=validate,
                show_link=show_link, link_text=link_text,
                auto_open=auto_open, config=config)
        else:
            return py.plot(figure, sharing=sharing, filename=filename,
                           validate=validate, auto_open=auto_open)

    ## iplot
    if offline.is_offline() and not online:
        return offline.py_offline.iplot(
            figure, validate=validate, filename=filename,
            show_link=show_link, link_text=link_text, config=config)
    else:
        return py.iplot(figure,validate=validate,sharing=sharing,
                        filename=filename)
Plots a figure in IPython, creates an HTML or generates an Image figure : figure Plotly figure to be charted validate : bool If True then all values are validated before it is charted sharing : string Sets the sharing level permission public - anyone can see this chart private - only you can see this chart secret - only people with the link can see the chart filename : string Name to be used to save the file in the server, or as an image online : bool If True then the chart/image is rendered on the server even when running in offline mode. asImage : bool If True it returns an Image (png) In ONLINE mode: Image file is saved in the working directory Accepts: filename dimensions scale display_image In OFFLINE mode: Image file is downloaded (downloads folder) and a regular plotly chart is displayed in Jupyter Accepts: filename dimensions asUrl : bool If True the chart url/path is returned. No chart is displayed. If Online : the URL is returned If Offline : the local path is returned asPlot : bool If True the chart opens in browser dimensions : tuple(int,int) Dimensions for image (width,height) display_image : bool If true, then the image is displayed after it has been saved Requires Jupyter Notebook Only valid when asImage=True Other Kwargs ============ legend : bool If False then the legend will not be shown scale : integer Increase the resolution of the image by `scale` amount Only valid when asImage=True
def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:
    """Register one or more classes with a YAtiML loader.

    Once a class has been registered, it can be recognized and
    constructed when reading a YAML text.

    Args:
        loader_cls: The loader to register the classes with.
        classes: The class(es) to register; a plain Python class or a
            list of them.
    """
    # Normalize the single-class form to a list.
    if not isinstance(classes, list):
        classes = [classes]  # type: ignore

    for cls in classes:
        tag = '!{}'.format(cls.__name__)
        # Choose the constructor matching the kind of class registered.
        if issubclass(cls, enum.Enum):
            constructor = EnumConstructor(cls)
        elif issubclass(cls, (str, UserString)):
            constructor = UserStringConstructor(cls)
        else:
            constructor = Constructor(cls)
        loader_cls.add_constructor(tag, constructor)

        # Keep a tag -> class map on the loader for later lookups.
        if not hasattr(loader_cls, '_registered_classes'):
            loader_cls._registered_classes = dict()
        loader_cls._registered_classes[tag] = cls
Registers one or more classes with a YAtiML loader. Once a class has been registered, it can be recognized and \ constructed when reading a YAML text. Args: loader_cls: The loader to register the classes with. classes: The class(es) to register, a plain Python class or a \ list of them.
def _list_of_dicts_to_column_headers(list_of_dicts): """ Detects if all entries in an list of ``dict``'s have identical keys. Returns the keys if all keys are the same and ``None`` otherwise. Parameters ---------- list_of_dicts : list List of dictionaries to test for identical keys. Returns ------- list or None List of column headers if all dictionary posessed the same keys. Returns ``None`` otherwise. """ if len(list_of_dicts) < 2 or not all(isinstance(item, dict) for item in list_of_dicts): return None column_headers = list_of_dicts[0].keys() for d in list_of_dicts[1:]: if len(d.keys()) != len(column_headers) or not all(header in d for header in column_headers): return None return column_headers
Detects if all entries in a list of ``dict``'s have identical keys. Returns the keys if all keys are the same and ``None`` otherwise. Parameters ---------- list_of_dicts : list List of dictionaries to test for identical keys. Returns ------- list or None List of column headers if all dictionaries possessed the same keys. Returns ``None`` otherwise.
def _get_net_runner_opts():
    '''
    Return the net.find runner options.
    '''
    # Options come from the master config; each one falls back to the
    # module-level default when unset.
    opts = __opts__.get('runners', {}).get('net.find', {})
    defaults = (
        ('target', _DEFAULT_TARGET),
        ('expr_form', _DEFAULT_EXPR_FORM),
        ('ignore_interfaces', _DEFAULT_IGNORE_INTF),
        ('display', _DEFAULT_DISPLAY),
        ('outputter', _DEFAULT_OUTPUTTER),
    )
    return {key: opts.get(key, fallback) for key, fallback in defaults}
Return the net.find runner options.
def distanceToMesh(self, actor, signed=False, negate=False):
    '''
    Computes the (signed) distance from one mesh to another.

    .. hint:: |distance2mesh| |distance2mesh.py|_
    '''
    mesh_a = self.polydata()
    mesh_b = actor.polydata()

    dist_filter = vtk.vtkDistancePolyDataFilter()
    dist_filter.SetInputData(0, mesh_a)
    dist_filter.SetInputData(1, mesh_b)
    if signed:
        dist_filter.SignedDistanceOn()
    if negate:
        dist_filter.NegateDistanceOn()
    dist_filter.Update()

    # Attach the per-point distances to this mesh and color by them.
    distances = dist_filter.GetOutput().GetPointData().GetScalars()
    mesh_a.GetPointData().AddArray(distances)
    mesh_a.GetPointData().SetActiveScalars(distances.GetName())
    lo, hi = distances.GetRange()
    self.mapper.SetScalarRange(lo, hi)
    self.mapper.ScalarVisibilityOn()
    return self
Computes the (signed) distance from one mesh to another. .. hint:: |distance2mesh| |distance2mesh.py|_
def run_plate_bias(in_prefix, in_type, out_prefix, base_dir, options):
    """Runs step7 (plate bias).

    :param in_prefix: the prefix of the input files.
    :param in_type: the type of the input files.
    :param out_prefix: the output prefix.
    :param base_dir: the output directory.
    :param options: the options needed.

    :type in_prefix: str
    :type in_type: str
    :type out_prefix: str
    :type base_dir: str
    :type options: list

    :returns: a tuple containing the prefix of the output files (the
              input prefix for the next script) and the type of the
              output files (``bfile``).

    This function calls the :py:mod:`pyGenClean.PlateBias.plate_bias`
    module. The required file type for this module is ``bfile``, hence
    the need to use the :py:func:`check_input_files` to check if the
    file input file type is the good one, or to create it if needed.

    .. note::
        The :py:mod:`pyGenClean.PlateBias.plate_bias` module doesn't
        return usable output files. Hence, this function returns the
        input file prefix and its type.
    """
    # Creating the output directory
    os.mkdir(out_prefix)

    # We know we need bfile
    required_type = "bfile"
    check_input_files(in_prefix, in_type, required_type)

    # We need to inject the name of the input file and the name of the output
    # prefix
    script_prefix = os.path.join(out_prefix, "plate_bias")
    options += ["--{}".format(required_type), in_prefix,
                "--out", script_prefix]

    # We run the script; its own error type is re-raised as ours so the
    # pipeline reports a uniform error.
    try:
        plate_bias.main(options)
    except plate_bias.ProgramError as e:
        msg = "plate_bias: {}".format(e)
        raise ProgramError(msg)

    # The name of the summary file
    filename = script_prefix + ".significant_SNPs.summary"
    if not os.path.isfile(filename):
        raise ProgramError("{}: no such file".format(filename))

    # Getting the number of each plate.  The first line is a tab-separated
    # header; the 'plate' column must be present.
    plate_counter = None
    with open(filename, "r") as i_file:
        header = {
            name: i for i, name in
            enumerate(i_file.readline().rstrip("\r\n").split("\t"))
        }
        if "plate" not in header:
            msg = "{}: missing column plate".format(filename)
            raise ProgramError(msg)
        # Counting the number of markers for each plate
        plate_counter = Counter(
            line.rstrip("\r\n").split("\t")[header["plate"]]
            for line in i_file
        )

    # Creating the table (most common plates first)
    table = [["plate name", "number of markers"]]
    for plate_name, number in plate_counter.most_common():
        table.append([
            latex_template.sanitize_tex(plate_name),
            "{:,d}".format(number),
        ])

    # The number of unique markers (the file may repeat marker names)
    filename = script_prefix + ".significant_SNPs.txt"
    nb_markers = None
    with open(filename, "r") as i_file:
        nb_markers = len({line.rstrip("\r\n") for line in i_file})

    # Getting the p value threshold: explicit --pfilter option wins over
    # the parser's default.
    p_threshold = str(plate_bias.parser.get_default("pfilter"))
    if "--pfilter" in options:
        p_threshold = str(options[options.index("--pfilter") + 1])

    # We write a LaTeX summary
    latex_file = os.path.join(script_prefix + ".summary.tex")
    try:
        with open(latex_file, "w") as o_file:
            print >>o_file, latex_template.subsection(plate_bias.pretty_name)
            text = (
                "After performing the plate bias analysis using Plink, a "
                "total of {:,d} unique marker{} had a significant result "
                "({} a value less than {}).".format(
                    nb_markers,
                    "s" if nb_markers > 1 else "",
                    r"\textit{i.e.}",
                    latex_template.format_numbers(p_threshold),
                )
            )
            print >>o_file, latex_template.wrap_lines(text)

            # The per-plate table is only emitted when there is something
            # to report.
            if nb_markers > 0:
                # Slashes in the prefix would break the LaTeX label.
                table_label = re.sub(
                    r"[/\\]", "_", script_prefix,
                ) + "_plate_bias"
                text = (
                    r"Table~\ref{" + table_label + "} summarizes the plate "
                    "bias results."
                )
                print >>o_file, latex_template.wrap_lines(text)

                # Getting the template
                longtable_template = latex_template.jinja2_env.get_template(
                    "longtable_template.tex",
                )

                # The table caption
                table_caption = (
                    "Summary of the plate bias analysis performed by Plink. "
                    "For each plate, the number of significant marker{} is "
                    "shown (threshold of {}). The plates are sorted according "
                    "to the total number of significant results.".format(
                        "s" if nb_markers > 1 else "",
                        latex_template.format_numbers(p_threshold),
                    )
                )
                print >>o_file, longtable_template.render(
                    table_caption=table_caption,
                    table_label=table_label,
                    nb_col=len(table[1]),
                    col_alignments="lr",
                    text_size="normalsize",
                    header_data=zip(table[0], [1 for i in table[0]]),
                    tabular_data=table[1:],
                )
    except IOError:
        msg = "{}: cannot write LaTeX summary".format(latex_file)
        raise ProgramError(msg)

    # Writing the summary results (appended to the run-wide summary file)
    with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
        print >>o_file, "# {}".format(script_prefix)
        print >>o_file, ("Number of markers with plate bias (p<{})\t"
                         "{:,d}".format(p_threshold, nb_markers))
        print >>o_file, "---"

    # We know this step doesn't produce an new data set, so we return the old
    # prefix and the old in_type
    return _StepResult(
        next_file=in_prefix,
        next_file_type=required_type,
        latex_summary=latex_file,
        description=plate_bias.desc,
        long_description=plate_bias.long_desc,
        graph_path=None,
    )
Runs step7 (plate bias). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``bfile``). This function calls the :py:mod:`pyGenClean.PlateBias.plate_bias` module. The required file type for this module is ``bfile``, hence the need to use the :py:func:`check_input_files` to check if the file input file type is the good one, or to create it if needed. .. note:: The :py:mod:`pyGenClean.PlateBias.plate_bias` module doesn't return usable output files. Hence, this function returns the input file prefix and its type.
def create_cache_subnet_group(name, subnets=None, region=None, key=None,
                              keyid=None, profile=None, **args):
    '''
    Create an ElastiCache subnet group

    Example:

    .. code-block:: bash

        salt myminion boto3_elasticache.create_cache_subnet_group name=my-subnet-group \
                                          CacheSubnetGroupDescription="description" \
                                          subnets='[myVPCSubnet1,myVPCSubnet2]'
    '''
    if subnets:
        if 'SubnetIds' not in args:
            args['SubnetIds'] = []
        if not isinstance(subnets, list):
            subnets = [subnets]
        for subnet in subnets:
            # Anything that looks like a subnet ID is passed through as-is;
            # a bad ID will be caught further down by the API call.
            if subnet.startswith('subnet-'):
                args['SubnetIds'] += [subnet]
                continue
            # Otherwise treat it as a subnet Name and resolve it to an ID.
            found = __salt__['boto_vpc.describe_subnets'](
                subnet_names=subnet, region=region, key=key,
                keyid=keyid, profile=profile).get('subnets')
            if not found:
                raise SaltInvocationError(
                    'Could not resolve Subnet Name {0} to an ID.'.format(subnet))
            if len(found) == 1:
                args['SubnetIds'] += [found[0]['id']]
            elif len(found) > 1:
                raise CommandExecutionError(
                    'Subnet Name {0} returned more than one ID.'.format(subnet))
    # Drop salt-injected private keyword arguments before calling boto3.
    args = {k: v for k, v in args.items() if not k.startswith('_')}
    return _create_resource(name, name_param='CacheSubnetGroupName',
                            desc='cache subnet group',
                            res_type='cache_subnet_group',
                            region=region, key=key, keyid=keyid,
                            profile=profile, **args)
Create an ElastiCache subnet group Example: .. code-block:: bash salt myminion boto3_elasticache.create_cache_subnet_group name=my-subnet-group \ CacheSubnetGroupDescription="description" \ subnets='[myVPCSubnet1,myVPCSubnet2]'
def random_patt_uniform(nrows, ncols, patt_type=scalar):
    """Return a random ScalarPatternUniform or TransversePatternUniform.

    Each element is a complex Gaussian random variable with zero mean
    and unit standard deviation per component.

    *nrows* is the number of rows in the pattern (the theta axis).
    *ncols* must be even and is the number of columns (the phi axis).

    Examples::

        >>> f = spherepy.random_patt_uniform(6, 8, patt_type = spherepy.scalar)
        >>> f = spherepy.random_patt_uniform(6, 8)  # same as above
        >>> F = spherepy.random_patt_uniform(6, 8, patt_type = spherepy.vector)

    Args:
        nrows (int): Number of rows corresponding to the theta axis.
        ncols (int): Number of columns corresponding to the phi axis.
            This value **must** be even.
        patt_type (int, optional): 0 for scalar (default), 1 for vector.

    Returns:
        A ScalarPatternUniform for the scalar type, a pattern built from
        two random grids for the vector type.

    Raises:
        ValueError: If ncols is not even.
        TypeError: If patt_type is anything but 0 or 1.
    """
    if np.mod(ncols, 2) == 1:
        raise ValueError(err_msg['ncols_even'])

    def _random_grid():
        # Complex Gaussian samples drawn real part first, then imaginary,
        # matching the original sampling order.
        re_part = np.random.normal(0.0, 1.0, nrows * ncols)
        im_part = np.random.normal(0.0, 1.0, nrows * ncols)
        return (re_part + 1j * im_part).reshape((nrows, ncols))

    if patt_type == scalar:
        return ScalarPatternUniform(_random_grid(), doublesphere=False)
    elif patt_type == vector:
        return TransversePatternUniform(_random_grid(), _random_grid(),
                                        doublesphere=False)
    else:
        raise TypeError(err_msg['ukn_patt_t'])
Returns a ScalarPatternUniform object or a VectorPatternUniform object where each of the elements is set to a normal random variable with zero mean and unit standard deviation. *nrows* is the number of rows in the pattern, which corresponds to the theta axis. *ncols* must be even and is the number of columns in the pattern and corresponds to the phi axis. (See *ScalarPatternUniform* and *VectorPatternUniform* for details.) Examples:: >>> f = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.scalar) >>> f = spherepy.random_patt_uniform(6, 8) # same as above >>> F = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.vector) Args: nrows (int): Number of rows corresponding to the theta axis. ncols (int): Number of columns corresponding to the phi axis. To get the speed and accuracy I need, this value **must** be even. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. Returns: coefs: Returns a ScalarPatternUniform object if coef_type is either blank or set to 0. Returns a VectorPatternUniform object if coef_type = 1. Raises: ValueError: If ncols is not even. TypeError: If coef_type is anything but 0 or 1.
def openByVendorIDAndProductID(
        self, vendor_id, product_id,
        skip_on_access_error=False, skip_on_error=False):
    """
    Get the first USB device matching given vendor and product ids.

    Returns an USBDeviceHandle instance, or None if no present device
    match.
    skip_on_error (bool)
        (see getDeviceList)
    skip_on_access_error (bool)
        (see getDeviceList)
    """
    # Locate the device first; only open a handle when one is present.
    device = self.getByVendorIDAndProductID(
        vendor_id, product_id,
        skip_on_access_error=skip_on_access_error,
        skip_on_error=skip_on_error)
    return device.open() if device is not None else None
Get the first USB device matching given vendor and product ids. Returns an USBDeviceHandle instance, or None if no present device match. skip_on_error (bool) (see getDeviceList) skip_on_access_error (bool) (see getDeviceList)
def selectInvert( self ):
    """
    Inverts the currently selected items in the scene.
    """
    active_layer = self._currentLayer
    for item in self.items():
        item_layer = item.layer()
        # Only touch items on the active layer, or items with no layer.
        if item_layer == active_layer or not item_layer:
            item.setSelected(not item.isSelected())
Inverts the currently selected items in the scene.
def decode_to_bin_keypath(path):
    """
    Decodes bytes into a sequence of 0s and 1s

    Used in decoding key path of a KV-NODE
    """
    bits = encode_to_bin(path)
    if bits[0] == 1:
        # Leading 1: skip the first nibble marker.
        bits = bits[4:]
    assert bits[0:2] == PREFIX_00
    pad_bits = TWO_BITS.index(bits[2:4])
    # Drop the 4-bit header plus the padding that byte-aligned the path.
    return bits[4 + ((4 - pad_bits) % 4):]
Decodes bytes into a sequence of 0s and 1s Used in decoding key path of a KV-NODE
def validate_table_name(name):
    """
    :param str name: Table name to validate.
    :raises NameValidationError: |raises_validate_table_name|
    """
    try:
        validate_sqlite_table_name(name)
    except (InvalidCharError, InvalidReservedNameError) as err:
        # Re-raise as this package's own validation error.
        raise NameValidationError(err)
    except NullNameError:
        raise NameValidationError("table name is empty")
    except ValidReservedNameError:
        # Reserved-but-valid names are acceptable here.
        pass
:param str name: Table name to validate. :raises NameValidationError: |raises_validate_table_name|
def dump_bulk(cls, parent=None, keep_ids=True):
    """Dumps a tree branch to a python data structure.

    :param parent: node whose subtree to dump; the whole tree when None.
    :param keep_ids: when True, each serialized node keeps its primary
        key under ``'id'``.
    :returns: a list of ``{'data': ..., 'id': ..., 'children': [...]}``
        dicts mirroring the tree structure (the latter two keys only
        when applicable).
    """
    cls = get_result_class(cls)

    # Because of fix_tree, this method assumes that the depth
    # and numchild properties in the nodes can be incorrect,
    # so no helper methods are used
    qset = cls._get_serializable_model().objects.all()
    if parent:
        # Restrict to the parent node and its descendants.
        qset = qset.filter(path__startswith=parent.path)
    ret, lnk = [], {}
    for pyobj in serializers.serialize('python', qset):
        # django's serializer stores the attributes in 'fields'
        fields = pyobj['fields']
        path = fields['path']
        # Depth is derived from the materialized path length, not from
        # the (possibly wrong) stored depth field.
        depth = int(len(path) / cls.steplen)
        # this will be useless in load_bulk
        del fields['depth']
        del fields['path']
        del fields['numchild']
        if 'id' in fields:
            # this happens immediately after a load_bulk
            del fields['id']
        newobj = {'data': fields}
        if keep_ids:
            newobj['id'] = pyobj['pk']
        if (not parent and depth == 1) or\
                (parent and len(path) == len(parent.path)):
            # A root of the requested branch: goes straight into the result.
            ret.append(newobj)
        else:
            # Attach to the parent's dict via the path lookup table.
            # NOTE(review): this relies on the serialization order
            # visiting ancestors before their descendants — presumably
            # guaranteed by path ordering; verify against the model Meta.
            parentpath = cls._get_basepath(path, depth - 1)
            parentobj = lnk[parentpath]
            if 'children' not in parentobj:
                parentobj['children'] = []
            parentobj['children'].append(newobj)
        lnk[path] = newobj
    return ret
Dumps a tree branch to a python data structure.
def post_user_contact_lists_contacts(self, id, contact_list_id, **data):
    """
    POST /users/:id/contact_lists/:contact_list_id/contacts/
    Adds a new contact to the contact list. Returns ``{"created": true}``.
    There is no way to update entries in the list; just delete the old
    one and add the updated version.
    """
    # BUG FIX: the path previously used ``{0}`` for both placeholders,
    # so ``contact_list_id`` was silently ignored and ``id`` was
    # substituted twice, targeting the wrong contact list.
    return self.post(
        "/users/{0}/contact_lists/{1}/contacts/".format(id, contact_list_id),
        data=data)
POST /users/:id/contact_lists/:contact_list_id/contacts/ Adds a new contact to the contact list. Returns ``{"created": true}``. There is no way to update entries in the list; just delete the old one and add the updated version.
def get_first_properties(elt, keys=None, ctx=None):
    """Get first properties related to one input key.

    :param elt: first property elt. Not None methods.
    :param list keys: property keys to get.
    :param ctx: elt ctx from where get properties. Equals elt if None. It
        allows to get function properties related to a class or instance
        if related function is defined in base class.

    :return: dict of first values of elt properties.
    """
    # A single key name is wrapped into a one-element tuple.
    if isinstance(keys, string_types):
        keys = (keys,)
    return _get_properties(elt, keys=keys, first=True, ctx=ctx)
Get first properties related to one input key. :param elt: first property elt. Not None methods. :param list keys: property keys to get. :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :return: dict of first values of elt properties.
def create_secgroup_rule(self, protocol, from_port, to_port, source, target):
    """
    Creates a new server security group rule.

    :param str protocol:  E.g. ``tcp``, ``icmp``, etc...
    :param int from_port:  E.g. ``1``
    :param int to_port:  E.g. ``65535``
    :param str source:  A CIDR block (contains ``/``) or the name of a
        source security group.
    :param str target:  The target security group.  I.e. the group in
        which this rule should be created.
    """
    rule = {
        'ip_protocol': protocol,
        'from_port': from_port,
        'to_port': to_port,
    }
    group = self.find_secgroup(target).ec2sg
    if not group:
        raise BangError("Security group not found, %s" % target)
    # A '/' means the source is a CIDR mask; otherwise it names a group.
    if '/' in source:
        rule['cidr_ip'] = source
    else:
        rule['src_group'] = self.find_secgroup(source).ec2sg
    group.authorize(**rule)
Creates a new server security group rule. :param str protocol: E.g. ``tcp``, ``icmp``, etc... :param int from_port: E.g. ``1`` :param int to_port: E.g. ``65535`` :param str source: :param str target: The target security group. I.e. the group in which this rule should be created.
def load(cls, filename, gzipped, byteorder='big'):
    """Read, parse and return the file at the specified location.

    The `gzipped` argument is used to indicate if the specified file is
    gzipped.

    The `byteorder` argument lets you specify whether the file is
    big-endian or little-endian.
    """
    # Transparently decompress when the caller says the file is gzipped.
    opener = gzip.open if gzipped else open
    with opener(filename, 'rb') as buff:
        return cls.from_buffer(buff, byteorder)
Read, parse and return the file at the specified location. The `gzipped` argument is used to indicate if the specified file is gzipped. The `byteorder` argument lets you specify whether the file is big-endian or little-endian.
def update(self, figure):
    """Updates figure on data change

    Parameters
    ----------
    * figure: matplotlib.figure.Figure
    \tMatplotlib figure object that is displayed in self

    """
    # Replace any previously shown canvas with one for the new figure.
    if hasattr(self, "figure_canvas"):
        self.figure_canvas.Destroy()

    canvas = self._get_figure_canvas(figure)
    canvas.SetSize(self.GetSize())
    self.figure_canvas = canvas
    figure.subplots_adjust()

    self.main_sizer.Add(canvas, 1,
                        wx.EXPAND | wx.FIXED_MINSIZE, 0)
    self.Layout()
    canvas.draw()
Updates figure on data change Parameters ---------- * figure: matplotlib.figure.Figure \tMatplotlib figure object that is displayed in self
def process_escape(self, char):
    '''Handle the char after the escape char'''
    # This handler runs for exactly one char; restore the previous one.
    self.process_char = self.last_process_char

    if self.part == [] and char in self.whitespace:
        # Special case where \ is by itself and not at the EOL: keep the
        # escape char itself.
        self.parts.append(self.escape_char)
        return
    if char == self.eol_char:
        # Ignore an escaped end-of-line character.
        return
    # Known escapes map to their replacement; unknown ones stay literal
    # (escape char plus the following char).
    self.part.append(self.escape_results.get(char, self.escape_char + char))
Handle the char after the escape char
def list2pd(all_data, subjindex=None, listindex=None):
    """
    Makes multi-indexed dataframe of subject data

    Parameters
    ----------
    all_data : list of lists of strings
        strings are either all presented or all recalled items, in the
        order of presentation or recall
        *should also work for presented / recalled ints and floats, if
        desired

    Returns
    ----------
    subs_list_of_dfs : multi-indexed dataframe
        dataframe of subject data (presented or recalled words/items),
        indexed by subject and list number; cell populated by the term
        presented or recalled in the position indicated by the column
        number
    """
    # Default indices: one entry per list within each subject, and one
    # entry per subject.
    if not listindex:
        listindex = [list(range(len(subject))) for subject in all_data]
    if not subjindex:
        subjindex = [pos for pos, _ in enumerate(all_data)]

    listindex = list(listindex)
    subjindex = list(subjindex)

    def _index_for(sub_label, lists):
        # (subject, list) tuples labelling one subject's rows.
        return pd.MultiIndex.from_tuples(
            [(sub_label, lst) for lst in lists], names=['Subject', 'List'])

    frames = [
        pd.DataFrame(sub_data,
                     index=_index_for(subjindex[pos], listindex[pos]))
        for pos, sub_data in enumerate(all_data)
    ]
    return pd.concat(frames)
Makes multi-indexed dataframe of subject data Parameters ---------- all_data : list of lists of strings strings are either all presented or all recalled items, in the order of presentation or recall *should also work for presented / recalled ints and floats, if desired Returns ---------- subs_list_of_dfs : multi-indexed dataframe dataframe of subject data (presented or recalled words/items), indexed by subject and list number cell populated by the term presented or recalled in the position indicated by the column number
async def dispatch_request(
    self,
    request_context: Optional[RequestContext]=None,
) -> ResponseReturnValue:
    """Dispatch the request to the view function.

    Arguments:
        request_context: The request context, optional as Flask
            omits this argument.
    """
    context = request_context or _request_ctx_stack.top
    request_ = context.request
    # Routing failures (404, 405, ...) are raised here so the normal
    # error handling machinery deals with them.
    if request_.routing_exception is not None:
        raise request_.routing_exception

    is_options = request_.method == 'OPTIONS'
    if is_options and request_.url_rule.provide_automatic_options:
        return await self.make_default_options_response()

    view = self.view_functions[request_.url_rule.endpoint]
    return await view(**request_.view_args)
Dispatch the request to the view function. Arguments: request_context: The request context, optional as Flask omits this argument.
def add_custom_fields(cls, *args, **kw):
    """ Add any custom fields defined in the configuration. """
    # Each registered factory yields field objects; attach each one to
    # the class under its own name.
    for make_fields in config.custom_field_factories:
        for field in make_fields():
            setattr(cls, field.name, field)
Add any custom fields defined in the configuration.
def right_join(self, table, one=None, operator=None, two=None):
    """
    Add a right join to the query

    :param table: The table to join with, can also be a JoinClause instance
    :type table: str or JoinClause

    :param one: The first column of the join condition
    :type one: str

    :param operator: The operator of the join condition
    :type operator: str

    :param two: The second column of the join condition
    :type two: str

    :return: The current QueryBuilder instance
    :rtype: QueryBuilder
    """
    # A pre-built clause just needs its join type adjusted before being
    # handed to the generic join machinery.
    if isinstance(table, JoinClause):
        table.type = "right"
    return self.join(table, one, operator, two, "right")
Add a right join to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :return: The current QueryBuilder instance :rtype: QueryBuilder
def from_etree(cls, etree_element):
    """
    creates a ``SaltEdge`` instance from the etree representation of an
    <edges> element from a SaltXMI file.
    """
    instance = SaltElement.from_etree(etree_element)
    # TODO: this looks dangerous, ask Stackoverflow about it!
    # Re-brand the freshly parsed SaltElement as a SaltEdge.
    instance.__class__ = SaltEdge.mro()[0]
    instance.layers = get_layer_ids(etree_element)
    instance.source = get_node_id(etree_element, 'source')
    instance.target = get_node_id(etree_element, 'target')
    return instance
creates a ``SaltEdge`` instance from the etree representation of an <edges> element from a SaltXMI file.
def collect_output(mr_out_dir, out_file=None):
    """
    Return all mapreduce output in ``mr_out_dir``.

    Append the output to ``out_file`` if provided.  Otherwise, return
    the result as a single string (it is the caller's responsibility to
    ensure that the amount of data retrieved fits into memory).
    """
    if out_file is None:
        output = []
        for fn in iter_mr_out_files(mr_out_dir):
            with hdfs.open(fn, "rt") as f:
                output.append(f.read())
        return "".join(output)
    else:
        # Stream in 16 MiB chunks so arbitrarily large output is copied
        # in constant memory.
        block_size = 16777216
        # NOTE(review): this branch opens the HDFS file without "rt"
        # while ``out_file`` is opened in text mode — confirm that
        # hdfs.open defaults to text here, or ``o.write(data)`` will
        # fail on bytes.
        with open(out_file, 'a') as o:
            for fn in iter_mr_out_files(mr_out_dir):
                with hdfs.open(fn) as f:
                    data = f.read(block_size)
                    while len(data) > 0:
                        o.write(data)
                        data = f.read(block_size)
Return all mapreduce output in ``mr_out_dir``. Append the output to ``out_file`` if provided. Otherwise, return the result as a single string (it is the caller's responsibility to ensure that the amount of data retrieved fits into memory).
def to_json_object(self):
    """Returns a dict representation that can be serialized to JSON."""
    obj_dict = {
        'namespace_start': self.namespace_start,
        'namespace_end': self.namespace_end,
    }
    # 'app' is optional and omitted entirely when unset.
    if self.app is not None:
        obj_dict['app'] = self.app
    return obj_dict
Returns a dict representation that can be serialized to JSON.
def set_mode_cb(self, mode, tf):
    """Called when one of the Move/Draw/Edit radio buttons is selected."""
    # Guard clause: ignore deselection events (tf is False).
    if not tf:
        return True
    self.canvas.set_draw_mode(mode)
    if mode == 'edit':
        self.edit_select_cuts()
    return True
Called when one of the Move/Draw/Edit radio buttons is selected.
def save_and_close_enable(self, top_left, bottom_right):
    """Handle the data change event to enable the save and close button."""
    # The (top_left, bottom_right) range is part of the Qt signal
    # signature but not needed: any change enables the button.
    button = self.btn_save_and_close
    button.setEnabled(True)
    button.setAutoDefault(True)
    button.setDefault(True)
Handle the data change event to enable the save and close button.
def _getFromTime(self, atDate=None):
    """
    Time that the event starts (in the local time zone).
    """
    # ``atDate`` is accepted for interface compatibility but unused
    # here; the start is derived solely from the stored date/time and
    # timezone fields.
    return getLocalTime(self.date_from, self.time_from, self.tz)
Time that the event starts (in the local time zone).
def decodeLength(length):
    """
    Decode length based on given bytes.

    :param length: Bytes string to decode.
    :return: Decoded length.
    """
    # Pad the value out to 4 bytes, then strip the variable-length
    # marker bits with an XOR mask keyed on the encoded width.
    pads = {
        0: (b'\x00\x00\x00', 0),
        1: (b'\x00\x00\x00', 0),
        2: (b'\x00\x00', 0x8000),
        3: (b'\x00', 0xC00000),
        4: (b'', 0xE0000000),
    }
    try:
        offset, mask = pads[len(length)]
    except KeyError:
        raise ConnectionError('Unable to decode length of {}'.format(length))
    return unpack('!I', offset + length)[0] ^ mask
Decode length based on given bytes. :param length: Bytes string to decode. :return: Decoded length.
def ispercolating(am, inlets, outlets, mode='site'):
    r"""
    Determines if a percolating clusters exists in the network spanning
    the given inlet and outlet sites

    Parameters
    ----------
    am : adjacency_matrix
        The adjacency matrix with the ``data`` attribute indicating
        if a bond is occupied or not

    inlets : array_like
        An array of indices indicating which sites are part of the inlets

    outlets : array_like
        An array of indices indicating which sites are part of the outlets

    mode : string
        Indicates which type of percolation to apply, either `'site'` or
        `'bond'`
    """
    # BUG FIX: was ``am.format is not 'coo'`` — identity comparison with
    # a string literal is implementation-defined (and a SyntaxWarning on
    # CPython 3.8+); use inequality instead.
    if am.format != 'coo':
        am = am.to_coo()
    ij = sp.vstack((am.col, am.row)).T
    if mode.startswith('site'):
        occupied_sites = sp.zeros(shape=am.shape[0], dtype=bool)
        occupied_sites[ij[am.data].flatten()] = True
        clusters = site_percolation(ij, occupied_sites)
    elif mode.startswith('bond'):
        occupied_bonds = am.data
        clusters = bond_percolation(ij, occupied_bonds)
    # Drop the "unoccupied" (-1) cluster label before intersecting the
    # inlet and outlet cluster sets.
    ins = sp.unique(clusters.sites[inlets])
    if ins[0] == -1:
        ins = ins[1:]
    outs = sp.unique(clusters.sites[outlets])
    if outs[0] == -1:
        outs = outs[1:]
    # Percolation exists iff some cluster touches both sets.
    hits = sp.in1d(ins, outs)
    return sp.any(hits)
r""" Determines if a percolating clusters exists in the network spanning the given inlet and outlet sites Parameters ---------- am : adjacency_matrix The adjacency matrix with the ``data`` attribute indicating if a bond is occupied or not inlets : array_like An array of indices indicating which sites are part of the inlets outlets : array_like An array of indices indicating which sites are part of the outlets mode : string Indicates which type of percolation to apply, either `'site'` or `'bond'`
def get_family_admin_session(self):
    """Gets the ``OsidSession`` associated with the family administrative service.

    return: (osid.relationship.FamilyAdminSession) - a
            ``FamilyAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_family_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_family_admin()`` is ``true``.*
    """
    if not self.supports_family_admin():
        raise Unimplemented()
    # Import lazily so an incomplete installation surfaces as
    # OperationFailed rather than breaking module import.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.FamilyAdminSession(proxy=self._proxy,
                                           runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
Gets the ``OsidSession`` associated with the family administrative service. return: (osid.relationship.FamilyAdminSession) - a ``FamilyAdminSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_family_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_family_admin()`` is ``true``.*
def adjacent(geohash, direction):
    """Return the adjacent geohash for a given direction."""
    # Based on an MIT licensed implementation by Chris Veness from:
    # http://www.movable-type.co.uk/scripts/geohash.html
    assert direction in 'nsew', "Invalid direction: %s" % direction
    assert geohash, "Invalid geohash: %s" % geohash

    neighbor = {
        'n': ['p0r21436x8zb9dcf5h7kjnmqesgutwvy',
              'bc01fg45238967deuvhjyznpkmstqrwx'],
        's': ['14365h7k9dcfesgujnmqp0r2twvyx8zb',
              '238967debc01fg45kmstqrwxuvhjyznp'],
        'e': ['bc01fg45238967deuvhjyznpkmstqrwx',
              'p0r21436x8zb9dcf5h7kjnmqesgutwvy'],
        'w': ['238967debc01fg45kmstqrwxuvhjyznp',
              '14365h7k9dcfesgujnmqp0r2twvyx8zb'],
    }
    border = {
        'n': ['prxz', 'bcfguvyz'],
        's': ['028b', '0145hjnp'],
        'e': ['bcfguvyz', 'prxz'],
        'w': ['0145hjnp', '028b'],
    }

    last, parent = geohash[-1], geohash[:-1]
    parity = len(geohash) % 2  # lookup tables alternate by level parity
    # Crossing a cell border means the parent cell changes as well.
    if last in border[direction][parity] and parent:
        parent = adjacent(parent, direction)
    return parent + BASESEQUENCE[neighbor[direction][parity].index(last)]
Return the adjacent geohash for a given direction.
def from_labeled_point(rdd, categorical=False, nb_classes=None):
    """Convert a LabeledPoint RDD back to a pair of numpy arrays

    :param rdd: LabeledPoint RDD
    :param categorical: boolean, if labels should be one-hot encode when returned
    :param nb_classes: optional int, indicating the number of class labels
    :return: pair of numpy arrays, features and labels
    """
    features = np.asarray(
        rdd.map(lambda lp: from_vector(lp.features)).collect())
    labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')
    if not categorical:
        return features, labels
    # One-hot encode; infer the class count from the data when absent.
    if not nb_classes:
        nb_classes = np.max(labels) + 1
    one_hot = np.zeros((len(labels), nb_classes))
    one_hot[np.arange(len(labels)), labels] = 1.
    return features, one_hot
Convert a LabeledPoint RDD back to a pair of numpy arrays :param rdd: LabeledPoint RDD :param categorical: boolean, if labels should be one-hot encode when returned :param nb_classes: optional int, indicating the number of class labels :return: pair of numpy arrays, features and labels
def _update(self):
    """Update the current model using one round of Gibbs sampling.
    """
    initial_time = time.time()

    # One Gibbs sweep: sample the hidden-state trajectories, then the
    # emission parameters, then the transition matrix, each conditioned
    # on the current values of the others.
    self._updateHiddenStateTrajectories()
    self._updateEmissionProbabilities()
    self._updateTransitionMatrix()

    final_time = time.time()
    elapsed_time = final_time - initial_time
    logger().info("BHMM update iteration took %.3f s" % elapsed_time)
Update the current model using one round of Gibbs sampling.
def addInHeaderInfo(self, name, type, namespace, element_type=0,
                    mustUnderstand=0):
    """Add an input SOAP header description to the call info."""
    info = HeaderInfo(name, type, namespace, element_type)
    # Only flag mustUnderstand when the caller asked for it.
    if mustUnderstand:
        info.mustUnderstand = 1
    self.inheaders.append(info)
    return info
Add an input SOAP header description to the call info.
def calculate_perimeters(labels, indexes): """Count the distances between adjacent pixels in the perimeters of the labels""" # # Create arrays that tell whether a pixel is like its neighbors. # index = 0 is the pixel -1,-1 from the pixel of interest, 1 is -1,0, etc. # m = table_idx_from_labels(labels) pixel_score = __perimeter_scoring[m] return fixup_scipy_ndimage_result(scind.sum(pixel_score, labels, np.array(indexes,dtype=np.int32)))
Count the distances between adjacent pixels in the perimeters of the labels
def list(self, verbose=True):
    """Print a table of contents to sys.stdout. If `verbose' is False, only
    the names of the members are printed. If it is True, an `ls -l'-like
    output is produced.
    """
    self._check()

    for tarinfo in self:
        if verbose:
            print(filemode(tarinfo.mode), end=' ')
            # Prefer symbolic owner/group names, falling back to ids.
            print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                             tarinfo.gname or tarinfo.gid), end=' ')
            if tarinfo.ischr() or tarinfo.isblk():
                # Device files show "major,minor" where the size goes.
                print("%10s" % ("%d,%d" \
                                % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
            else:
                print("%10d" % tarinfo.size, end=' ')
            print("%d-%02d-%02d %02d:%02d:%02d" \
                  % time.localtime(tarinfo.mtime)[:6], end=' ')

        # Directories get a trailing slash, as in `ls -F`.
        print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')

        if verbose:
            if tarinfo.issym():
                print("->", tarinfo.linkname, end=' ')
            if tarinfo.islnk():
                print("link to", tarinfo.linkname, end=' ')
        print()
Print a table of contents to sys.stdout. If `verbose' is False, only the names of the members are printed. If it is True, an `ls -l'-like output is produced.
def publish(self, subject, payload):
    """
    Sends a PUB command to the server on the specified subject.

      ->> PUB hello 5
      ->> MSG_PAYLOAD: world
      <<- MSG hello 2 5
    """
    if self.is_closed:
        raise ErrConnectionClosed
    if self.is_draining_pubs:
        raise ErrConnectionDraining

    payload_size = len(payload)
    # Reject messages larger than the server-advertised limit up front.
    if payload_size > self._max_payload:
        raise ErrMaxPayload
    # Plain publish: no reply subject (_EMPTY_).
    yield from self._publish(subject, _EMPTY_, payload, payload_size)
Sends a PUB command to the server on the specified subject. ->> PUB hello 5 ->> MSG_PAYLOAD: world <<- MSG hello 2 5
def get_beta_list(queue, *args):
    """Return the adjusted risk factor (beta) for every candidate title,
    used to normalise the risk-factor coefficients.

    Keyword arguments:
    queue -- queue of candidate titles
    *args -- reinforcement lists (one or more)
    Return:
    beta_list -- betas of all candidate titles, list type
    """
    return [CDM(title).get_alpha(*args) for title in queue]
获取调整后的风险因子列表,用于归一化风险因子系数 Keyword arguments: queue -- 标题候选队列 *args -- 强化ef,客串如多个list Return: beta_list -- 所有候选标题的beta,list类型
def do_until(lambda_expr, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5,
             message=None):
    '''
    A retry wrapper that'll keep performing the action until it succeeds.

    (main difference between do_until and wait_until is do_until will keep
    trying until a value is returned, while wait_until will wait until the
    function evaluates True.)

    Args:
        lambda_expr (lambda) : Expression to evaluate.

    Kwargs:
        timeout (number): Timeout period in seconds.
        sleep (number) : Sleep time to wait between iterations.
        message (str) : Provide a message for TimeoutError raised.

    Returns:
        The value of the evaluated lambda expression.

    Usage::

        do_until(lambda: driver.find_element_by_id("save").click(),
                 timeout=30, sleep=0.5)
    '''
    __check_condition_parameter_is_function(lambda_expr)
    deadline = datetime.now() + timedelta(seconds=timeout)
    last_exception = None
    while datetime.now() < deadline:
        try:
            return lambda_expr()
        except Exception as e:
            # Remember the failure so it can be attached to the timeout.
            last_exception = e
        time.sleep(sleep)
    raise OperationTimeoutError(message or "Operation timed out.",
                                last_exception)
A retry wrapper that'll keep performing the action until it succeeds. (main differnce between do_until and wait_until is do_until will keep trying until a value is returned, while wait until will wait until the function evaluates True.) Args: lambda_expr (lambda) : Expression to evaluate. Kwargs: timeout (number): Timeout period in seconds. sleep (number) : Sleep time to wait between iterations message (str) : Provide a message for TimeoutError raised. Returns: The value of the evaluated lambda expression. Usage:: do_until(lambda: driver.find_element_by_id("save").click(), timeout=30, sleep=0.5) Is equivalent to: end_time = datetime.now() + timedelta(seconds=30) while datetime.now() < end_time: try: return driver.find_element_by_id("save").click() except: pass time.sleep(0.5) raise OperationTimeoutError()
def emit(self, event, *args, **kwargs):
    """Call all callback functions registered with an event.

    Any positional and keyword arguments can be passed here, and they
    will be forwarded to the callback functions.

    With ``single=True`` only the most recently registered callback is
    invoked and its result returned; otherwise the list of all callback
    results is returned.
    """
    callbacks = self._callbacks.get(event, [])
    single = kwargs.pop('single', None)
    # Single mode: dispatch to the last-registered callback only.
    if single and callbacks:
        return callbacks[-1](*args, **kwargs)
    return [callback(*args, **kwargs) for callback in callbacks]
Call all callback functions registered with an event. Any positional and keyword arguments can be passed here, and they will be forwarded to the callback functions. Return the list of callback return results.
def create_symbol(self, type_, **kwargs):
    """
    Create and register a symbol of class ``type_``.

    Keyword arguments become attributes of the new symbol; notable keys
    are ``unique_name`` (falls back to ``display_name``), ``filename``
    (made absolute) and ``aliases`` (registered as ProxySymbols
    targeting the new symbol).  Returns the new symbol, or None when a
    symbol with the same unique name already exists.
    """
    unique_name = kwargs.get('unique_name')
    if not unique_name:
        unique_name = kwargs.get('display_name')
        kwargs['unique_name'] = unique_name

    filename = kwargs.get('filename')
    if filename:
        filename = os.path.abspath(filename)
        # NOTE(review): abspath is applied a second time here; harmless
        # since it is idempotent, but one call would do.
        kwargs['filename'] = os.path.abspath(filename)

    if unique_name in self.__symbols:
        warn('symbol-redefined', "%s(unique_name=%s, filename=%s, project=%s)"
             " has already been defined: %s" % (type_.__name__, unique_name,
                                                filename,
                                                kwargs.get('project_name'),
                                                self.get_symbol(unique_name)))
        return None

    # Register a proxy for every alias before creating the real symbol.
    aliases = kwargs.pop('aliases', [])
    for alias in aliases:
        self.create_symbol(ProxySymbol,
                           unique_name=alias,
                           target=unique_name)

    symbol = type_()
    debug('Created symbol with unique name %s' % unique_name,
          'symbols')

    for key, value in list(kwargs.items()):
        setattr(symbol, key, value)

    self.__symbols[unique_name] = symbol
    # Aliases resolve to the very same symbol object.
    for alias in aliases:
        self.__symbols[alias] = symbol
    self.__aliases[unique_name] = aliases

    return symbol
Banana banana
def render_app_name(context, app, template="/admin_app_name.html"):
    """
    Render the application name using the default template name. If it
    cannot find a template matching the given path, fallback to the
    application name.
    """
    try:
        template = app['app_label'] + template
        text = render_to_string(template, context)
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors (missing template,
        # missing 'app_label' key) before falling back to the name.
        text = app['name']
    return text
Render the application name using the default template name. If it cannot find a template matching the given path, fallback to the application name.
def AgregarFrigorifico(self, cuit, nro_planta):
    """Attach the (optional) slaughterhouse data to the settlement request."""
    self.solicitud['datosLiquidacion']['frigorifico'] = {
        'cuit': cuit,
        'nroPlanta': nro_planta,
    }
    return True
Agrego el frigorifico a la liquidacíon (opcional).
def ply2gii(in_file, metadata, out_file=None):
    """Convert from ply to GIfTI

    ``metadata`` is a dict of GIfTI metadata; the surface centroid is
    added to it.  When ``out_file`` is None a name is derived from
    ``in_file`` in the current working directory.  Returns the path of
    the written ``.gii`` file.
    """
    from pathlib import Path

    from numpy import eye
    from nibabel.gifti import (
        GiftiMetaData,
        GiftiCoordSystem,
        GiftiImage,
        GiftiDataArray,
    )
    from pyntcloud import PyntCloud

    in_file = Path(in_file)
    surf = PyntCloud.from_file(str(in_file))

    # Update centroid metadata
    metadata.update(
        zip(('SurfaceCenterX', 'SurfaceCenterY', 'SurfaceCenterZ'),
            ['%.4f' % c for c in surf.centroid])
    )

    # Prepare data arrays: vertex coordinates (pointset) followed by the
    # triangle index array.
    da = (
        GiftiDataArray(
            data=surf.xyz.astype('float32'),
            datatype='NIFTI_TYPE_FLOAT32',
            intent='NIFTI_INTENT_POINTSET',
            meta=GiftiMetaData.from_dict(metadata),
            coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3)),
        GiftiDataArray(
            data=surf.mesh.values,
            datatype='NIFTI_TYPE_INT32',
            intent='NIFTI_INTENT_TRIANGLE',
            coordsys=None))
    surfgii = GiftiImage(darrays=da)

    if out_file is None:
        out_file = fname_presuffix(
            in_file.name, suffix='.gii', use_ext=False,
            newpath=str(Path.cwd()))

    surfgii.to_filename(str(out_file))
    return out_file
Convert from ply to GIfTI
def picture(self, row):
    """Create a simplified character representation of the data row,
    which can be pattern matched with a regex

    Each cell maps to one character of the template: '_' for
    None/empty, 'X' for text-like values, 'n' for numbers.
    """
    template = '_Xn'
    types = (type(None), binary_type, int)

    def guess_type(v):
        # Best-effort type of the cell's string form; returns None when
        # nothing at all converts (every constructor below failed).
        try:
            v = text_type(v).strip()
        except ValueError:
            v = binary_type(v).strip()
            # v = v.decode('ascii', 'replace').strip()

        if not bool(v):
            return type(None)

        # Try the most specific conversion first.
        for t in (float, int, binary_type, text_type):
            try:
                return type(t(v))
            except:
                pass

    def p(e):
        # Map one cell to its template character.
        tm = t = None
        try:
            t = guess_type(e)
            # type_map collapses related types onto the three template
            # categories (e.g. float -> int, text -> binary).
            tm = self.type_map.get(t, t)
            return template[types.index(tm)]
        except ValueError as e:
            raise ValueError("Type '{}'/'{}' not in the types list: {} ({})"
                             .format(t, tm, types, e))

    return ''.join(p(e) for e in row)
Create a simplified character representation of the data row, which can be pattern matched with a regex
def last(symbol: str):
    """ displays last price, for symbol if provided
    """
    app = PriceDbApplication()

    if symbol:
        # Symbols are stored upper-case; normalise user input.
        symbol = symbol.upper()

        # extract namespace (e.g. "EXCHANGE:TICKER") from the symbol
        sec_symbol = SecuritySymbol("", "")
        sec_symbol.parse(symbol)

        latest = app.get_latest_price(sec_symbol)
        assert isinstance(latest, PriceModel)

        print(f"{latest}")
    else:
        # Show the latest prices available for all securities.
        latest = app.get_latest_prices()
        for price in latest:
            print(f"{price}")
displays last price, for symbol if provided
def freeze(sess, output_file_path, output_node_names):
    """Freeze and shrink the graph based on a session and the output node names."""
    # Save a throwaway checkpoint and freeze from it; the temporary
    # directory is cleaned up automatically.
    with TemporaryDirectory() as tmp_dir:
        checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
        tf.train.Saver().save(sess, checkpoint_path)
        freeze_from_checkpoint(checkpoint_path,
                               output_file_path,
                               output_node_names)
Freeze and shrink the graph based on a session and the output node names.
def patterson_d(aca, acb, acc, acd): """Unbiased estimator for D(A, B; C, D), the normalised four-population test for admixture between (A or B) and (C or D), also known as the "ABBA BABA" test. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. Returns ------- num : ndarray, float, shape (n_variants,) Numerator (un-normalised f4 estimates). den : ndarray, float, shape (n_variants,) Denominator. Notes ----- See Patterson (2012), main text and Appendix A. For un-normalized f4 statistics, ignore the `den` return value. """ # check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' acc = AlleleCountsArray(acc, copy=False) assert acc.shape[1] == 2, 'only biallelic variants supported' acd = AlleleCountsArray(acd, copy=False) assert acd.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb, acc, acd) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] c = acc.to_frequencies()[:, 1] d = acd.to_frequencies()[:, 1] # compute estimator num = (a - b) * (c - d) den = (a + b - (2 * a * b)) * (c + d - (2 * c * d)) return num, den
Unbiased estimator for D(A, B; C, D), the normalised four-population test for admixture between (A or B) and (C or D), also known as the "ABBA BABA" test. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. Returns ------- num : ndarray, float, shape (n_variants,) Numerator (un-normalised f4 estimates). den : ndarray, float, shape (n_variants,) Denominator. Notes ----- See Patterson (2012), main text and Appendix A. For un-normalized f4 statistics, ignore the `den` return value.
def cons(head_ele, l, **kwargs):
    '''
    Prepend ``head_ele`` to list ``l``.

        from elist.elist import *
        ol=[1,2,3,4]
        id(ol)
        new = cons(5,ol)
        new
        id(new)
        ####
        ol=[1,2,3,4]
        id(ol)
        rslt = cons(5,ol,mode="original")
        rslt
        id(rslt)
    '''
    # mode="new" (default) returns a fresh list; "original" mutates l.
    return prepend(l, head_ele, mode=kwargs.get('mode', "new"))
from elist.elist import * ol=[1,2,3,4] id(ol) new = cons(5,ol) new id(new) #### ol=[1,2,3,4] id(ol) rslt = cons(5,ol,mode="original") rslt id(rslt)
def at(self, timestamp):
    """
    Force the create date of an object to be at a certain time; This
    method can be invoked only on a freshly created Versionable object.
    It must not have been cloned yet. Raises a SuspiciousOperation
    exception, otherwise.

    :param timestamp: a datetime.datetime instance
    """
    # Guard: historic items cannot be relocated in time.
    if not self.is_current:
        raise SuspiciousOperation(
            "Cannot relocate this Versionable instance in time, since it "
            "is a historical item")
    # Guard: neither can already-versioned instances (birth and start
    # dates diverge once an object has been cloned).
    if self.version_birth_date != self.version_start_date:
        raise SuspiciousOperation(
            "Cannot relocate this Versionable instance in time, since it "
            "is a versioned instance")
    if not isinstance(timestamp, datetime.datetime):
        raise ValueError("This is not a datetime.datetime timestamp")
    self.version_birth_date = self.version_start_date = timestamp
    return self
Force the create date of an object to be at a certain time; This method can be invoked only on a freshly created Versionable object. It must not have been cloned yet. Raises a SuspiciousOperation exception, otherwise. :param timestamp: a datetime.datetime instance
def get_default_template():
    """
    Returns default getTemplate request specification.

    :return: dict with the default UO template fields
    """
    return {
        "format": 1,
        "protocol": 1,
        "environment": Environment.DEV,
        # shows whether the UO should be for production (live),
        # test (pre-production testing), or dev (development)
        "maxtps": "one",  # maximum guaranteed TPS
        "core": "empty",  # how many cards have UO loaded permanently
        "persistence": "one_minute",
        # once loaded onto card, how long will the UO stay there without
        # use (this excludes the "core")
        "priority": "default",
        # this defines a) priority when the server capacity is fully
        # utilised and it also defines how quickly new copies of UO are
        # installed (pre-empting increasing demand)
        "separation": "time",
        # "complete" = only one UO can be loaded on a smartcard at one time
        "bcr": TemplateFields.yes,
        # "yes" will ensure the UO is replicated to provide high
        # availability for any possible service disruption
        "unlimited": TemplateFields.yes,
        # if "yes", we expect the data starts with an IV to initialize
        # decryption of data - this is for communication security
        "clientiv": TemplateFields.yes,
        # if "yes", we expect the data starting with a diversification 16B
        # for communication keys
        "clientdiv": TemplateFields.no,
        "resource": "global",
        # <1-32767>, a limit a seed card can provide to the EB service
        "credit": 32677,
        TemplateFields.generation: {
            TemplateFields.commkey: Gen.CLIENT,
            TemplateFields.billingkey: Gen.LEGACY_RANDOM,
            TemplateFields.appkey: Gen.LEGACY_RANDOM
        }
    }
Returns default getTemplate request specification. :return:
def pillar_dir(self):
    '''
    Returns the directory of the pillars (repo cache + branch + root)
    '''
    repo_dir = self.repo_dir
    root = self.root
    branch = self.branch
    if branch == 'trunk' or branch == 'base':
        working_dir = os.path.join(repo_dir, 'trunk', root)
        if not os.path.isdir(working_dir):
            log.error('Could not find %s/trunk/%s', self.repo_location, root)
        else:
            return os.path.normpath(working_dir)
    working_dir = os.path.join(repo_dir, 'branches', branch, root)
    if os.path.isdir(working_dir):
        return os.path.normpath(working_dir)
    # BUG FIX: the tags lookup previously joined onto the (missing)
    # branches path, producing .../branches/<branch>/<root>/tags/...;
    # tags live directly under the repo cache.
    working_dir = os.path.join(repo_dir, 'tags', branch, root)
    if os.path.isdir(working_dir):
        return os.path.normpath(working_dir)
    log.error('Could not find %s/branches/%s/%s',
              self.repo_location, branch, root)
    # Fall back to the cache root so callers still get a usable path.
    return repo_dir
Returns the directory of the pillars (repo cache + branch + root)
def register_callbacks(self, on_create, on_modify, on_delete):
    """Register callbacks for file creation, modification, and deletion"""
    for attr, callback in (('on_create', on_create),
                           ('on_modify', on_modify),
                           ('on_delete', on_delete)):
        setattr(self, attr, callback)
Register callbacks for file creation, modification, and deletion
def changes_in(self, rev_or_range):
    """
    :API: public

    Return the files changed in ``rev_or_range`` (paths relative to the
    build root), wrapping SCM failures in ``WorkspaceError``.
    """
    try:
        return self._scm.changes_in(rev_or_range,
                                    relative_to=get_buildroot())
    except Scm.ScmException as e:
        # Surface SCM errors with workspace context and the cause.
        raise self.WorkspaceError(
            "Problem detecting changes in {}.".format(rev_or_range), e)
:API: public
def get_edxml_with_aws_urls(self):
    """stub

    Return the problem edXML with local file references rewritten to
    their hosted (AWS) URLs.
    """
    edxml = self.get_edxml()
    soup = BeautifulSoup(edxml, 'xml')
    # tag name -> attribute that may reference an included file
    attrs = {
        'draggable': 'icon',
        'drag_and_drop_input': 'img',
        'files': 'included_files',
        'img': 'src'
    }
    # replace all file listings with an appropriate path...
    if len(self.my_osid_object.object_map['fileIds']) > 0:
        file_map = self.my_osid_object.get_files()
        for file_label, url in file_map.items():
            # Match the label followed by a dot, i.e. "<label>.<ext>".
            local_regex = re.compile(file_label + r'\.')
            for key, attr in attrs.items():
                # NOTE(review): ``key`` (the tag name) is unused here;
                # find_all matches ANY tag carrying the attribute —
                # confirm whether filtering by tag name was intended.
                search = {attr: local_regex}
                tags = soup.find_all(**search)
                for item in tags:
                    item[attr] = url
    return soup.find('problem').prettify()
stub
def check_auth(user): ''' Check if the user should or shouldn't be inside the system: - If the user is staff or superuser: LOGIN GRANTED - If the user has a Person and it is not "disabled": LOGIN GRANTED - Elsewhere: LOGIN DENIED ''' # Initialize authentication auth = None person = None # Check if there is an user if user: # It means that Django accepted the user and it is active if user.is_staff or user.is_superuser: # This is an administrator, let it in auth = user else: # It is a normal user, check if there is a person behind person = getattr(user, "person", None) if not person: # Check if there is related one person_related = getattr(user, "people", None) if person_related: # Must be only one if person_related.count() == 1: person = person_related.get() if person and ((person.disabled is None) or (person.disabled > timezone.now())): # There is a person, no disabled found or the found one is fine to log in auth = user # Return back the final decision return auth
Check if the user should or shouldn't be inside the system: - If the user is staff or superuser: LOGIN GRANTED - If the user has a Person and it is not "disabled": LOGIN GRANTED - Elsewhere: LOGIN DENIED
def detect_stream_mode(stream):
    '''
    detect_stream_mode - Detect the mode on a given stream

    @param stream <object> - A stream object. If "mode" is present,
      that will be used.

    @return <type> - ``bytes`` type or ``str`` type
    '''
    # Prefer the declared mode when the object exposes one and it is
    # explicit about binary ('b') or text ('t').
    if hasattr(stream, 'mode'):
        if 'b' in stream.mode:
            return bytes
        if 't' in stream.mode:
            return str

    # Otherwise probe with a zero-length read/recv and inspect the
    # type of the returned object (idiom fix: isinstance instead of
    # ``type(...) is str``).
    for probe in ('read', 'recv'):
        method = getattr(stream, probe, None)
        if method is not None:
            return str if isinstance(method(0), str) else bytes

    # Cannot figure it out, assume bytes.
    return bytes
detect_stream_mode - Detect the mode on a given stream @param stream <object> - A stream object If "mode" is present, that will be used. @return <type> - "Bytes" type or "str" type
def __clear_in_buffer(self):
    """
    Zeros out the in buffer

    :return: None
    """
    # Overwrite the buffer's payload with NUL bytes of the same length
    # rather than reallocating it.
    # NOTE(review): assumes __in_buffer supports len() and exposes a
    # writable ``.value`` (e.g. a ctypes/multiprocessing shared buffer)
    # — confirm against the buffer's construction site.
    self.__in_buffer.value = bytes(b'\0' * len(self.__in_buffer))
Zeros out the in buffer :return: None
def accuracy(self, test_set, format=None):
    """Compute the accuracy on a test set.

    :param test_set: A list of tuples of the form ``(text, label)``, or a
        filename.
    :param format: If ``test_set`` is a filename, the file format, e.g.
        ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the
        file format.
    """
    # ``basestring`` implies Python 2 compatibility (str or unicode).
    if isinstance(test_set, basestring):  # test_set is a filename
        test_data = self._read_data(test_set)
        # NOTE(review): ``format`` is accepted but never forwarded to
        # _read_data — confirm whether format detection happens there.
    else:  # test_set is a list of tuples
        test_data = test_set
    test_features = [(self.extract_features(d), c) for d, c in test_data]
    return nltk.classify.accuracy(self.classifier, test_features)
Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format.
def process(self):
    """
    Loops over the underlying queue of events and processes them in
    order.
    """
    assert self.queue is not None
    # Runs forever; intended as a worker loop (e.g. on its own thread).
    while True:
        event = self.queue.get()  # blocks until an event is available
        # pre_process_event acts as a filter/veto hook for the event.
        if self.pre_process_event(event):
            self.invoke_handlers(event)
        self.queue.task_done()
Loops over the underlying queue of events and processes them in order.
def unionfs(rw='rw', ro=None, union='union'):
    """
    Decorator for the UnionFS feature.

    This configures a unionfs for projects. The given base_dir and/or
    image_dir are layered as follows:
     image_dir=RW:base_dir=RO

    All writes go to the image_dir, while base_dir delivers the
    (read-only) versions of the rest of the filesystem.

    The unified version will be provided in the project's builddir.
    Unmouting is done as soon as the function completes.

    Args:
        rw: writeable storage area for the unified fuse filesystem.
        ro: read-only storage area for the unified fuse filesystem.
        union: mountpoint of the unified fuse filesystem.
    """
    from functools import wraps

    def wrap_in_union_fs(func):
        """
        Function that wraps a given function inside the file system.

        Args:
            func: The function that needs to be wrapped inside the unions fs.
        Return:
            The file system with the function wrapped inside.
        """
        @wraps(func)
        def wrap_in_union_fs_func(project, *args, **kwargs):
            """
            Wrap the func in the UnionFS mount stack.

            We make sure that the mount points all exist and stack up
            the directories for the unionfs.  All directories outside of
            the default build environment are tracked for deletion.
            """
            container = project.container
            # No container or already inside one: nothing to union.
            if container is None or in_container():
                return func(project, *args, **kwargs)

            build_dir = local.path(project.builddir)
            LOG.debug("UnionFS - Project builddir: %s", project.builddir)
            if __unionfs_is_active(root=build_dir):
                LOG.debug(
                    "UnionFS already active in %s, nesting not supported.",
                    build_dir)
                return func(project, *args, **kwargs)

            ro_dir = local.path(container.local)
            rw_dir = build_dir / rw
            un_dir = build_dir / union
            LOG.debug("UnionFS - RW: %s", rw_dir)

            unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir)
            project_builddir_bak = project.builddir
            # Point the project at the union mount while func runs.
            project.builddir = un_dir

            proc = unionfs_cmd.popen()
            # Busy-wait until the mount appears or the helper dies.
            while (not __unionfs_is_active(root=un_dir)) and \
                  (proc.poll() is None):
                pass

            ret = None
            if proc.poll() is None:
                try:
                    with local.cwd(un_dir):
                        ret = func(project, *args, **kwargs)
                finally:
                    # Always restore the original builddir.
                    project.builddir = project_builddir_bak

                from signal import SIGINT

                is_running = proc.poll() is None
                # Ask the fuse helper to unmount (SIGINT); escalate to
                # kill when it does not exit within the timeout.
                while __unionfs_is_active(root=un_dir) and is_running:
                    try:
                        proc.send_signal(SIGINT)
                        proc.wait(timeout=3)
                    except subprocess.TimeoutExpired:
                        proc.kill()
                        is_running = False
                LOG.debug("Unionfs shut down.")

            # A still-active mount at this point is a hard failure.
            if __unionfs_is_active(root=un_dir):
                raise UnmountError()

            return ret

        return wrap_in_union_fs_func

    return wrap_in_union_fs
Decorator for the UnionFS feature. This configures a unionfs for projects. The given base_dir and/or image_dir are layered as follows: image_dir=RW:base_dir=RO All writes go to the image_dir, while base_dir delivers the (read-only) versions of the rest of the filesystem. The unified version will be provided in the project's builddir. Unmouting is done as soon as the function completes. Args: rw: writeable storage area for the unified fuse filesystem. ro: read-only storage area for the unified fuse filesystem. union: mountpoint of the unified fuse filesystem.
def delete(self):
    """Delete this stream from Device Cloud along with its history

    This call will return None on success and raise an exception in the
    event of an error performing the deletion.

    :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
    :raises devicecloud.streams.NoSuchStreamException: if this stream has already been deleted
    """
    stream_path = "/ws/DataStream/{}".format(self.get_stream_id())
    try:
        self._conn.delete(stream_path)
    except DeviceCloudHttpException as http_exception:
        # Anything but a 404 is unexpected; propagate it unchanged.
        if http_exception.response.status_code != 404:
            raise http_exception
        # A 404 means the stream is already gone, so translate it into
        # the more specific exception.  (The server sometimes just
        # returns 200 again instead, so this branch may be rare.)
        raise NoSuchStreamException()
Delete this stream from Device Cloud along with its history This call will return None on success and raise an exception in the event of an error performing the deletion. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has already been deleted
def get_language(self):
    """\
    Return the article's two-letter language when meta-language use is
    enabled and a meta language is available; otherwise return the
    configured target language.
    """
    # We don't want to force the target language, so prefer the
    # article's own meta language when the config allows it.
    meta_lang = self.article.meta_lang if self.config.use_meta_language else None
    if meta_lang:
        return meta_lang[:2]
    return self.config.target_language
\ Returns the language used by the article, or the configuration's target language
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given (inflected) adjective.
    """
    base = predicative(adjective)
    # Superlative suffix drops its leading "s" after a sibilant stem:
    # groß => großt, schön => schönst.
    if suffix == SUPERLATIVE and base.endswith(("s", u"ß")):
        suffix = suffix[1:]
    # Insert the suffix between the predicative stem and the original
    # inflected tail: große => großere, schönes => schöneres.
    stem_len = len(base)
    return adjective[:stem_len] + suffix + adjective[stem_len:]
Returns the comparative or superlative form of the given (inflected) adjective.
def _startRecording(self, filename): """ Start recording the session to a file for debug purposes. """ self.setOption('_log_file_name', filename) self.setOption('_log_input_only', True) self.setOption('_log', True)
Start recording the session to a file for debug purposes.
def insert_paragraph_before(self, text=None, style=None):
    """
    Return a newly created paragraph, inserted directly before this
    paragraph. If *text* is supplied, the new paragraph contains that text
    in a single run. If *style* is provided, that style is assigned to the
    new paragraph.
    """
    new_paragraph = self._insert_paragraph_before()
    # Only add a run when there is non-empty text to put in it.
    if text:
        new_paragraph.add_run(text)
    # style=None means "leave the default style alone".
    if style is not None:
        new_paragraph.style = style
    return new_paragraph
Return a newly created paragraph, inserted directly before this paragraph. If *text* is supplied, the new paragraph contains that text in a single run. If *style* is provided, that style is assigned to the new paragraph.
def _isLastCodeColumn(self, block, column): """Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment. """ return column >= self._lastColumn(block) or \ self._isComment(block, self._nextNonSpaceColumn(block, column + 1))
Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment.
def cdsparse(self, record):
    """
    Finds core genes, and records gene names and sequences in dictionaries

    :param record: SeqIO record
    """
    try:
        # Find genes that are present in all strains of interest - the
        # number of times the gene is found is equal to the number of
        # strains. Earlier parsing ensures that the same gene is not
        # present in a strain more than once.
        if self.genes[self.genenames[record.id]] == len(self.runmetadata.samples):
            # Hoist the repeated lookups into locals.
            gene_name = self.genenames[record.id]
            sequence = str(record.seq)
            # setdefault initialises the set on first sight of a key,
            # replacing the previous try/except KeyError initialisation.
            self.genesequence.setdefault(gene_name, set()).add(sequence)
            self.coresequence.setdefault(sequence, set()).add(record.id)
    except KeyError:
        # record.id (or its gene name) is unknown: not a core gene, skip it.
        pass
Finds core genes, and records gene names and sequences in dictionaries :param record: SeqIO record
def consume_item(rlp, start):
    """Read an item from an RLP string.

    :param rlp: the rlp string to read from
    :param start: the position at which to start reading
    :returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
              the read item, per_item_rlp is a list containing the RLP
              encoding of each item and ``end`` is the position of the
              first unprocessed byte
    """
    # Decode the length prefix first, then hand the payload off.
    prefix_end, item_type, item_length, payload_start = \
        consume_length_prefix(rlp, start)
    return consume_payload(rlp, prefix_end, payload_start, item_type, item_length)
Read an item from an RLP string. :param rlp: the rlp string to read from :param start: the position at which to start reading :returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is the read item, per_item_rlp is a list containing the RLP encoding of each item and ``end`` is the position of the first unprocessed byte
def tomof(self, indent=MOF_INDENT, maxline=MAX_MOF_LINE, line_pos=0):
    """
    Return a MOF string with the specification of this CIM qualifier as a
    qualifier value.

    The items of array values are tried to keep on the same line. If the
    generated line would exceed the maximum MOF line length, the value is
    split into multiple lines, on array item boundaries, and/or within
    long strings on word boundaries.

    If a string value (of a scalar value, or of an array item) is split
    into multiple lines, the first line of the value is put onto a line
    on its own.

    Parameters:

      indent (:term:`integer`): For a multi-line result, the number of
        spaces to indent each line except the first line (on which the
        qualifier name appears). For a single-line result, ignored.

      maxline (:term:`integer`): Maximum MOF line length used when
        deciding where to split the value.

      line_pos (:term:`integer`): Column position on the current output
        line at which this qualifier starts.

    Returns:

      :term:`unicode string`: MOF string.
    """
    mof = []

    mof.append(self.name)
    mof.append(u' ')
    # Array values are enclosed in braces, scalar values in parentheses.
    if isinstance(self.value, list):
        mof.append(u'{')
    else:
        mof.append(u'(')
    line_pos += len(u''.join(mof))

    # Assume in line_pos that the extra space would be needed
    val_str, line_pos = _value_tomof(
        self.value, self.type, indent, maxline, line_pos + 1, 3, True)

    # Empty arrays are represented as val_str=''
    if val_str and val_str[0] != '\n':
        # The extra space was actually needed
        mof.append(u' ')
    else:
        # Adjust by the extra space that was not needed
        line_pos -= 1

    mof.append(val_str)
    # Close with the bracket matching the opener chosen above.
    if isinstance(self.value, list):
        mof.append(u' }')
    else:
        mof.append(u' )')
    mof_str = u''.join(mof)
    return mof_str
Return a MOF string with the specification of this CIM qualifier as a qualifier value. The items of array values are tried to keep on the same line. If the generated line would exceed the maximum MOF line length, the value is split into multiple lines, on array item boundaries, and/or within long strings on word boundaries. If a string value (of a scalar value, or of an array item) is split into multiple lines, the first line of the value is put onto a line on its own. Parameters: indent (:term:`integer`): For a multi-line result, the number of spaces to indent each line except the first line (on which the qualifier name appears). For a single-line result, ignored. Returns: :term:`unicode string`: MOF string.
def to_mongo(self, disjunction=True):
    """Create from current state a valid MongoDB query expression.

    :param disjunction: if True, combine the main clauses (and any
        ``$where`` clause) with ``$or``; otherwise merge them into a
        single conjunctive document.
    :return: MongoDB query expression
    :rtype: dict
    """
    q = {}
    # add all the main clauses to `q`
    clauses = [e.expr for e in self._main]
    if clauses:
        if disjunction:
            if len(clauses) + len(self._where) > 1:
                q['$or'] = clauses
            else:
                # simplify 'or' of one thing
                q.update(clauses[0])
        else:
            for c in clauses:
                q.update(c)
    # add all the main2 clauses; these are not or'ed
    for c in (e.expr for e in self._main2):
        # add to existing stuff for the field
        for field in c:
            if field in q:
                q[field].update(c[field])
            else:
                q.update(c)
    # add where clauses, if any, to `q`
    if self._where:
        # Join all $where expressions with the operator implied by the
        # first clause's direction.
        wsep = ' || ' if self._where[0].is_reversed else ' && '
        where_clause = wsep.join([w.expr for w in self._where])
        if disjunction:
            # Idiom fix: replaced `if not '$or' in q` existence check
            # with setdefault before appending.
            q.setdefault('$or', []).append({'$where': where_clause})
        else:
            q['$where'] = where_clause
    return q
Create from current state a valid MongoDB query expression. :return: MongoDB query expression :rtype: dict
def _get_record(self, record_type): """This overrides _get_record in osid.Extensible. Perhaps we should leverage it somehow? """ if (not self.has_record_type(record_type) and record_type.get_identifier() not in self._record_type_data_sets): raise errors.Unsupported() if str(record_type) not in self._records: record_initialized = self._init_record(str(record_type)) if record_initialized and str(record_type) not in self._my_map['recordTypeIds']: self._my_map['recordTypeIds'].append(str(record_type)) return self._records[str(record_type)]
This overrides _get_record in osid.Extensible. Perhaps we should leverage it somehow?
def get_default_datatable_kwargs(self, **kwargs):
    """
    Builds the default set of kwargs for initializing a Datatable class.

    Note that by default the MultipleDatatableMixin does not support any
    configuration via the view's class attributes, and instead relies
    completely on the Datatable class itself to declare its configuration
    details.
    """
    kwargs['view'] = self

    # The request is provided by default, but when the view is
    # instantiated outside of the request cycle (e.g. to embed its
    # datatable elsewhere) it may be absent, so only consult it when the
    # attribute exists.
    if hasattr(self, 'request'):
        request = self.request
        kwargs['url'] = request.path
        kwargs['query_config'] = getattr(request, request.method)
    else:
        kwargs['query_config'] = {}

    return kwargs
Builds the default set of kwargs for initializing a Datatable class. Note that by default the MultipleDatatableMixin does not support any configuration via the view's class attributes, and instead relies completely on the Datatable class itself to declare its configuration details.
async def flush(self, request: Request, stacks: List[Stack]):
    """
    Add a typing stack after each stack.
    """
    augmented: List[Stack] = []
    for stack in stacks:
        augmented.extend(self.typify(stack))

    # De-activate a trailing typing indicator so the conversation does
    # not end while still "typing".
    if len(augmented) > 1 and augmented[-1] == Stack([lyr.Typing()]):
        augmented[-1].get_layer(lyr.Typing).active = False

    await self.next(request, augmented)
Add a typing stack after each stack.
def namedb_select_where_unexpired_names(current_block, only_registered=True):
    """
    Generate part of a WHERE clause that selects from name records joined with
    namespaces (or projections of them) that are not expired.
    Also limit to names that are registered at this block, if only_registered=True.

    If only_registered is False, then as long as current_block is before the
    expire block, then the name will be returned (but the name may not have
    existed at that block)

    Returns a tuple (SQL fragment string, tuple of query arguments).
    """
    # Lifetime multiplier and grace period follow the epoch rules in
    # effect at current_block ('*' applies to all namespaces).
    ns_lifetime_multiplier = get_epoch_namespace_lifetime_multiplier(current_block, '*')
    ns_grace_period = get_epoch_namespace_lifetime_grace_period(current_block, '*')

    # A name is unexpired when either:
    #  * its namespace is READY and either the namespace ready block or the
    #    name's last renewal, extended by (lifetime * multiplier) + grace,
    #    still covers current_block; or
    #  * its namespace is only REVEALed and current_block lies within the
    #    reveal window (reveal_block .. reveal_block + NAMESPACE_REVEAL_EXPIRE).
    unexpired_query_fragment = "(" + \
                                    "(" + \
                                        "namespaces.op = ? AND " + \
                                        "(" + \
                                            "(namespaces.ready_block + ((namespaces.lifetime * {}) + {}) > ?) OR ".format(ns_lifetime_multiplier, ns_grace_period) + \
                                            "(name_records.last_renewed + ((namespaces.lifetime * {}) + {}) >= ?)".format(ns_lifetime_multiplier, ns_grace_period) + \
                                        ")" + \
                                    ") OR " + \
                                    "(" + \
                                        "namespaces.op = ? AND namespaces.reveal_block <= ? AND ? < namespaces.reveal_block + ?" + \
                                    ")" + \
                               ")"

    # Placeholder values, in the order the '?'s appear above.
    unexpired_query_args = (NAMESPACE_READY, current_block, current_block,
                            NAMESPACE_REVEAL, current_block, current_block, NAMESPACE_REVEAL_EXPIRE)

    if only_registered:
        # also limit to only names registered before this block
        unexpired_query_fragment = '(name_records.first_registered <= ? AND {})'.format(unexpired_query_fragment)
        unexpired_query_args = (current_block,) + unexpired_query_args

    return (unexpired_query_fragment, unexpired_query_args)
Generate part of a WHERE clause that selects from name records joined with namespaces (or projections of them) that are not expired. Also limit to names that are registered at this block, if only_registered=True. If only_registered is False, then as long as current_block is before the expire block, then the name will be returned (but the name may not have existed at that block)
def R(self,*args,**kwargs):
    """
    NAME:

       R

    PURPOSE:

       return cylindrical radius at time t

    INPUT:

       t - (optional) time at which to get the radius

       ro= (Object-wide default) physical scale for distances to use to convert

       use_physical= use to override Object-wide default for using a physical scale for output

    OUTPUT:

       R(t)

    HISTORY:

       2010-09-21 - Written - Bovy (NYU)

    """
    orbit = self(*args,**kwargs)
    # A 1D result corresponds to a single time; otherwise return the
    # radius for every requested time (first row of the 2D result).
    if len(orbit.shape) == 1:
        return orbit[0]
    return orbit[0,:]
NAME: R PURPOSE: return cylindrical radius at time t INPUT: t - (optional) time at which to get the radius ro= (Object-wide default) physical scale for distances to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: R(t) HISTORY: 2010-09-21 - Written - Bovy (NYU)
def _load_data(self):
    """ Load data from raw_data or file_path """
    # Nothing to do when data is already present, or when the format is
    # native Python objects.
    if self.raw_data is not None or self.data_format is FormatType.PYTHON:
        return
    if self.file_path is None:
        raise ArgumentInvalid('One of "raw_data" or "file_path" should be set!')
    readable = os.path.isfile(self.file_path) and os.access(self.file_path, os.R_OK)
    if not readable:
        raise ArgumentInvalid('"file_path" should be a valid path to an exist file with read permission!')
    with open(self.file_path) as f:
        self.raw_data = f.read()
Load data from raw_data or file_path
def cur_model(model=None):
    """Get and/or set the current model.

    If ``model`` is given, set the current model to ``model`` and return it.
    ``model`` can be the name of a model object, or a model object itself.
    If ``model`` is not given, the current model is returned.
    """
    if model is None:
        # Getter: return the interface of the current model, if any.
        current = _system.currentmodel
        return current.interface if current is not None else None
    # Setter: accept either a model object or a model name.
    if isinstance(model, _Model):
        _system.currentmodel = model._impl
    else:
        _system.currentmodel = _system.models[model]
    return _system.currentmodel.interface
Get and/or set the current model. If ``model`` is given, set the current model to ``model`` and return it. ``model`` can be the name of a model object, or a model object itself. If ``model`` is not given, the current model is returned.
def clean_all(G, settings):
    """
    Removes all the output files from all targets. Takes
    the graph as the only argument

    Args:
        The networkx graph object
        The settings dictionary

    Returns:
        0 if successful
        1 if removing even one file failed
    """
    quiet = settings["quiet"]  # retained for interface parity; unused here
    recon = settings["recon"]
    sprint = settings["sprint"]
    error = settings["error"]

    # Collect every declared output of every target, plus the hash store.
    all_outputs = []
    for node in G.nodes(data=True):
        if "output" in node[1]:
            for item in get_all_outputs(node[1]):
                all_outputs.append(item)
    all_outputs.append(".shastore")

    retcode = 0
    for item in sorted(all_outputs):
        if os.path.isfile(item):
            if recon:
                sprint("Would remove file: {}".format(item))
                continue
            # Bug fix: the '{}' placeholder was never formatted with the
            # file name in the original message.
            sprint("Attempting to remove file '{}'".format(item),
                   level="verbose")
            try:
                os.remove(item)
                sprint("Removed file", level="verbose")
            except OSError:
                # Narrowed from a bare except: only filesystem errors
                # count as a failed removal.
                errmes = "Error: file '{}' failed to be removed"
                error(errmes.format(item))
                retcode = 1

    if not retcode and not recon:
        sprint("All clean", color=True)
    return retcode
Removes all the output files from all targets. Takes the graph as the only argument Args: The networkx graph object The settings dictionary Returns: 0 if successful 1 if removing even one file failed
def parse_full_atom(data):
    """Some atoms are versioned. Split them up in (version, flags, payload).

    Can raise ValueError.
    """
    if len(data) < 4:
        raise ValueError("not enough data")
    # Byte 0 is the version; bytes 1-3 are a 24-bit big-endian flag field
    # (zero-padded to 32 bits for decoding); the rest is the payload.
    version_byte, flag_bytes, payload = data[0:1], data[1:4], data[4:]
    return ord(version_byte), cdata.uint_be(b"\x00" + flag_bytes), payload
Some atoms are versioned. Split them up in (version, flags, payload). Can raise ValueError.
def clean(self, data):
    """Method returns cleaned list of stock closing prices (i.e.
    dict(date=datetime.date(2015, 1, 2), price='23.21'))."""
    records = data if isinstance(data, list) else [data]
    return [
        {
            'price': record['Adj_Close'],
            'date': datetime.datetime.strptime(record['Date'], '%Y-%m-%d').date(),
        }
        for record in records
    ]
Method returns cleaned list of stock closing prices (i.e. dict(date=datetime.date(2015, 1, 2), price='23.21')).
def read_one(self, sequence):
    """
    Reads one item from the Ringbuffer. If the sequence is one beyond the
    current tail, this call blocks until an item is added. Currently it
    isn't possible to control how long this call is going to block.

    :param sequence: (long), the sequence of the item to read.
    :return: (object), the read item.
    """
    # Validate locally before going over the wire.
    error_message = "sequence can't be smaller than 0"
    check_not_negative(sequence, error_message)
    return self._encode_invoke(ringbuffer_read_one_codec, sequence=sequence)
Reads one item from the Ringbuffer. If the sequence is one beyond the current tail, this call blocks until an item is added. Currently it isn't possible to control how long this call is going to block. :param sequence: (long), the sequence of the item to read. :return: (object), the read item.
def update_subscription_user_settings(self, user_settings, subscription_id, user_id):
    """UpdateSubscriptionUserSettings.
    [Preview API] Update the specified user's settings for the specified subscription. This API is typically used to opt in or out of a shared subscription. User settings can only be applied to shared subscriptions, like team subscriptions or default subscriptions.
    :param :class:`<SubscriptionUserSettings> <azure.devops.v5_0.notification.models.SubscriptionUserSettings>` user_settings:
    :param str subscription_id:
    :param str user_id: ID of the user
    :rtype: :class:`<SubscriptionUserSettings> <azure.devops.v5_0.notification.models.SubscriptionUserSettings>`
    """
    # Only include route values that were actually supplied.
    route_values = {}
    for route_key, param_name, value in (
            ('subscriptionId', 'subscription_id', subscription_id),
            ('userId', 'user_id', user_id)):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    payload = self._serialize.body(user_settings, 'SubscriptionUserSettings')
    response = self._send(http_method='PUT',
                          location_id='ed5a3dff-aeb5-41b1-b4f7-89e66e58b62e',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=payload)
    return self._deserialize('SubscriptionUserSettings', response)
UpdateSubscriptionUserSettings. [Preview API] Update the specified user's settings for the specified subscription. This API is typically used to opt in or out of a shared subscription. User settings can only be applied to shared subscriptions, like team subscriptions or default subscriptions. :param :class:`<SubscriptionUserSettings> <azure.devops.v5_0.notification.models.SubscriptionUserSettings>` user_settings: :param str subscription_id: :param str user_id: ID of the user :rtype: :class:`<SubscriptionUserSettings> <azure.devops.v5_0.notification.models.SubscriptionUserSettings>`
def _receive(self, root, directory, dirs, files, include, exclude):
    """Internal function processing each yield from os.walk.

    :param root: walk root, joined with *directory* to form the path.
    :param directory: directory currently being visited.
    :param dirs: mutable list of subdirectories; pruned in place to stop
        os.walk from descending further.
    :param files: file names in this directory.
    :param include: parent include state (or falsy at the walk root).
    :param exclude: parent exclude state (or falsy at the walk root).
    :returns: tuple (matched file names, include state, exclude state)
        for this directory.
    """
    self._received += 1
    if not self.symlinks:
        # Drop symlinked files when symlink traversal is disabled.
        where = root + os.path.sep + directory + os.path.sep
        files = [
            file_name for file_name in files
            if not os.path.islink(where + file_name)
        ]
    # Derive this directory's pattern state from the parent's; at the
    # root (no parent state) fall back to the instance-level patterns.
    include = FileSetState("Include", directory, include,
                           None if include else self.include)
    exclude = FileSetState("Exclude", directory, exclude,
                           None if exclude else self.exclude)
    if exclude.matches_all_files_all_subdirs():
        # Exclude everything and do no traverse any subdirectories
        del dirs[0:]
        matched = set()
    else:
        if include.no_possible_matches_in_subdirs():
            # Do no traverse any subdirectories
            del dirs[0:]
        # Files must match an include pattern and not an exclude pattern.
        matched = include.match(set(files))
        matched -= exclude.match(matched)
    return matched, include, exclude
Internal function processing each yield from os.walk.
def run(self, cmd=None):
    """
    Call this function at the end of your class's `__init__` function.

    Assembles the full shell command (redirections, optional pipe),
    spawns it via the configured shell, waits for completion, validates
    the return code against `self.return_ok`, and records a resource
    usage profile plus program output through `diagnostics`.

    :param cmd: optional iterable of command tokens; defaults to self.cmd.
    """
    diagnostics.prefix.append(self.name)

    if not cmd:
        cmd = self.cmd

    # All stderr (and by default stdout) is appended to <name>.log.
    stderr = os.path.abspath(self.name + '.log')
    self.args.append('2>>'+stderr)
    if self.pipe:
        self.args += ('|', self.pipe, '2>>'+stderr)

    # Write to a stdout file if it was set by the derived class.
    # Otherwise, stdout and stderr will be combined into the log file.
    if self.stdout:
        stdout = os.path.abspath(self.stdout)
        self.args.append('1>'+stdout)
        diagnostics.log('stdout', stdout)
    elif self.stdout_append:
        stdout = os.path.abspath(self.stdout_append)
        self.args.append('1>>'+stdout)
        diagnostics.log('stdout', stdout)
    else:
        self.args.append('1>>'+stderr)

    # Print timestamp to log.
    # Fix: use a context manager so the handle is closed deterministically
    # instead of relying on garbage collection.
    with open(stderr, 'a') as log_file:
        log_file.write("[biolite] timestamp=%s\n" % utils.timestamp())
    diagnostics.log('log', stderr)

    cmd = ' '.join(chain(cmd, map(str, self.args)))
    diagnostics.log('command', cmd)

    start = time.time()
    save_cwd = os.getcwd()
    try:
        os.chdir(self.cwd)
        # Run through the shell so the redirections/pipe above apply.
        spawn_pid = os.spawnle(os.P_NOWAIT, self.shell, self.shell, '-c', cmd, self.env)
        wait_pid, retcode, rusage = os.wait4(spawn_pid, 0)
        if wait_pid != spawn_pid:
            utils.die("could not wait for process %d: got %d" % (spawn_pid, wait_pid))
        os.chdir(save_cwd)
    except OSError as e:
        utils.info(e)
        utils.die("could not run wrapper for command:\n%s" % cmd)
    elapsed = time.time() - start
    retcode = os.WEXITSTATUS(retcode)

    if (self.return_ok is not None) and (self.return_ok != retcode):
        # Give some context to the non-zero return.
        if os.path.isfile(stderr):
            subprocess.call(['tail', '-3', stderr])
        utils.die("non-zero return (%d) from command:\n%s" % (retcode, cmd))

    # Log profile.
    diagnostics.prefix.append('profile')
    diagnostics.log('name', self.name)
    diagnostics.log('return', retcode)
    diagnostics.log('walltime', elapsed)
    diagnostics.log('usertime', rusage.ru_utime)
    diagnostics.log('systime', rusage.ru_stime)
    if config.uname == 'Darwin':
        # ru_maxrss is reported in bytes on macOS, kilobytes elsewhere.
        diagnostics.log('maxrss', rusage.ru_maxrss / 1024)
    else:
        diagnostics.log('maxrss', rusage.ru_maxrss)
    diagnostics.prefix.pop()

    # Reverse any output patterns, since they will be matched against
    # program output from the last line backward.
    if self.output_patterns:
        self.output_patterns.reverse()
    diagnostics.log_program_output(stderr, self.output_patterns)
    diagnostics.prefix.pop()
Call this function at the end of your class's `__init__` function.
def debug_log_template(self, record):
    """
    Return the prefix for the log message. Template for Formatter.

    Parameters
    ----------
    record : :py:class:`logging.LogRecord`
        This is passed in from inside the
        :py:meth:`logging.Formatter.format` record.

    Returns
    -------
    str
        Log template.
    """
    reset = Style.RESET_ALL
    # Each segment carries its own colour codes; %-placeholders are
    # filled in later by logging.Formatter.
    level_part = (
        LEVEL_COLORS.get(record.levelname)
        + Style.BRIGHT
        + '(%(levelname)1.1s)'
        + Style.RESET_ALL
        + ' '
    )
    time_part = (
        '['
        + Fore.BLACK
        + Style.DIM
        + Style.BRIGHT
        + '%(asctime)s'
        + Fore.RESET
        + Style.RESET_ALL
        + ']'
    )
    name_part = (
        ' '
        + Fore.WHITE
        + Style.DIM
        + Style.BRIGHT
        + '%(name)s'
        + Fore.RESET
        + Style.RESET_ALL
        + ' '
    )
    location_part = Fore.GREEN + Style.BRIGHT + '%(module)s.%(funcName)s()'
    line_part = (
        Fore.BLACK
        + Style.DIM
        + Style.BRIGHT
        + ':'
        + Style.RESET_ALL
        + Fore.CYAN
        + '%(lineno)d'
    )
    return ''.join(
        [reset, level_part, time_part, name_part, location_part, line_part, reset]
    )
Return the prefix for the log message. Template for Formatter. Parameters ---------- record : :py:class:`logging.LogRecord` This is passed in from inside the :py:meth:`logging.Formatter.format` record. Returns ------- str Log template.
def bridge_list():
    '''
    Lists all existing real and fake bridges.

    Returns:
        List of bridges (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    # Run the ovs-vsctl query and convert its stdout to a list,
    # honouring the return code.
    result = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(result['retcode'], result['stdout'])
Lists all existing real and fake bridges. Returns: List of bridges (or empty list), False on failure. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.bridge_list
def profiling_query_formatter(view, context, query_document, name):
    """Format a ProfilingQuery entry for a ProfilingRequest detail field

    Parameters
    ----------
    query_document : model.ProfilingQuery
        The query being rendered; `view`, `context` and `name` follow the
        usual admin list-formatter call signature.
    """
    # Render one Bootstrap-style row per query: a linked command badge,
    # the raw command, and the duration.
    return Markup(
        ''.join(
            [
                '<div class="pymongo-query row">',
                '<div class="col-md-1">',
                # NOTE(review): this <a> is closed in the last column
                # below, so the per-column markup is unbalanced — confirm
                # this renders as intended before touching it.
                '<a href="{}">'.format(query_document.get_admin_url(_external=True)),
                mongo_command_name_formatter(
                    view, context, query_document, 'command_name'
                ),
                # '<span class="label {}">{}</span>'.format(
                #     command_name_map.get(query_document.command_name, 'label-default'),
                #     query_document.command_name,
                # ),
                '</div>',
                '<div class="col-md-10">',
                profiling_pure_query_formatter(
                    None, None, query_document, 'command', tag='pre'
                ),
                '</div>',
                '<div class="col-md-1">',
                '<small>{} ms</small>'.format(query_document.duration),
                '</a>',
                '</div>',
                '</div>',
            ]
        )
    )
Format a ProfilingQuery entry for a ProfilingRequest detail field Parameters ---------- query_document : model.ProfilingQuery
def plot_shapes(df_shapes, shape_i_columns, axis=None, autoxlim=True,
                autoylim=True, **kwargs):
    '''
    Plot shapes from table/data-frame where each row corresponds to a vertex
    of a shape.  Shape vertices are grouped by `shape_i_columns`.

    For example, consider the following dataframe:

        shape_i  vertex_i           x           y
     0        0         0   81.679949   264.69306
     1        0         1   81.679949   286.51788
     2        0         2  102.870040   286.51788
     3        0         3  102.870040   264.69306
     4        1         0  103.114170   264.40011
     5        1         1  103.114170   242.72177
     6        1         2   81.435824   242.72177
     7        1         3   81.435824   264.40011
     8        2         0  124.841340   264.69306
     9        2         1  103.651250   264.69306
    10        2         2  103.651250   286.37141
    11        2         3  124.841340   286.37141

    This dataframe corresponds to three shapes, with (ordered) shape
    vertices grouped by `shape_i`.  Note that the column `vertex_i` is
    not required.

    :param axis: matplotlib axis to draw on; a new figure is created
        when omitted.
    :param autoxlim, autoylim: fit the axis limits to the data extents.
    :returns: the matplotlib axis drawn on.
    '''
    if axis is None:
        fig, axis = plt.subplots()

    props = itertools.cycle(mpl.rcParams['axes.prop_cycle'])
    color = kwargs.pop('fc', None)

    # Cycle through default colors to set face color, unless face color
    # was set explicitly.
    # Fix: `props.next()` only exists on Python 2 iterators; the builtin
    # `next()` works on both Python 2 and 3.
    patches = [Polygon(df_shape_i[['x', 'y']].values,
                       fc=next(props)['color'] if color is None else color,
                       **kwargs)
               for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns)]

    collection = PatchCollection(patches)
    axis.add_collection(collection)

    xy_stats = df_shapes[['x', 'y']].describe()
    if autoxlim:
        axis.set_xlim(*xy_stats.x.loc[['min', 'max']])
    if autoylim:
        axis.set_ylim(*xy_stats.y.loc[['min', 'max']])
    return axis
Plot shapes from table/data-frame where each row corresponds to a vertex of a shape. Shape vertices are grouped by `shape_i_columns`. For example, consider the following dataframe: shape_i vertex_i x y 0 0 0 81.679949 264.69306 1 0 1 81.679949 286.51788 2 0 2 102.87004 286.51788 3 0 3 102.87004 264.69306 4 1 0 103.11417 264.40011 5 1 1 103.11417 242.72177 6 1 2 81.435824 242.72177 7 1 3 81.435824 264.40011 8 2 0 124.84134 264.69306 9 2 1 103.65125 264.69306 10 2 2 103.65125 286.37141 11 2 3 124.84134 286.37141 This dataframe corresponds to three shapes, with (ordered) shape vertices grouped by `shape_i`. Note that the column `vertex_i` is not required.
def find(self, *args, **kwargs):
    """Create a :class:`MotorCursor`. Same parameters as for
    PyMongo's :meth:`~pymongo.collection.Collection.find`.

    Note that ``find`` does not require an ``await`` expression, because
    ``find`` merely creates a :class:`MotorCursor` without performing any
    operations on the server. ``MotorCursor`` methods such as
    :meth:`~MotorCursor.to_list`
    perform actual operations.
    """
    # Strip any Motor session wrappers before delegating to PyMongo.
    pymongo_cursor = self.delegate.find(
        *unwrap_args_session(args), **unwrap_kwargs_session(kwargs))
    cursor_class = create_class_with_framework(
        AgnosticCursor, self._framework, self.__module__)
    return cursor_class(pymongo_cursor, self)
Create a :class:`MotorCursor`. Same parameters as for PyMongo's :meth:`~pymongo.collection.Collection.find`. Note that ``find`` does not require an ``await`` expression, because ``find`` merely creates a :class:`MotorCursor` without performing any operations on the server. ``MotorCursor`` methods such as :meth:`~MotorCursor.to_list` perform actual operations.
def password(self, password):
    """ Encode a string and set as password """
    from boiler.user.util.passlib import passlib_context
    # Hash the (stringified) plaintext and store only the digest.
    self._password = passlib_context.encrypt(str(password))
Encode a string and set as password
def log(verbose=False):
    """
    print a log test

    :param verbose: show more logs
    """
    # Apply the verbosity setting before emitting anything.
    terminal.log.config(verbose=verbose)
    # Emit one message per level; the verbose one is presumably
    # suppressed unless verbose=True — confirm against terminal.log.
    terminal.log.info('this is a info message')
    terminal.log.verbose.info('this is a verbose message')
print a log test :param verbose: show more logs
def new(preset, name, silent, update):
    """Create new default preset

    \b
    Usage:
        $ be new ad
        "blue_unicorn" created
        $ be new film --name spiderman
        "spiderman" created
    """
    # NOTE(review): 'self' is not a parameter of this function —
    # presumably it is injected by the surrounding CLI framework or
    # closure; verify before refactoring.
    if self.isactive():
        lib.echo("Please exit current preset before starting a new")
        sys.exit(lib.USER_ERROR)

    if not name:
        # No name given: pick a random one, retrying up to ~10 times to
        # avoid colliding with an existing project.
        count = 0
        name = lib.random_name()
        while name in _extern.projects():
            if count > 10:
                lib.echo("ERROR: Couldn't come up with a unique name :(")
                sys.exit(lib.USER_ERROR)
            name = lib.random_name()
            count += 1

    project_dir = lib.project_dir(_extern.cwd(), name)
    if os.path.exists(project_dir):
        lib.echo("\"%s\" already exists" % name)
        sys.exit(lib.USER_ERROR)

    # "user/preset" refers to a repository directly; a bare name is
    # looked up on the Hub.
    username, preset = ([None] + preset.split("/", 1))[-2:]
    presets_dir = _extern.presets_dir()
    preset_dir = os.path.join(presets_dir, preset)

    # Is the preset given referring to a repository directly?
    relative = False if username else True

    try:
        if not update and preset in _extern.local_presets():
            # Use the locally cached preset as-is.
            _extern.copy_preset(preset_dir, project_dir)
        else:
            lib.echo("Finding preset for \"%s\".. " % preset, silent)
            time.sleep(1 if silent else 0)
            if relative:
                # Preset is relative, look it up from the Hub
                presets = _extern.github_presets()
                if preset not in presets:
                    sys.stdout.write("\"%s\" not found" % preset)
                    sys.exit(lib.USER_ERROR)
                time.sleep(1 if silent else 0)
                repository = presets[preset]
            else:
                # Absolute paths are pulled directly
                repository = username + "/" + preset

            lib.echo("Pulling %s.. " % repository, silent)
            repository = _extern.fetch_release(repository)

            # Remove existing preset
            if preset in _extern.local_presets():
                _extern.remove_preset(preset)

            try:
                _extern.pull_preset(repository, preset_dir)
            except IOError as e:
                lib.echo("ERROR: Sorry, something went wrong.\n"
                         "Use be --verbose for more")
                lib.echo(e)
                sys.exit(lib.USER_ERROR)

            try:
                _extern.copy_preset(preset_dir, project_dir)
            finally:
                # Absolute paths are not stored locally
                if not relative:
                    _extern.remove_preset(preset)

    except IOError as exc:
        if self.verbose:
            lib.echo("ERROR: %s" % exc)
        else:
            lib.echo("ERROR: Could not write, do you have permission?")
        sys.exit(lib.PROGRAM_ERROR)

    lib.echo("\"%s\" created" % name, silent)
Create new default preset \b Usage: $ be new ad "blue_unicorn" created $ be new film --name spiderman "spiderman" created
def multiChoiceParam(parameters, name, type_converter = str):
    """
    multi choice parameter values.

    :param parameters: the parameters tree.
    :param name: the name of the parameter.
    :param type_converter: function to convert the chosen value to a
        different type (e.g. str, float, int). default = 'str'
    :returns list: the chosen values, looked up by index in <Values>
        and passed through *type_converter*.
    """
    param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
    # <Value> holds the chosen indices; <Values> holds all options.
    chosen_indices = param.find('Value').findall('Item')
    options = param.find('Values')
    return [type_converter(options[int(choice.text)].text)
            for choice in chosen_indices]
multi choice parameter values. :param parameters: the parameters tree. :param name: the name of the parameter. :param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = 'str' :returns dictionary: value -> values
def reply_bytes(self, request):
    """Take a `Request` and return an OP_MSG message as bytes."""
    # Message body: 32-bit flag word, then one type-0 (body) section
    # containing the BSON-encoded document.
    body = b''.join([
        struct.pack("<I", self._flags),
        struct.pack("<b", 0),
        bson.BSON.encode(self.doc),
    ])
    # Standard 16-byte header: total length, requestID, responseTo, opcode.
    header = struct.pack(
        "<iiii",
        16 + len(body),
        random.randint(0, 1000000),
        request.request_id,
        OP_MSG,
    )
    return header + body
Take a `Request` and return an OP_MSG message as bytes.
def save_data(self, idx):
    """Save the internal data of all sequences with an activated flag.

    Sequences with an active disk flag are appended to their open file
    as packed doubles; sequences with an active ram flag are written to
    the associated array at position `idx`.
    """
    for name in self:
        actual = getattr(self, name)
        diskflag = getattr(self, '_%s_diskflag' % name)
        ramflag = getattr(self, '_%s_ramflag' % name)
        if diskflag:
            file_ = getattr(self, '_%s_file' % name)
            ndim = getattr(self, '_%s_ndim' % name)
            # Total number of scalar entries across all dimensions.
            length_tot = 1
            for jdx in range(ndim):
                length_tot *= getattr(self, '_%s_length_%s' % (name, jdx))
            if ndim:
                raw = struct.pack(length_tot * 'd', *actual.flatten())
            else:
                # Scalar (0-dimensional) sequence: pack a single double.
                raw = struct.pack('d', actual)
            file_.write(raw)
        elif ramflag:
            getattr(self, '_%s_array' % name)[idx] = actual
Save the internal data of all sequences with an activated flag. Write to file if the corresponding disk flag is activated; store in working memory if the corresponding ram flag is activated.
def list_deploy_keys(self, auth, username, repo_name):
    """
    List deploy keys for the specified repo.

    :param auth.Authentication auth: authentication object
    :param str username: username of owner of repository
    :param str repo_name: the name of the repo
    :return: a list of deploy keys for the repo
    :rtype: List[GogsRepo.DeployKey]
    :raises NetworkFailure: if there is an error communicating with the server
    :raises ApiFailure: if the request cannot be serviced
    """
    path = "/repos/{u}/{r}/keys".format(u=username, r=repo_name)
    keys_json = self.get(path, auth=auth).json()
    return [GogsRepo.DeployKey.from_json(entry) for entry in keys_json]
List deploy keys for the specified repo. :param auth.Authentication auth: authentication object :param str username: username of owner of repository :param str repo_name: the name of the repo :return: a list of deploy keys for the repo :rtype: List[GogsRepo.DeployKey] :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced