content: string (length 22 – 815k)
id: int64 (0 – 4.91M)
def test_delayed_command_order(): """ delayed commands should be sorted by delay time """ null = lambda: None delays = [random.randint(0, 99) for x in range(5)] cmds = sorted([ schedule.DelayedCommand.after(delay, null) for delay in delays ]) assert [c.delay.seconds for c in cmds] == sorted(delays)
21,300
def get_gdb(chip_name=None, gdb_path=None, log_level=None, log_stream_handler=None, log_file_handler=None, log_gdb_proc_file=None, remote_target=None, remote_address=None, remote_port=None, **kwargs): """ Set any parameter to a non-None value to override the default get_gdb logic. Parameters ---------- chip_name : Any(None, str) gdb_path : Any(None, str) log_level : Any(None, str) log_stream_handler : Any(None, str) log_file_handler : Any(None, str) log_gdb_proc_file : Any(None, str) remote_target : Any(None, str) remote_address : Any(None, str) remote_port : Any(None, str) Returns ------- Gdb """ _gdb = _str_to_class("Gdb" + get_good_name(chip_name)) return _gdb(gdb_path=gdb_path, log_level=log_level, log_stream_handler=log_stream_handler, log_file_handler=log_file_handler, log_gdb_proc_file=log_gdb_proc_file, remote_target=remote_target, remote_address=remote_address, remote_port=remote_port, **kwargs)
21,301
def parse_source_gpx_file(inp_path, source): """Parse a GPX file having the following structure: <gpx xmlns="http://www.topografix.com/GPX/1/1" xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" creator="Suunto app" version="1.1" xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd"> <metadata> <name></name> <desc/> <author> <name></name> </author> </metadata> <trk> <name></name> <trkseg> <trkpt lat="12.345678" lon="-23.456789"> <ele>-3.4</ele> <time>2022-02-22T18:09:02Z</time> <extensions> <gpxtpx:TrackPointExtension> <gpxtpx:hr>95</gpxtpx:hr> </gpxtpx:TrackPointExtension> </extensions> </trkpt> </trkseg> </trk> </gpx> Parameters ---------- inp_path : pathlib.Path() Path of the GPX file to parse source : dict The source configuration Returns ------- gpx : dict Track, track segments, and track points vld_t Time from start of track [s] vld_lambda Geodetic longitude [rad] vld_varphi Geodetic latitude [rad] vld_h Elevation [m] See also: https://en.wikipedia.org/wiki/GPS_Exchange_Format """ logger.info(f"Parsing {inp_path}") # Parse input file parser = etree.XMLParser(remove_blank_text=True) tree = etree.parse(inp_path, parser) # Pretty print input file locally out_path = inp_path.with_name(inp_path.name.replace(".gpx", "-pretty.gpx")) tree.write(out_path, pretty_print=True) # Collect tracks root = tree.getroot() gpx = {} gpx["metadata"] = {} gpx["trks"] = [] for trk_element in root.iter("{http://www.topografix.com/GPX/1/1}trk"): # Collect track segments trk = {} trk["name"] = trk_element.find("{http://www.topografix.com/GPX/1/1}name").text trk["trksegs"] = [] for trkseg_element in root.iter("{http://www.topografix.com/GPX/1/1}trkseg"): # Collect track points trkseg = {} trkseg["lat"] = [] trkseg["lon"] = [] trkseg["ele"] = [] trkseg["time"] = [] start_time = None for trkpt_element in trkseg_element.iter( "{http://www.topografix.com/GPX/1/1}trkpt" ): trkseg["lat"].append( math.radians(float(trkpt_element.get("lat"))) ) # [rad] trkseg["lon"].append( math.radians(float(trkpt_element.get("lon"))) ) # [rad] ele_element = trkpt_element.find( "{http://www.topografix.com/GPX/1/1}ele" ) if ele_element is not None: trkseg["ele"].append(float(ele_element.text)) # [m] else: trkseg["ele"].append(-R_OPLUS) cur_time = datetime.fromisoformat( trkpt_element.find("{http://www.topografix.com/GPX/1/1}time").text[ :-1 ] ) if start_time is None: start_time = cur_time trkseg["time"].append(0.0) else: trkseg["time"].append( (cur_time - start_time).total_seconds() ) # [s] trk["trksegs"].append(trkseg) gpx["trks"].append(trk) # Assign longitude, latitude, elevation, and time from start of # track # TODO: Check single track and track segment assumption _t = np.array( gpx["trks"][0]["trksegs"][0]["time"] ) # time from start of track [s] _lambda = np.array( gpx["trks"][0]["trksegs"][0]["lon"] ) # geodetic longitude [rad] _varphi = np.array( gpx["trks"][0]["trksegs"][0]["lat"] ) # geodetic latitude [rad] _h = np.array(gpx["trks"][0]["trksegs"][0]["ele"]) # elevation [m] # Ignore points at which the elevation was not recorded vld_idx = np.logical_and( np.logical_and(source["start_t"] < _t, _t < source["stop_t"]), _h != -R_OPLUS, ) logger.info( f"Found {np.sum(vld_idx)} valid values out of all {_t.shape[0]} values" ) vld_t = _t[vld_idx] vld_lambda = _lambda[vld_idx] vld_varphi = _varphi[vld_idx] vld_h = _h[vld_idx] return gpx, vld_t, vld_lambda, vld_varphi, vld_h
21,302
def check(func: Callable[..., Awaitable[Callable[[CommandContext], Awaitable[bool]]]]) -> Check: """ A decorator which creates a check from a function. """ return Check(func)
21,303
def service_c(request): """ Renders the service chair page with service submissions """ events = ServiceEvent.objects.filter(semester=get_semester()) submissions_pending = ServiceSubmission.objects.filter(semester=get_semester(), status='0').order_by("date") submissions_submitted = ServiceSubmission.objects.filter(semester=get_semester(), status='1').order_by( "date") position = Position.objects.get(title=Position.PositionChoices.SERVICE_CHAIR) hours_pending = 0 for submission in submissions_pending: hours_pending += submission.hours for submission in submissions_submitted: hours_pending += submission.hours hours_approved = 0 submissions_approved = ServiceSubmission.objects.filter(semester=get_semester(), status='2') for submission in submissions_approved: hours_approved += submission.hours context = { 'events': events, 'hours_approved': hours_approved, 'hours_pending': hours_pending, 'submissions_pending': submissions_pending, 'submissions_submitted': submissions_submitted, 'position': position, } return render(request, 'service-chair/service-chair.html', context)
21,304
def get_tank_history(request, tankid): """ Returns a response listing the device history for each tank. """ # Sanitize tankid tankid = int(tankid) # This query is too complex to be worth constructing in ORM, so just use raw SQL. cursor = connection.cursor() cursor.execute("""\ SELECT t.time, t.device_id AS mac FROM (SELECT d.time, d.device_id, LAG(d.device_id) OVER(ORDER BY d.time) AS prev_device_id FROM (SELECT time, tankid, device_id FROM devices_datum WHERE tankid = %s ) AS d ) AS t WHERE t.device_id IS DISTINCT FROM t.prev_device_id; """, [tankid]) history = dictfetchall(cursor) history_serializer = TankHistorySerializer(history, many=True) return JsonResponse(history_serializer.data, safe=False)
21,305
def get_absolute_module(obj): """ Get the absolute path to the module for the given object. e.g. assert get_absolute_module(get_absolute_module) == 'artemis.general.should_be_builtins' :param obj: A python module, class, method, function, traceback, frame, or code object :return: A string representing the import path. """ file_path = inspect.getfile(obj) return file_path_to_absolute_module(file_path)
21,306
def parse_c45(file_base, rootdir='.'): """ Returns an ExampleSet from the C4.5 formatted data """ schema_name = file_base + NAMES_EXT data_name = file_base + DATA_EXT schema_file = find_file(schema_name, rootdir) if schema_file is None: raise ValueError('Schema file not found') data_file = find_file(data_name, rootdir) if data_file is None: raise ValueError('Data file not found') return _parse_c45(schema_file, data_file)
21,307
def to_numpy(tensor: torch.Tensor): """ Convert a PyTorch Tensor to a Numpy Array. """ if tensor is None: return tensor if tensor.is_quantized: tensor = tensor.dequantize() return tensor.cpu().detach().contiguous().numpy()
21,308
def initialize_flask_sqlathanor(db): """Initialize **SQLAthanor** contents on a `Flask-SQLAlchemy`_ instance. :param db: The :class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>` instance. :type db: :class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>` :returns: A mutated instance of ``db`` that replaces `SQLAlchemy`_ components and their `Flask-SQLAlchemy`_ flavors with **SQLAthanor** analogs while maintaining `Flask-SQLAlchemy`_ and `SQLAlchemy`_ functionality and interfaces. :rtype: :class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>` :raises ImportError: if called when `Flask-SQLAlchemy`_ is not installed :raises ValueError: if ``db`` is not an instance of :class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>` """ from flask_sqlalchemy import _wrap_with_default_query_class, SQLAlchemy if not isinstance(db, SQLAlchemy): raise ValueError('db must be an instance of flask_sqlalchemy.SQLAlchemy') db.Column = Column db.relationship = _wrap_with_default_query_class(relationship, db.Query) return db
21,309
def add_vcf_header( vcf_reader ): """ Function to add a new field to the vcf header Input: A vcf reader object Return: The vcf reader object with new headers added """ # Metadata vcf_reader.metadata['SMuRFCmd'] = [get_command_line()] # Formats vcf_reader.formats['VAF'] = pyvcf.parser._Format('VAF',None,'Float','Variant Allele Frequency calculated from the BAM file') vcf_reader.formats['CAD'] = pyvcf.parser._Format('CAD',None,'Integer','Calculated Allelic Depth, used for VAF calculation') vcf_reader.formats['FT'] = pyvcf.parser._Format('FT',None,'String','Sample filter') # Filters vcf_reader.filters['KnownVariant'] = pyvcf.parser._Filter('KnownVariant','Variant has already an ID, excluding COSMIC_IDs') vcf_reader.filters['BadMQ'] = pyvcf.parser._Filter('BadMQ', 'Variant with MQ <'+str(cfg['SMuRF']['mq'])) vcf_reader.filters['BadQual'] = pyvcf.parser._Filter('BadQual','Variant with a QUAL <'+str(cfg['SMuRF']['qual'])) vcf_reader.filters['MultiAllelic'] = pyvcf.parser._Filter('MultiAllelic', 'Variant has multiple alternative alleles') vcf_reader.filters['BlackList'] = pyvcf.parser._Filter('BlackList', 'Variant exists in a blacklist') vcf_reader.filters['Indel'] = pyvcf.parser._Filter('Indel','Variant is an indel') vcf_reader.filters['ControlEvidence'] = pyvcf.parser._Filter('ControlEvidence','Variant is also found in a control based on the GT') vcf_reader.filters['NoSampleEvidence'] = pyvcf.parser._Filter('NoSampleEvidence','Variant is not found in any of the samples based on the GT') vcf_reader.filters['AllSamplesFailedQC'] = pyvcf.parser._Filter('AllSamplesFailedQC', 'All samples failed the quality control') vcf_reader.filters['AllControlsFailedQC'] = pyvcf.parser._Filter('AllControlsFailedQC', 'All controls failed the quality control') vcf_reader.filters['ControlSubclonal'] = pyvcf.parser._Filter('ControlSubclonal', 'Variant is found as subclonal in a control based on the recalculated VAF') vcf_reader.filters['ControlClonal'] = pyvcf.parser._Filter('ControlClonal', 'Variant is found as clonal in a control based on the recalculated VAF') vcf_reader.filters['NoClonalSample'] = pyvcf.parser._Filter('NoClonalSample', 'Variant is not found as clonal in any of the samples based on the recalculated VAF') # Sample filters vcf_reader.filters['LowCov'] = pyvcf.parser._Filter('LowCov', 'Variant has a coverage <'+str(cfg['SMuRF']['coverage'])+' in this sample/control') vcf_reader.filters['NoGenoType'] = pyvcf.parser._Filter('NoGenoType', 'Genotype is empty for this sample/control') vcf_reader.filters['isRef'] = pyvcf.parser._Filter('isRef', 'Genotype is a reference (i.e. reference 0/0)') vcf_reader.filters['isVariant'] = pyvcf.parser._Filter('isVariant', 'Genotype is a variant (i.e. not reference 0/0)') vcf_reader.filters['LowGQ'] = pyvcf.parser._Filter('LowGQ', 'Variant has a low genome quality for this sample/control') # Infos vcf_reader.infos['ABSENT_SAMPLES'] = pyvcf.parser._Info('ABSENT_SAMPLES',1,'Integer','Number of samples without the variant', None, None) vcf_reader.infos['SUBCLONAL_SAMPLES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLES',1,'Integer','Number of samples with a subclonal variant', None, None) vcf_reader.infos['CLONAL_SAMPLES'] = pyvcf.parser._Info('CLONAL_SAMPLES',1,'Integer','Number of samples with a clonal variant', None, None) vcf_reader.infos['ABSENT_CONTROLS'] = pyvcf.parser._Info('ABSENT_CONTROLS',1,'Integer','Number of controls without the variant', None, None) vcf_reader.infos['SUBCLONAL_CONTROLS'] = pyvcf.parser._Info('SUBCLONAL_CONTROLS',1,'Integer','Number of controls with a subclonal variant', None, None) vcf_reader.infos['CLONAL_CONTROLS'] = pyvcf.parser._Info('CLONAL_CONTROLS',1,'Integer','Number of controls with a clonal variant', None, None) vcf_reader.infos['ABSENT_SAMPLE_NAMES'] = pyvcf.parser._Info('ABSENT_SAMPLE_NAMES',None,'String','Samples without the variant', None, None) vcf_reader.infos['SUBCLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLE_NAMES',None,'String','Samples with a subclonal variant', None, None) vcf_reader.infos['CLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('CLONAL_SAMPLE_NAMES',None,'String','Samples with a clonal variant', None, None) vcf_reader.infos['ABSENT_CONTROL_NAMES'] = pyvcf.parser._Info('ABSENT_CONTROL_NAMES',None,'String','Controls without the variant', None, None) vcf_reader.infos['SUBCLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('SUBCLONAL_CONTROL_NAMES',None,'String','Controls with a subclonal variant', None, None) vcf_reader.infos['CLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('CLONAL_CONTROL_NAMES',None,'String','Controls with a clonal variant', None, None) vcf_reader.infos['PASS_QC_SAMPLES'] = pyvcf.parser._Info('PASS_QC_SAMPLES',1,'Integer','Number of samples which pass all quality control filters', None, None) vcf_reader.infos['PASS_QC_CONTROLS'] = pyvcf.parser._Info('PASS_QC_CONTROLS',1,'Integer','Number of controls which pass all quality control filters', None, None) vcf_reader.infos['FAIL_QC_SAMPLES'] = pyvcf.parser._Info('FAIL_QC_SAMPLES',1,'Integer','Number of samples which failed one or multiple quality control filters', None, None) vcf_reader.infos['FAIL_QC_CONTROLS'] = pyvcf.parser._Info('FAIL_QC_CONTROLS',1,'Integer','Number of controls which failed one or multiple quality control filters', None, None) vcf_reader.infos['PASS_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('PASS_QC_SAMPLE_NAMES',None,'String','Samples which pass all quality control filters', None, None) vcf_reader.infos['PASS_QC_CONTROL_NAMES'] = pyvcf.parser._Info('PASS_QC_CONTROL_NAMES',None,'String','Controls which pass all quality control filters', None, None) vcf_reader.infos['FAIL_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('FAIL_QC_SAMPLE_NAMES',None,'String','Samples which failed one or multiple quality control filters', None, None) vcf_reader.infos['FAIL_QC_CONTROL_NAMES'] = pyvcf.parser._Info('FAIL_QC_CONTROL_NAMES',None,'String','Controls which failed one or multiple quality control filters', None, None) return( vcf_reader )
21,310
def get_seats_percent(election_data): """ This function takes a list of lists as an argument, with each list representing a party's election results, and returns a tuple with the percentage of Bundestag seats won by various political affiliations. Parameters: election_data (list): A list of lists, each representing a party's election results Returns: A tuple with percentage of Bundestag seats won by various political affiliations """ left_seats = 0 right_seats = 0 extreme_seats = 0 center_seats = 0 total_bundestag_seats = 0 for party in election_data[1:]: total_bundestag_seats += int(party[1]) if 'far' in party[2]: extreme_seats += int(party[1]) else: center_seats += int(party[1]) if 'left' in party[2]: left_seats += int(party[1]) else: right_seats += int(party[1]) left_percent = round((left_seats / total_bundestag_seats * 100), 2) right_percent = round((right_seats / total_bundestag_seats * 100), 2) extreme_percent = round((extreme_seats / total_bundestag_seats * 100), 2) center_percent = round((center_seats / total_bundestag_seats * 100), 2) return left_percent, right_percent, extreme_percent, center_percent
21,311
def write(filename, data): """ Pickles a file and writes it to the cache. Keyword Arguments: - filename: name of the file to write to - data: object to cache """ if _log_actions: cprint('Writing to cache: "{}"'.format(filename), 'green') joblib.dump(data, join(_cache_path, filename))
21,312
def get_player_gamelog(player_id, season, season_type='Regular Season', timeout=30): """ Collects the game log history of a given player in a given season, also considering a specific season type (pre-season, regular season, or playoffs). Parameters ---------- :param player_id: Identifier of the target player [type: int] :param season: Target season for the analysis [type: str, example: "2020-21"] :param season_type: Specific season type accepted by the endpoint [type: str, default='Regular Season'] :param timeout: Maximum request timeout. [type: int, default=30] Returns ------- :return df_gamelog: DataFrame with specific, detailed information about the player's game log history. Details about the contents of this return can be found in the official documentation of the playergamelog endpoint. [type: pd.DataFrame] """ # Retrieve the player's game log player_gamelog = playergamelog.PlayerGameLog( player_id=player_id, season=season, season_type_all_star=season_type, timeout=timeout ) # Convert the data to a DataFrame and add season information df_gamelog = player_gamelog.player_game_log.get_data_frame() df_gamelog['SEASON'] = season df_gamelog['SEASON_TYPE'] = season_type # Convert the date column df_gamelog['GAME_DATE'] = pd.to_datetime(df_gamelog['GAME_DATE']) df_gamelog.columns = [col.lower().strip() for col in df_gamelog.columns] return df_gamelog
21,313
def unbound_text_to_html5(text, language=None): """ Converts the provided text to HTML5 custom data attributes. Usage: {{text|unbound_text_to_html5:"Greek"}} """ # If the language is English, then don't bother doing anything if language is not None and language.lower() == "english": return text # Make the document that will contain the verse converted_doc = minidom.Document() # Make the verse node to attach the content to verse_node = converted_doc.createElement( "span" ) verse_node.setAttribute("class", "verse") # Append the converted_doc.appendChild(verse_node) # Split up the text and place the text segments in nodes segments = re.findall("[\s]+|[\[\],.:.;]|[^\s\[\],.:.;]+", text) for s in segments: # Don't wrap punctuation in a word node if s in [";", ",", ".", "[", "]", ":"] or len(s.strip()) == 0: txt_node = converted_doc.createTextNode(s) verse_node.appendChild(txt_node) else: word_node = converted_doc.createElement( "span" ) word_node.setAttribute( "class", "word" ) # Create the text node and append it if language is None or language.lower() == "greek": txt_node = converted_doc.createTextNode(s) else: txt_node = converted_doc.createTextNode(transform_text(s, language)) word_node.appendChild(txt_node) # Append the node verse_node.appendChild(word_node) return converted_doc.toxml( encoding="utf-8" )
21,314
def sort_points(points): """Sorts points first by argument, then by modulus. Parameters ---------- points : array_like (n_points, 3) The points to be sorted: (x, y, intensity) Returns ------- points_sorted : :class:`numpy.ndarray` (n_points, 3) The sorted points. """ positions = points[:, :2].astype(float) with np.errstate(invalid='ignore', divide='ignore'): tangents = np.nan_to_num(positions[:, 1]/positions[:, 0]) arguments = np.arctan(tangents) moduli = np.sqrt(np.sum(np.square(positions), axis=1)) inds = np.lexsort((moduli, arguments)) points_sorted = points[inds] return points_sorted
21,315
def get_pairs(scores): """ Returns pairs of indexes where the first value in the pair has a higher score than the second value in the pair. Parameters ---------- scores : list of int Contain a list of numbers Returns ------- query_pair : list of pairs This contains a list of pairs of indexes in scores. """ query_pair = [] for query_scores in scores: temp = sorted(query_scores, reverse=True) pairs = [] for i in range(len(temp)): for j in range(len(temp)): if temp[i] > temp[j]: pairs.append((i,j)) query_pair.append(pairs) return query_pair
21,316
def make(): """Make a new migration. Returns: Response: json status message """ response = None try: with capture_print(escape=True) as content: current_app.config.get('container').make('migrator').make(request.form['name']) response = {'message': content.get_text(), 'status': 'success'} except SystemExit: response = {'message': content.get_text(), 'status': 'error'} return jsonify(response)
21,317
def find_files(args, client): """ Get a list of all the objects to process""" objects = [] continuation_token = 'UNSET' while continuation_token: if continuation_token == 'UNSET': object_list = client.list_objects_v2(Bucket=args['bucket'],Prefix=args['prefix']) else: object_list = client.list_objects_v2(Bucket=args['bucket'], Prefix=args['prefix'], ContinuationToken=continuation_token) if args['debug']: log("Found %d items from bucket list_objects_v2(), includes dirs."% object_list['KeyCount'], level="DEBUG") # This means we have no more keys, or none found if object_list['KeyCount'] > 0: for item in object_list['Contents']: if not item['Key'].endswith('/'): # ignore directories objects.append(item['Key']) # And here we check to see if there's more results to recover if object_list['IsTruncated']: continuation_token = object_list['NextContinuationToken'] else: continuation_token = False # What did we get? log("Found %d items"% len(objects)) # If we have a tracking database argument we need to dedupe the list against already # processed files if args['track']: conn = initalise_connection(args['track']) for filepath in conn.execute('''SELECT filepath FROM files WHERE bucket=? AND filepath LIKE ?''', (args['bucket'], args['prefix'] + '%') ): if filepath[0] in objects: objects.remove(filepath[0]) if args['debug']: log("Excluding already processed file %s"% filepath[0], level="DEBUG") conn.close() return objects
21,318
def are_dirs_equal( dir1: Union[str, os.PathLike], dir2: Union[str, os.PathLike], ignore: Optional[List[str]] = None, ) -> bool: """ Compare the content of two directories, recursively. :param dir1: the left operand. :param dir2: the right operand. :param ignore: is a list of names to ignore (see dircmp docs regarding 'ignore'). :return: True if the directories are equal, False otherwise. """ ignore = ignore or None left_only, right_only, diff = dircmp_recursive( filecmp.dircmp(dir1, dir2, ignore=ignore) ) return left_only == right_only == diff == set()
21,319
def writeCSV(info, resultFile): """ Write info line to CSV :param info: :return: """ try: with codecs.open(resultFile, 'a', encoding='utf8') as fh_results: # Print every field from the field list to the output file for field_pretty in CSV_FIELD_ORDER: field = CSV_FIELDS[field_pretty] try: field = info[field] except KeyError as e: field = "False" try: field = str(field).replace(r'"', r'\"').replace("\n", " ") except AttributeError as e: if args.debug: traceback.print_exc() pass fh_results.write("%s;" % field) # Append vendor scan results for vendor in VENDORS: if vendor in info['vendor_results']: fh_results.write("%s;" % info['vendor_results'][vendor]) else: fh_results.write("-;") fh_results.write('\n') except: if args.debug: traceback.print_exc() return False return True
21,320
def generate_hmac(str_to_sign, secret): """Signs the specified string using the specified secret. Args: str_to_sign : string, the string to sign secret : string, the secret used to sign Returns: signed_message : string, the signed str_to_sign """ message = str_to_sign.encode('utf-8') secret = secret.encode('utf-8') cmd = ['echo -n "' + str(message) + '" | openssl dgst -sha256 -binary -hmac "' + str(secret) + '"'] process, signed_message, error = linuxutil.popen_communicate(cmd, shell=True) if process.returncode != 0: raise Exception("Unable to generate signature. " + str(error)) return signed_message
21,321
def interpolate_scores(coords: np.array, scores: np.array, coord_range: tuple, step: float = 0.001) -> np.array: """ Given a coord_range and values for specific coords - interpolate to the rest of the grid Args: coords: array of lons and lats of points that their values are known scores: array of the coords values coord_range: range of the desired grid step: resolution of sample Returns: z: np.array - 2D array of the values in the entire grid of coord_range """ min_lon, min_lat, max_lon, max_lat = coord_range x = np.arange(min_lon, max_lon, step=step) y = np.arange(min_lat, max_lat, step=step) grid_x, grid_y = np.meshgrid(x, y) z = interpolate.griddata(coords, scores, (x[None, :], y[:, None]), method='linear') return z
21,322
def remove_blank_from_dict(data): """Optimise data from default outputted dictionary""" if isinstance(data, dict): return dict( (key, remove_blank_from_dict(value)) for key, value in data.items() if is_not_blank(value) and is_not_blank(remove_blank_from_dict(value)) ) if isinstance(data, list): return [ remove_blank_from_dict(value) for value in data if is_not_blank(value) and is_not_blank(remove_blank_from_dict(value)) ] return data
21,323
def upload_to_database( database_secrets_manager_arn: str, user_mapping: List[Dict] ) -> str: """Uploads data from disk to an RDS postgres instance. Uses the provided user_mapping to replace the user subs of data in the local file with the user subs of the newly created users in cognito.""" with open(SOURCE_DATABASE_DUMP_FILE, "r") as f: database_dump = f.read() for user in user_mapping: database_dump = database_dump.replace(user["source_sub"], user["target_sub"]) [CURATOR_SUB] = [ target_user["target_sub"] for target_user in user_mapping if target_user["email"] == "curator@apt.com" ] def replace_missing_sub_with_curator_sub(match): if match.group(0) in [ target_user["target_sub"] for target_user in user_mapping ]: return match.group(0) return CURATOR_SUB database_dump = re.sub( r"[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}", replace_missing_sub_with_curator_sub, database_dump, ) with open(SOURCE_DATABASE_DUMP_FILE, "w") as f: f.write(database_dump) secrets = json.loads( secretsmanager_client.get_secret_value(SecretId=database_secrets_manager_arn)[ "SecretString" ] ) dsn = f"postgres://{secrets['username']}:{secrets['password']}@{secrets['host']}/{secrets['dbname']}" subprocess.Popen( f"psql '{dsn}?options=--search_path%3dapt' -f {SOURCE_DATABASE_DUMP_FILE}", shell=True, stdout=subprocess.PIPE, ).stdout.read() return SOURCE_DATABASE_DUMP_FILE
21,324
def new_client( dockerd_url=None, tls=False, tls_verify=False, cert_path=None, timeout=None, ): """ Return a newly configured Docker client. """ _dockerd_url = dockerd_url if not _dockerd_url: _dockerd_url = os.getenv('DOCKER_HOST', DOCKER_DEFAULT_DOCKERD_URL) _tls = tls tls_config = None if tls_verify or str(os.environ.get('DOCKER_TLS_VERIFY', '0')) == '1': _tls = True _cert_path = os.getenv('DOCKER_CERT_PATH', cert_path) if not _cert_path: raise BuildRunnerConfigurationError( "TLS connection specified but cannot determine cert path" " (from DOCKER_CERT_PATH env variable)" ) ca_cert_path = os.path.join(_cert_path, 'ca.pem') client_cert = ( os.path.join(_cert_path, 'cert.pem'), os.path.join(_cert_path, 'key.pem') ) tls_config = docker.tls.TLSConfig( ssl_version=ssl.PROTOCOL_TLSv1, client_cert=client_cert, verify=ca_cert_path, assert_hostname=False, ) if _tls: # make sure the scheme is https url_parts = urllib.parse.urlparse(_dockerd_url) if url_parts.scheme == 'tcp': _dockerd_url = urllib.parse.urlunparse(('https',) + url_parts[1:]) args = {} if timeout is not None: if timeout == 0: args['timeout'] = MAX_TIMEOUT else: args['timeout'] = timeout return Client( base_url=_dockerd_url, version=DOCKER_API_VERSION, tls=tls_config, **args )
21,325
def merge_mosaic_images(mosaic_dict, mosaic_images, orig_images, Y_orig=None): """ Merge the list of mosaic images with all original images. Args: mosaic_dict: Dictionary specifying how mosaic images were created, returned from make_mosaic mosaic_images: List of all mosaic images returned from make_mosaic orig_images: List of all images, some (or all, or none) of which were used to generate the mosaic images Y_orig: If building mosaic images for training, the Y/expected images corresponding to orig_images Returns: 3 lists - merged_images, merged_sizes, merged_Y (empty list if Y_orig was not provided). This list of images can then be resized, windowed, etc., and provided as input images for training or predictions. To split the merged list back into the separate portions, use split_merged_mosaic. """ orig_index = list(range(0, len(orig_images))) merged_images = [] merged_sizes = [] merged_Y = [] # If Y/expected values are desired, construct the merged Y # images to correspond with the mosaic images. if Y_orig: for k, v in mosaic_dict.items(): merged_Y.append(combine_images(Y_orig, v)) # Mosaic images are output first for img in mosaic_images: merged_images.append(img) merged_sizes.append([img.shape[0], img.shape[1]]) mosaic_all_ix=[] [mosaic_all_ix.extend(v) for v in mosaic_dict.values()] leftovers = [x for x in orig_index if x not in mosaic_all_ix] # And then output all images that are not part of a larger mosaic image for ix in leftovers: leftover_img = orig_images[ix] merged_images.append(leftover_img) merged_sizes.append([leftover_img.shape[0], leftover_img.shape[1]]) if Y_orig: merged_Y.append(Y_orig[ix]) return (merged_images, merged_sizes, merged_Y)
21,326
def plot_representation_size_time( representation_size_results, include_cis=True): """Plot the cross-validation time values from the output of `representation_size_experiments`""" kwargs = { 'metric_name': 'Cross-validation time (in secs.)', 'metric_mean_name': 'Mean cross-validation time (in secs.)', 'ylabel': 'Cross-validation time (log-scale)', 'value_transform': (lambda x : np.log(x)), 'include_cis': include_cis, 'ylabel_overlap_threshold': 0.2} plot_representation_size(representation_size_results, **kwargs)
21,327
def show2D(dd, impixel=None, im=None, fig=101, verbose=1, dy=None, sigma=None, colorbar=False, title=None, midx=2, units=None): """ Show result of a 2D scan Args: dd (DataSet) impixel (array or None) im (array or None) """ if dd is None: return None extent, g0, g1, vstep, vsweep, arrayname = dataset2Dmetadata(dd) tr = image_transform(dd, mode='pixel') array = getattr(dd, arrayname) if impixel is None: if im is None: im = np.array(array) impixel = tr._transform(im) else: pass else: pass labels = [s.name for s in array.set_arrays] xx = extent xx = tr.matplotlib_image_extent() ny = vstep.size nx = vsweep.size im = qtt.utilities.tools.diffImageSmooth(impixel, dy=dy, sigma=sigma) if verbose: print('show2D: nx %d, ny %d' % (nx, ny,)) if verbose >= 2: print('extent: %s' % xx) if units is None: unitstr = '' else: unitstr = ' (%s)' % units if fig is not None: scanjob = dd.metadata.get('scanjob', dict()) pgeometry.cfigure(fig) plt.clf() if impixel is None: if verbose >= 2: print('show2D: show raw image') plt.pcolormesh(vstep, vsweep, im) else: if verbose >= 2: print('show2D: show image') plt.imshow(impixel, extent=xx, interpolation='nearest') labelx = labels[1] labely = labels[0] if scanjob.get('sweepdata', None) is not None: labelx = sweepgate(scanjob) plt.xlabel('%s' % labelx + unitstr) else: pass if scanjob.get('stepdata', None) is not None: if units is None: plt.ylabel('%s' % stepgate(scanjob)) else: plt.ylabel('%s (%s)' % (stepgate(scanjob), units)) if not title is None: plt.title(title) if colorbar: plt.colorbar() if verbose >= 2: print('show2D: at show') try: plt.show(block=False) except: # ipython backend does not know about block keyword... plt.show() return xx, vstep, vsweep
21,328
def get_job(api_key, jq_id): """ Fetch a job and its status :param api_key: user id of the client :param jq_id: job queue id :return: job queue id """ if Auth.verify_auth_key(api_key): if Auth.verify_job(api_key, jq_id): return trigger.get_job(jq_id) return abort(400)
21,329
def show_dnp_properties(radical, mwFrequency, dnpNucleus): """Calculate DNP Properties Currently only implemented for liquid state experiments Args: radical: Radical name, see mrProperties.py mwFrequency: Microwave frequency in (Hz) dnpNucleus: Nucleus for DNP-NMR experiments Example: .. code-block:: python dnp.dnpTools.show_dnp_poperties('gfree', 9.45e9, '1H') """ # http://physics.nist.gov/constants mub = 9.27400968e-24 planck = 6.62606957e-34 # Get radical properties glist = radicalProperties.get(radical)[0] nucleus = radicalProperties.get(radical)[1] Alist = radicalProperties.get(radical)[2] # Get g-value g = np.array(glist) giso = np.sum(g) / g.size B0 = mwFrequency * planck / giso / mub # Get hyperfine coupling and calculate isotropic value A = np.array(Alist) AisoMHz = np.sum(A) / A.size gmr_e = mr_properties("0e") AisoT = AisoMHz / gmr_e / 2 / np.pi if nucleus != None: nucSpin = mr_properties(nucleus, "spin") n = 2 * nucSpin + 1 ms = np.linspace(-1.0 * nucSpin, nucSpin, int(n)) B = B0 + ms * AisoT else: nucSpin = 0 B = B0 print("") print("Input Parameters: ") print("Radical : ", radical) print("giso : %8.6f" % giso) print("Nucleus : ", nucleus) print("Nuc Spin : ", nucSpin) print("Aiso (MHz) : %4.2f" % AisoMHz) print("") print("Predicted Field Values for DNP: ") m = 1 for b in B: print("Transition: ", m) print("B (T) : %6.4f" % b) nmr = mr_properties("1H") * b * 10 / 2 / np.pi print("NMR Frequency (MHz) : %6.3f" % nmr) print("") m += 1
21,330
async def test_options(hass): """Test updating options.""" entry = MockConfigEntry( domain=transmission.DOMAIN, title=CONF_NAME, data=MOCK_ENTRY, options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL}, ) flow = init_config_flow(hass) options_flow = flow.async_get_options_flow(entry) result = await options_flow.async_step_init() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "init" result = await options_flow.async_step_init({CONF_SCAN_INTERVAL: 10}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["data"][CONF_SCAN_INTERVAL] == 10
21,331
def test_auth_check_returns_user_stt(api_client, user): """If user is authenticated auth_check should return user data.""" api_client.login(username=user.username, password="test_password") serializer = UserProfileSerializer(user) response = api_client.get(reverse("authorization-check")) assert response.data["user"]["stt"] == serializer.data["stt"]
21,332
def contour_to_valid(cnt, image_shape): """Convert rect to xys, i.e., eight points The `image_shape` is used to make sure all points returned are valid, i.e., within the image area """ # rect = cv2.minAreaRect(cnt) if len(cnt.shape) != 3: assert 1 < 0 rect = cnt.reshape([cnt.shape[0], cnt.shape[2]]) h, w = image_shape[0:2] def get_valid_x(x): if x < 0: return 0 if x >= w: return w - 1 return x def get_valid_y(y): if y < 0: return 0 if y >= h: return h - 1 return y for i_xy, (x, y) in enumerate(rect): x = get_valid_x(x) y = get_valid_y(y) rect[i_xy, :] = [x, y] points = np.reshape(rect, -1) return points
21,333
def test_verify_date(): """Verify date works and also an invalid case is tested.""" valid_dates = "2022-01-12" invalid_dates = "2022-13-32" assert verify_date(valid_dates) == valid_dates with pytest.raises(InvalidDateError) as excinfo: verify_date(invalid_dates) assert "YYYY-MM-DD" in str(excinfo.value)
21,334
def search(datafile, query, bool_operator): """ Queries on a set of documents. :param datafile: The location of the datafile as a pathlib.Path :param query: the query text :param bool_operator: the operator. Must be one of [OR, AND] :return: the list of indexes matching the search criteria """ # we normalize the query string to unique lowercase words and split by space query = _extract_words(query) # we read the datafile data = datafile.readlines() # calculating results results = [str(i) for i, text in enumerate(data) if (query.issubset(_extract_words(text)) if bool_operator == 'AND' else bool(query.intersection(_extract_words(text))))] return results
21,335
def recurse_structures( structure: Component, ignore_components_prefix: Optional[List[str]] = None, ignore_functions_prefix: Optional[List[str]] = None, ) -> DictConfig: """Recurse over structures""" ignore_functions_prefix = ignore_functions_prefix or [] ignore_components_prefix = ignore_components_prefix or [] if ( hasattr(structure, "function_name") and structure.function_name in ignore_functions_prefix ): return DictConfig({}) if hasattr(structure, "name") and any( [structure.name.startswith(i) for i in ignore_components_prefix] ): return DictConfig({}) output = {structure.name: structure.info} for element in structure.references: if ( isinstance(element, ComponentReference) and element.ref_cell.name not in output ): output.update(recurse_structures(element.ref_cell)) return output
21,336
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs, file_mask): """ Compute DTI. :param fname_in: input 4d file. :param bvals: bvals txt file :param bvecs: bvecs txt file :param prefix: output prefix. Example: "dti_" :param method: algo for computing dti :param evecs: bool: output diffusion tensor eigenvectors :return: True/False """ # Open file. from spinalcordtoolbox.image import Image nii = Image(fname_in) data = nii.data sct.printv('data.shape (%d, %d, %d, %d)' % data.shape) # open bvecs/bvals bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs) gtab = gradient_table(bvals, bvecs) # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image. if not file_mask == '': sct.printv('Open mask file...', param.verbose) # open mask file nii_mask = Image(file_mask) mask = nii_mask.data # fit tensor model sct.printv('Computing tensor using "' + method + '" method...', param.verbose) import dipy.reconst.dti as dti if method == 'standard': tenmodel = dti.TensorModel(gtab) if file_mask == '': tenfit = tenmodel.fit(data) else: tenfit = tenmodel.fit(data, mask) elif method == 'restore': import dipy.denoise.noise_estimate as ne sigma = ne.estimate_sigma(data) dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma) if file_mask == '': tenfit = dti_restore.fit(data) else: tenfit = dti_restore.fit(data, mask) # Compute metrics sct.printv('Computing metrics...', param.verbose) # FA nii.data = tenfit.fa nii.save(prefix + 'FA.nii.gz', dtype='float32') # MD nii.data = tenfit.md nii.save(prefix + 'MD.nii.gz', dtype='float32') # RD nii.data = tenfit.rd nii.save(prefix + 'RD.nii.gz', dtype='float32') # AD nii.data = tenfit.ad nii.save(prefix + 'AD.nii.gz', dtype='float32') if evecs: data_evecs = tenfit.evecs # output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data for idim in range(3): nii.data = data_evecs[:, :, :, :, idim] nii.save(prefix + 'V' + str(idim+1) + '.nii.gz', dtype="float32") return True
21,337
def solve( netlist=None, parameter_values=None, experiment=None, I_init=1.0, htc=None, initial_soc=0.5, nproc=12, output_variables=None, ): """ Solves a pack simulation Parameters ---------- netlist : pandas.DataFrame A netlist of circuit elements with format. desc, node1, node2, value. Produced by liionpack.read_netlist or liionpack.setup_circuit parameter_values : pybamm.ParameterValues class A dictionary of all the model parameters experiment : pybamm.Experiment class The experiment to be simulated. experiment.period is used to determine the length of each timestep. I_init : float, optional Initial guess for single battery current [A]. The default is 1.0. htc : float array, optional Heat transfer coefficient array of length Nspm. The default is None. initial_soc : float The initial state of charge for every battery. The default is 0.5 nproc : int, optional Number of processes to start in parallel for mapping. The default is 12. output_variables : list, optional Variables to evaluate during solve. Must be a valid key in the model.variables Raises ------ Exception DESCRIPTION. Returns ------- output : ndarray shape [# variable, # steps, # batteries] simulation output array """ if netlist is None or parameter_values is None or experiment is None: raise Exception("Please supply a netlist, parameter_values, and experiment") # Get netlist indices for resistors, voltage sources, current sources Ri_map = netlist["desc"].str.find("Ri") > -1 V_map = netlist["desc"].str.find("V") > -1 I_map = netlist["desc"].str.find("I") > -1 Terminal_Node = np.array(netlist[I_map].node1) Nspm = np.sum(V_map) # Generate the protocol from the supplied experiment protocol = lp.generate_protocol_from_experiment(experiment) dt = experiment.period Nsteps = len(protocol) # Solve the circuit to initialise the electrochemical models V_node, I_batt = lp.solve_circuit(netlist) # Create battery simulation and update initial state of charge sim = lp.create_simulation(parameter_values, make_inputs=True) lp.update_init_conc(sim, SoC=initial_soc) # The simulation output variables calculated at each step for each battery # Must be a 0D variable i.e. battery wide volume average - or X-averaged for 1D model variable_names = [ "Terminal voltage [V]", "Measured battery open circuit voltage [V]", ] if output_variables is not None: for out in output_variables: if out not in variable_names: variable_names.append(out) # variable_names = variable_names + output_variables Nvar = len(variable_names) # Storage variables for simulation data shm_i_app = np.zeros([Nsteps, Nspm], dtype=float) shm_Ri = np.zeros([Nsteps, Nspm], dtype=float) output = np.zeros([Nvar, Nsteps, Nspm], dtype=float) # Initialize currents in battery models shm_i_app[0, :] = I_batt * -1 # Set up integrator integrator, variables_fn, t_eval = _create_casadi_objects( I_init, htc[0], sim, dt, Nspm, nproc, variable_names ) # Step forward in time time = 0 end_time = dt * Nsteps step_solutions = [None] * Nspm V_terminal = [] record_times = [] v_cut_lower = parameter_values["Lower voltage cut-off [V]"] v_cut_higher = parameter_values["Upper voltage cut-off [V]"] sim_start_time = ticker.time() for step in tqdm(range(Nsteps), desc='Solving Pack'): # Step the individual battery models step_solutions, var_eval = _mapped_step( sim.built_model, step_solutions, lp.build_inputs_dict(shm_i_app[step, :], htc), integrator, variables_fn, t_eval, ) output[:, step, :] = var_eval time += dt # Calculate internal resistance and update netlist temp_v = output[0, step, :] temp_ocv = output[1, step, :] # temp_Ri = output[2, step, :] # This could be used instead of Equivalent ECM resistance which has # been changing definition temp_Ri = (temp_ocv - temp_v) / shm_i_app[step, :] # Make Ri more stable current_cutoff = np.abs(shm_i_app[step, :]) < 1e-6 temp_Ri[current_cutoff] = 1e-12 # temp_Ri = 1e-12 shm_Ri[step, :] = temp_Ri netlist.loc[V_map, ("value")] = temp_ocv netlist.loc[Ri_map, ("value")] = temp_Ri netlist.loc[I_map, ("value")] = protocol[step] # Stop if voltage limits are reached if np.any(temp_v < v_cut_lower): print("Low voltage limit reached") break if np.any(temp_v > v_cut_higher): print("High voltage limit reached") break if time <= end_time: record_times.append(time) V_node, I_batt = lp.solve_circuit(netlist) V_terminal.append(V_node[Terminal_Node][0]) if time < end_time: shm_i_app[step + 1, :] = I_batt[:] * -1 # Collect outputs all_output = {} all_output["Time [s]"] = np.asarray(record_times) all_output["Pack current [A]"] = np.asarray(protocol[: step + 1]) all_output["Pack terminal voltage [V]"] = np.asarray(V_terminal) all_output["Cell current [A]"] = shm_i_app[: step + 1, :] for j in range(Nvar): all_output[variable_names[j]] = output[j, : step + 1, :] toc = ticker.time() lp.logger.notice( "Solve circuit time " + str(np.around(toc - sim_start_time, 3)) + "s" ) return all_output
21,338
def global_node_entropy(data, dx=3, dy=1, taux=1, tauy=1, overlapping=True, connections="all", tie_precision=None): """ Calculates global node entropy\\ [#pessa2019]_\\ :sup:`,`\\ [#McCullough]_ for an ordinal network obtained from data. (Assumes directed and weighted edges). Parameters ---------- data : array, return of :func:`ordpy.ordinal_network` Array object in the format :math:`[x_{1}, x_{2}, x_{3}, \\ldots ,x_{n}]` or :math:`[[x_{11}, x_{12}, x_{13}, \\ldots, x_{1m}], \\ldots, [x_{n1}, x_{n2}, x_{n3}, \\ldots, x_{nm}]]` or an ordinal network returned by :func:`ordpy.ordinal_network`\\ [*]_. dx : int Embedding dimension (horizontal axis) (default: 3). dy : int Embedding dimension (vertical axis); it must be 1 for time series (default: 1). taux : int Embedding delay (horizontal axis) (default: 1). tauy : int Embedding delay (vertical axis) (default: 1). overlapping : boolean If `True`, **data** is partitioned into overlapping sliding windows (default: `True`). If `False`, adjacent partitions are non-overlapping. connections : str The ordinal network is constructed using `'all'` permutation successions in a symbolic sequence or only `'horizontal'` or `'vertical'` successions. Parameter only valid for image data (default: `'all'`). tie_precision : int If not `None`, **data** is rounded with `tie_precision` number of decimals (default: `None`). Returns ------- : float Value of global node entropy. Notes ----- .. [*] In case **data** is an ordinal network returned by :func:`ordpy.ordinal_network`, the parameters of :func:`ordpy.global_node_entropy` are infered from the network. Examples -------- >>> global_node_entropy([1,2,3,4,5,6,7,8,9], dx=2) 0.0 >>> >>> global_node_entropy(ordinal_network([1,2,3,4,5,6,7,8,9], dx=2)) 0.0 >>> >>> global_node_entropy(np.random.uniform(size=100000), dx=3) 1.4988332319747597 >>> >>> global_node_entropy(random_ordinal_network(dx=3)) 1.5 >>> >>> global_node_entropy([[1,2,1,4],[8,3,4,5],[6,7,5,6]], dx=2, dy=2, connections='horizontal') 0.25 >>> >>> global_node_entropy([[1,2,1,4],[8,3,4,5],[6,7,5,6]], dx=2, dy=2, connections='vertical') 0.0 """ if len(data)==3 and type(data[0][0])==np.str_: nodes, links, weights = data else: #assumes 'normalized==True' and 'directed==True'. nodes, links, weights = ordinal_network(data, dx, dy, taux, tauy, True, overlapping, True, connections, tie_precision=tie_precision) links_source = links.transpose()[0] links_target = links.transpose()[1] h_gn = 0 for node in nodes: args = np.argwhere(links_source==node).flatten() renorm_weights = weights[args]/np.sum(weights[args]) args_in = np.argwhere(links_target==node).flatten() p_in = np.sum(weights[args_in]) h_i = -np.sum(renorm_weights*np.log2(renorm_weights)) h_gn += p_in*h_i return h_gn
21,339
def get_cert(certificate): """ Return the data of the certificate :returns: the certificate file contents """ cert_file = "{}/certs/{}".format(snapdata_path, certificate) with open(cert_file) as fp: cert = fp.read() return cert
21,340
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27): """ Trains the model and generates dinosaur names. Arguments: data -- text corpus ix_to_char -- dictionary that maps the index to a character char_to_ix -- dictionary that maps a character to an index num_iterations -- number of iterations to train the model for n_a -- number of units of the RNN cell dino_names -- number of dinosaur names you want to sample at each iteration. vocab_size -- number of unique characters found in the text (size of the vocabulary) Returns: parameters -- learned parameters """ # Retrieve n_x and n_y from vocab_size n_x, n_y = vocab_size, vocab_size # Initialize parameters parameters = initialize_parameters(n_a, n_x, n_y) # Initialize loss (this is required because we want to smooth our loss) loss = get_initial_loss(vocab_size, dino_names) # Build list of all dinosaur names (training examples). with open("dinos.txt") as f: examples = f.readlines() examples = [x.lower().strip() for x in examples] # Shuffle list of all dinosaur names np.random.seed(0) np.random.shuffle(examples) # Initialize the hidden state of your LSTM a_prev = np.zeros((n_a, 1)) # Optimization loop for j in range(num_iterations): ### START CODE HERE ### # Set the index `idx` (see instructions above) idx = j%len(examples) # Set the input X (see instructions above) single_example = examples[idx] single_example_chars = [c for c in single_example] single_example_ix = [char_to_ix[c] for c in single_example_chars] X = [None]+single_example_ix # Set the labels Y (see instructions above) ix_newline = char_to_ix["\n"] Y = X[1:]+[ix_newline] # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters # Choose a learning rate of 0.01 curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01) ### END CODE HERE ### # Use a latency trick to keep the loss smooth. It happens here to accelerate the training. loss = smooth(loss, curr_loss) # Every 2000 Iteration, generate "n" characters thanks to sample() to check if the model is learning properly if j % 2000 == 0: print('Iteration: %d, Loss: %f' % (j, loss) + '\n') # The number of dinosaur names to print seed = 0 for name in range(dino_names): # Sample indices and print them sampled_indices = sample(parameters, char_to_ix, seed) print_sample(sampled_indices, ix_to_char) seed += 1 # To get the same result (for grading purposes), increment the seed by one. print('\n') return parameters
21,341
def random_choice(gene): """ Randomly select an object, such as a string, from a list. Gene must have a defined `choices` list. Args: gene (Gene): A gene with a set `choices` list. Returns: object: Selected choice. """ if 'choices' not in gene.__dict__: raise KeyError("'choices' not defined in this gene, please include a list of values!") return random.choice(gene.choices)
21,342
def get_actor(payload: PayloadJSON, actor_id: int) -> ResourceJSON: """Return an actor by actor_id""" actor = ActorModel.find_by_id(actor_id) if actor is None: abort(404) return jsonify({"success": True, "actor": actor.json()},)
21,343
def check_if_punctuations(word: str) -> bool: """Returns ``True`` if ``word`` is just a sequence of punctuations.""" for c in word: if c not in string.punctuation: return False return True
21,344
def calculate_sigma_points(states, flat_covs, scaling_factor, out, square_root_filters): """Calculate the array of sigma_points for the unscented transform. Args: states (np.ndarray): numpy array of (nind, nemf, nfac) flat_covs (np.ndarray): numpy array of (nind * nemf, nfac, nfac) scaling_factor (float): a constant scaling factor for sigma points that depends on the sigma_point algorithm chosen. out (np.ndarray): numpy array of (nemf * nind, nsigma, nfac) with sigma_points. square_root_filters (bool): indicates if square-root filters are used. """ if square_root_filters is True: cholcovs_t = flat_covs[:, 1:, 1:] else: cholcovs_t = np.transpose(np.linalg.cholesky(flat_covs), axes=(0, 2, 1)) nemf_times_nind, nsigma, nfac = out.shape out[:] = states.reshape(nemf_times_nind, 1, nfac) cholcovs_t *= scaling_factor out[:, 1: nfac + 1, :] += cholcovs_t out[:, nfac + 1:, :] -= cholcovs_t
21,345
def normalize_text(string, remove_stopwords=False, stem_words=False): """ Remove punctuation, parentheses, question marks, etc. """ strip_special_chars = re.compile("[^A-Za-z0-9 ]+") string = string.lower() string = string.replace("<br />", " ") string = string.replace(r"(\().*(\))|([^a-zA-Z'])",' ') string = string.replace('&', 'and') string = string.replace('@', 'at') string = string.replace('0', 'zero') string = string.replace('1', 'one') string = string.replace('2', 'two') string = string.replace('3', 'three') string = string.replace('4', 'four') string = string.replace('5', 'five') string = string.replace('6', 'six') string = string.replace('7', 'seven') string = string.replace('8', 'eight') string = string.replace('9', 'nine') string = string.split() if remove_stopwords: stop_words = stopwords.words('english') string = [w for w in string if w not in stop_words] if stem_words: ps = PorterStemmer() string = [ps.stem(w) for w in string] string = ' '.join(string) return re.sub(strip_special_chars, "", string)
21,346
def get_new_codes(): """ Return New Codes and Refresh DB""" db = dataset.connect(database_url) new_codes = get_code() table = db['promo'] """ Get New Codes""" new = {} for key, value in new_codes.items(): if table.find_one(promo=key) is None: new[key] = [new_codes[key][0], new_codes[key][1]] else: pass """ Add to DB """ for key in new: table.insert(dict(promo=key, desc=new_codes[key][1], exp=new_codes[key][0])) return new
21,347
def process_bulk_add_ip(request, formdict): """ Performs the bulk add of ips by parsing the request data. Batches some data into a cache object for performance by reducing large amounts of single database queries. :param request: Django request. :type request: :class:`django.http.HttpRequest` :param formdict: The form representing the bulk uploaded data. :type formdict: dict :returns: :class:`django.http.HttpResponse` """ ip_names = [] cached_results = {} cleanedRowsData = convert_handsontable_to_rows(request) for rowData in cleanedRowsData: if rowData != None and rowData.get(form_consts.IP.IP_ADDRESS) != None: ip_names.append(rowData.get(form_consts.IP.IP_ADDRESS).lower()) ip_results = IP.objects(ip__in=ip_names) for ip_result in ip_results: cached_results[ip_result.ip] = ip_result cache = {form_consts.IP.CACHED_RESULTS: cached_results, 'cleaned_rows_data': cleanedRowsData} response = parse_bulk_upload(request, parse_row_to_bound_ip_form, add_new_ip_via_bulk, formdict, cache) return response
21,348
def test_list_zones_no_search_first_page(list_zone_context, shared_zone_test_context): """ Test that the first page of listing zones returns correctly when no name filter is provided """ result = shared_zone_test_context.list_zones_client.list_zones(name_filter=f"*{shared_zone_test_context.partition_id}", max_items=3) zones = result["zones"] assert_that(zones, has_length(3)) assert_that(zones[0]["name"], is_(list_zone_context.search_zone1["name"])) assert_that(zones[1]["name"], is_(list_zone_context.search_zone2["name"])) assert_that(zones[2]["name"], is_(list_zone_context.search_zone3["name"])) assert_that(result["nextId"], is_(list_zone_context.search_zone3["name"])) assert_that(result["maxItems"], is_(3)) assert_that(result, is_not(has_key("startFrom"))) assert_that(result["nameFilter"], is_(f"*{shared_zone_test_context.partition_id}"))
21,349
def gen_public_e(lambda_: int) -> int: """ Generates a decreasingly smaller sequence of bytes and converts them to integers until one satisfies > lambda. Continues with half the amount of necessary bytes, decreasing by one integer until gcd(candidate, lambda) == 1. """ bytes_ = 1028 + 1 candidate = crand(bytes_) while candidate > lambda_: # Finds random amount of bytes candidate = crand(bytes_) bytes_ -= 1 candidate = crand(bytes_ // 2) # Generates new candidate in the middle e = 2**16 + 1 while candidate > e: # Finds candidate that satisfies gcd if gcd(candidate, lambda_) == 1: break candidate -= 1 return candidate
21,350
def lazy(maybe_callable: Union[T, Callable[[], T]]) -> T: """ Call and return a value if callable else return it. >>> lazy(42) 42 >>> lazy(lambda: 42) 42 """ if callable(maybe_callable): return maybe_callable() return maybe_callable
21,351
def basic_demo(): """Basic example""" tree = ET.ElementTree(file="./menu.xml") root = tree.getroot() # An Element has a tag and an attribute dictionary print(root.tag) print(root.attrib) # Iterating over the root node yields its child nodes for child in root: print(child.tag, child.attrib) # Access nested children print(root[0][1].text)
21,352
def _git_diff(staged_or_modified: bool, extension: str) -> List[str]: """ Args: extension: the extension of files considered, such as "py" or "ml" staged_or_modified (bool) Whether to consider staged files (True) or modified ones (False) Returns: A list of relevant versioned files that are staged or modified """ git_cmd = ["git", "diff"] if staged_or_modified: git_cmd += ["--cached"] git_cmd += ["--name-only", "--diff-filter=ACMR", "*." + extension] git_diff_result = subprocess.run(git_cmd, stdout=subprocess.PIPE, universal_newlines=True, check=True) # The comprehension filters empty lines return [x for x in git_diff_result.stdout.split("\n") if x]
21,353
def shuffle_list(*ls): """ shuffle multiple list at the same time :param ls: :return: """ from random import shuffle l = list(zip(*ls)) shuffle(l) return zip(*l)
21,354
def cancer_variants(institute_id, case_name): """Show cancer variants overview.""" data = controllers.cancer_variants(store, request.args, institute_id, case_name) return data
21,355
def test_anything_call(b): """Matching anything should always return True.""" result = anything(b) assert result is True
21,356
def _get_ec2_on_demand_prices(region_name: str) -> Iterable[CloudInstanceType]: """ Yields CloudInstanceType objects with instance_type, memory_gb, logical_cpu, and price, where price is the on-demand price """ # All comments about the pricing API are based on # https://www.sentiatechblog.com/using-the-ec2-price-list-api # us-east-1 is the only region this pricing API is available and the pricing # endpoint in us-east-1 has pricing data for all regions. pricing_client = boto3.client("pricing", region_name="us-east-1") filters = [ # only get prices for the specified region { "Type": "TERM_MATCH", "Field": "location", "Value": _get_region_description_for_pricing(region_name), }, # filter out instance types that come with SQL Server pre-installed { "Type": "TERM_MATCH", "Field": "preInstalledSw", "Value": "NA", }, # limit ourselves to just Linux instances for now # TODO add support for Windows eventually { "Type": "TERM_MATCH", "Field": "operatingSystem", "Value": "Linux", }, # Shared is a "regular" EC2 instance, as opposed to Dedicated and Host {"Type": "TERM_MATCH", "Field": "tenancy", "Value": "Shared"}, # This relates to EC2 capacity reservations. Used is correct for when we don't # have any reservations {"Type": "TERM_MATCH", "Field": "capacitystatus", "Value": "Used"}, ] for product_json in _boto3_paginate( pricing_client.get_products, Filters=filters, ServiceCode="AmazonEC2", FormatVersion="aws_v1", ): product = json.loads(product_json) attributes = product["product"]["attributes"] instance_type = attributes["instanceType"] # We don't expect the "warnings" to get hit, we just don't want to get thrown # off if the data format changes unexpectedly or something like that. if "physicalProcessor" not in attributes: print( f"Warning, skipping {instance_type} because physicalProcessor is not " "specified" ) continue # effectively, this skips Graviton (ARM-based) processors # TODO eventually support Graviton processors. if ( "intel" not in attributes["physicalProcessor"].lower() and "amd" not in attributes["physicalProcessor"].lower() ): # only log if we see non-Graviton processors if "AWS Graviton" not in attributes["physicalProcessor"]: print( "Skipping non-Intel/AMD processor " f"{attributes['physicalProcessor']} in {instance_type}" ) continue if "OnDemand" not in product["terms"]: print( f"Warning, skipping {instance_type} because there was no OnDemand terms" ) continue on_demand = list(product["terms"]["OnDemand"].values()) if len(on_demand) != 1: print( f"Warning, skipping {instance_type} because there was more than one " "OnDemand SKU" ) continue price_dimensions = list(on_demand[0]["priceDimensions"].values()) if len(price_dimensions) != 1: print( f"Warning, skipping {instance_type} because there was more than one " "priceDimensions" ) continue pricing = price_dimensions[0] if pricing["unit"] != "Hrs": print( f"Warning, skipping {instance_type} because the pricing unit is not " f"Hrs: {pricing['unit']}" ) continue if "USD" not in pricing["pricePerUnit"]: print( f"Warning, skipping {instance_type} because the pricing is not in USD" ) continue usd_price = pricing["pricePerUnit"]["USD"] try: usd_price_float = float(usd_price) except ValueError: print( f"Warning, skipping {instance_type} because the price is not a float: " f"{usd_price}" ) continue memory = attributes["memory"] if not memory.endswith(" GiB"): print( f"Warning, skipping {instance_type} because memory doesn't end in GiB: " f"{memory}" ) continue try: memory_gb_float = float(memory[: -len(" GiB")]) except ValueError: print( f"Warning, skipping {instance_type} because memory isn't a float: " f"{memory}" ) continue try: vcpu_int = int(attributes["vcpu"]) except ValueError: print( f"Warning, skipping {instance_type} because vcpu isn't an int: " f"{attributes['vcpu']}" ) continue yield CloudInstanceType( instance_type, memory_gb_float, vcpu_int, usd_price_float, 0, "on_demand" )
21,357
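# A minimal sketch of what the _boto3_paginate helper referenced above could look
# like; the real implementation may differ. It assumes the bound client method
# exposes __self__/__name__ (botocore sets these on generated client methods) and
# that each page of the pricing get_products response carries its items under
# the "PriceList" key.
def _boto3_paginate(method, **kwargs):
    paginator = method.__self__.get_paginator(method.__name__)
    for page in paginator.paginate(**kwargs):
        yield from page["PriceList"]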
def EnumerateInputs(inputs):
  """Enumerates binary files in the provided paths."""
  for current_input in inputs:
    components = current_input.split(':', 2)
    entry = components[0]
    pattern = re.compile(components[1])
    if os.path.isdir(entry):
      for root, unused_subdirs, files in os.walk(entry):
        for file_name in files:
          full_path = os.path.join(root, file_name)
          relative_path = os.path.relpath(full_path, entry)
          if pattern.match(relative_path):
            yield {
                'path': relative_path,
                'open': lambda full_path=full_path: open(full_path)
            }
    else:
      jar = zipfile.ZipFile(entry, 'r')
      for entry in jar.namelist():
        if pattern.match(entry):
          yield {
              'path': entry,
              'open': lambda jar=jar, entry=entry: jar.open(entry)
          }
21,358
def nodes(G): """Returns an iterator over the graph nodes.""" return G.nodes()
21,359
def import_all(directory): """ Execute 'from (directory) import (all files except "all.py")'. """ try: exec('import ' + directory) import_files = eval(directory + ".__all__") for import_file in import_files: if import_file != "all": exec('import ' + directory + '.' + import_file) except AttributeError: # __all__ does not exist pass except ImportError: # 1. __init__.py does not exist # 2. import_file is not file name pass
21,360
def cyclePosition(image: np.ndarray, startPosition: position) -> Union[position, bool]:
    """
    :param image: numpy image array
    :param startPosition: the current position (x, y) to advance from
    :return: the next position (x, y), or False if the new coordinates would fall out of bounds
    """
    if not imageWrapper.boundsChecker(image, startPosition):
        return False
    if startPosition.coords[0] == image.shape[1] - 1:
        if startPosition.coords[1] < image.shape[0] - 1:
            return position((0, startPosition.coords[1] + 1))
        return False
    return position((startPosition.coords[0] + 1, startPosition.coords[1]))
21,361
def get_image(file_name): """retrieves an image from a file and returns it as an np array of pixels""" image_array = [] file_name = os.path.abspath(file_name) img = Image.open(file_name) img = img.convert("RGB") img = img.resize((image_size, image_size)) in_data = np.asarray(img) image_array.append(in_data) return np.array(image_array)
21,362
def context_to_ingestion_params(context): """extract the ingestion task params from job/serving context""" featureset_uri = context.get_param("featureset") featureset = context.get_store_resource(featureset_uri) infer_options = context.get_param("infer_options", InferOptions.Null) source = context.get_param("source") if source: source = get_source_from_dict(source) elif featureset.spec.source.to_dict(): source = get_source_from_dict(featureset.spec.source.to_dict()) overwrite = context.get_param("overwrite", None) targets = context.get_param("targets", None) if not targets: targets = featureset.spec.targets targets = [get_target_driver(target, featureset) for target in targets] return featureset, source, targets, infer_options, overwrite
21,363
def bining_for_calibration(pSigma_cal_ordered_, minL_sigma, maxL_sigma,
                           Er_vect_cal_orderedSigma_, bins, coverage_percentile):
    """ Bin the values of the standard deviations observed during inference
        and estimate a specified coverage percentile in the absolute error
        (observed during inference as well). Bins that have fewer than 50
        samples are merged until they surpass this threshold.

        Parameters
        ----------
        pSigma_cal_ordered_ : numpy array
            Array of standard deviations ordered in ascending way.
        minL_sigma : float
            Minimum value of standard deviations included in pSigma_cal_ordered_ array.
        maxL_sigma : float
            Maximum value of standard deviations included in pSigma_cal_ordered_ array.
        Er_vect_cal_orderedSigma_ : numpy array
            Array of absolute values of errors corresponding to the array of ordered standard deviations.
        bins : int
            Number of bins to split the range of standard deviations included in pSigma_cal_ordered_ array.
        coverage_percentile : float
            Value to use for estimating coverage when evaluating the percentiles of the observed absolute value of errors.

        Returns
        ----------
        mean_sigma : numpy array
            Array with the mean standard deviations computed per bin.
        min_sigma : numpy array
            Array with the minimum standard deviations computed per bin.
        max_sigma : numpy array
            Array with the maximum standard deviations computed per bin.
        error_thresholds : numpy array
            Thresholds of the errors computed to attain a certain error coverage per bin.
        err_err : numpy array
            Error bars in errors (one standard deviation for a binomial distribution estimated by bin vs. the other bins) for the calibration error.
    """

    # thresholds = np.logspace(np.log10(minL_sigma), np.log10(maxL_sigma), num=bins)
    thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)
    classes = np.digitize(pSigma_cal_ordered_, thresholds)

    Nbin = np.zeros(bins + 1)
    for i in range(bins + 1):
        indices = (classes == i)
        Nbin[i] = indices.sum()

    # Repair bins
    new_thresholds_l = []
    new_nbins_l = []
    sumN = 0
    for i in range(Nbin.shape[0]):
        sumN += Nbin[i]
        if sumN > 50:
            if i > (thresholds.shape[0] - 1):
                new_thresholds_l.append(thresholds[-1])
            else:
                new_thresholds_l.append(thresholds[i])
            new_nbins_l.append(sumN)
            sumN = 0
    new_thresholds = np.array(new_thresholds_l)
    new_nbins = np.array(new_nbins_l)
    new_thresholds[-1] = thresholds[-1]
    new_nbins[-1] += sumN
    #
    classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])
    error_thresholds = -1. * np.ones(new_nbins.shape[0])
    mean_sigma = -1. * np.ones(new_nbins.shape[0])
    min_sigma = -1. * np.ones(new_nbins.shape[0])
    max_sigma = -1. * np.ones(new_nbins.shape[0])
    err_err = -1. * np.ones(new_nbins.shape[0])
    Ncal = pSigma_cal_ordered_.shape[0]
    for i in range(error_thresholds.shape[0]):
        indices = (classes == i)
        n_aux = indices.sum()
        assert n_aux == new_nbins[i]
        print('Points in bin %d: %d' % (i, n_aux))
        mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])
        min_sigma[i] = np.min(pSigma_cal_ordered_[indices])
        max_sigma[i] = np.max(pSigma_cal_ordered_[indices])
        error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)
        err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]

    return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err
21,364
def add_user(request):
    """Register a user"""
    info = {}
    tpl_name = 'user/add_user.html'
    if request.method == 'POST':
        # Save the data submitted by the user
        nickname = request.POST.get('nickname')
        if User.objects.filter(nickname__exact=nickname).exists():
            # The nickname is already taken
            info = {'error':'This nickname is already taken'}
            # Render the registration page
            return render(request,tpl_name,info)

        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        if password != password2:
            # The two passwords differ
            info = {'error': 'The two passwords do not match'}
            # Render the registration page
            return render(request,tpl_name,info)

        age = request.POST.get('age')
        sex = request.POST.get('sex')
        f_in = request.FILES.get('icon')

        user = User(nickname=nickname,password=password,age=age,sex=sex)
        if f_in:
            user.icon.save(f_in.name,f_in,save=False)
        user.set_password(password)
        user.save()

        # Record the user info in the session
        request.session['uid'] = user.id
        request.session['nickname'] = user.nickname

        # Redirect to the user detail page
        url = '/user/read_user/?uid={}'.format(user.id)
        return redirect(url)
    else:
        # Render the registration page
        return render(request,tpl_name,info)
21,365
def process_input(input_string, max_depth): """ Clean up the input, convert it to an array and compute the longest array, per feature type. """ # remove the quotes and extra spaces from the input string input_string = input_string.replace('"', '').replace(', ', ',').strip() # convert the string to an array and also track the longest array, so # we know how many levels for the feature type. tmp = [] if input_string: tmp = input_string.split(',') if max_depth < len(tmp): max_depth = len(tmp) # return the array and the depth return tmp, max_depth
21,366
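# Illustrative call of process_input() above with made-up input: quotes and the
# spaces after commas are stripped before splitting.
features, depth = process_input('"alpha, beta, gamma"', max_depth=0)
print(features)  # ['alpha', 'beta', 'gamma']
print(depth)     # 3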
def generate_new_shape() -> tuple[int, list[int], list[int]]: """Generate new shape #0: hot_cell_y = [0,1,2,3] hot_cell_x = [5,5,5,5] X X X X #1: hot_cell_y = [0,0,0,0] hot_cell_x = [3,4,5,6] XXXX #2: hot_cell_y = [0,1,0,1] hot_cell_x = [4,4,5,5] XX XX #3. hot_cell_y = [0,0,1,1] hot_cell_x = [4,5,5,6] XX XX #4. hot_cell_y = [0,1,1,2] hot_cell_x = [4,4,5,5] X XX X #5. hot_cell_y = [0,1,2,2] hot_cell_x = [4,4,4,5] X X XX #6. hot_cell_y = [1,0,1,1] hot_cell_x = [4,4,5,6] X XXX """ shape_id = random.randint(1, 7) logger.info("generating shape id => " + str(shape_id)) shape_color = shape_id if(shape_id == 2): shape_y_pos_list = [0, 0, 0, 0] shape_x_pos_list = [3, 4, 5, 6] elif(shape_id == 3): shape_y_pos_list = [0, 1, 0, 1] shape_x_pos_list = [4, 4, 5, 5] elif(shape_id == 4): shape_y_pos_list = [0, 0, 1, 1] shape_x_pos_list = [4, 5, 5, 6] elif(shape_id == 5): shape_y_pos_list = [0, 1, 1, 2] shape_x_pos_list = [4, 4, 5, 5] elif(shape_id == 6): shape_y_pos_list = [0, 1, 2, 2] shape_x_pos_list = [4, 4, 4, 5] elif(shape_id == 7): shape_y_pos_list = [0, 1, 1, 1] shape_x_pos_list = [4, 4, 5, 6] else: shape_y_pos_list = [0, 1, 2, 3] shape_x_pos_list = [5, 5, 5, 5] return (shape_color, shape_x_pos_list, shape_y_pos_list)
21,367
def get_process_list(process):
    """Analyse the process description and return the Actinia process chain and the name of the processing result

    :param process: The process description
    :return: (output_names, actinia_process_list)
    """
    input_names, process_list = analyse_process_graph(process)
    output_names = []

    # First analyse the data entry
    if "data_id" not in process:
        raise Exception("Process %s requires parameter <data_id>" % PROCESS_NAME)

    output_names.append(process["data_id"])

    pc = create_process_chain_entry(input_name=process["data_id"])
    process_list.append(pc)

    # Then add the input to the output
    for input_name in input_names:
        # Create the output name based on the input name and method
        output_name = input_name
        output_names.append(output_name)

    return output_names, process_list
21,368
def createAES(key, IV, implList=None): """Create a new AES object. :type key: str :param key: A 16, 24, or 32 byte string. :type IV: str :param IV: A 16 byte string :rtype: tlslite.utils.AES :returns: An AES object. """ if implList is None: implList = ["openssl", "pycrypto", "python"] for impl in implList: if impl == "openssl" and cryptomath.m2cryptoLoaded: return openssl_aes.new(key, 2, IV) elif impl == "pycrypto" and cryptomath.pycryptoLoaded: return pycrypto_aes.new(key, 2, IV) elif impl == "python": return python_aes.new(key, 2, IV) raise NotImplementedError()
21,369
def periodic(interval: int, action, actionargs=()) -> None:
    """Function that periodically calls another function"""
    # Keep calling the function on a periodic basis at the given interval
    s.enter(interval, 1, periodic, (interval, action, actionargs))
    action(*actionargs)
    s.run()
21,370
def command_info(opts): """Display general information from a .zs file's header. Usage: zs info [--metadata-only] [--] <zs_file> zs info --help Arguments: <zs_file> Path or URL pointing to a .zs file. An argument beginning with the four characters "http" will be treated as a URL. Options: -m, --metadata-only Output only the file's metadata, not any general information about it. Output will be valid JSON. """ with open_zs(opts, parallelism=0) as z: if opts["--metadata-only"]: info = z.metadata else: info = OrderedDict() info["root_index_offset"] = z.root_index_offset info["root_index_length"] = z.root_index_length info["total_file_length"] = z.total_file_length info["codec"] = z.codec info["data_sha256"] = (binascii.hexlify(z.data_sha256) .decode("ascii")) info["metadata"] = z.metadata info["statistics"] = OrderedDict() info["statistics"]["root_index_level"] = z.root_index_level json.dump(info, sys.stdout, indent=4) sys.stdout.write("\n") return 0
21,371
def main(): """ This is called from main.py and will enter the NSApp main loop """ assert NSThread.isMainThread() global app app = NSApplication.sharedApplication() setup() print "entering GUI main loop" app.run() sys.exit()
21,372
def load_triple(cdict, label2words, extend=True): """ Loading triples of color modifiers Parameters ---------- cdict : dict Color dictionary maps a string to list of rgb tuples. label2words : dict Dictionary mapping color labels to color names. Returns ------- dict: Triples can be formed by colors in the cdict dictionary. """ bypass_quantifier = ["almost","cobalt"] file_comp = os.path.join(BASE_DIR, "comparatives.txt") quan_comp = os.path.join(BASE_DIR, "quantifiers.txt") to_compara = dict(line.strip().split(":") for line in open(file_comp, encoding="utf-8")) to_more_quanti = dict(line.strip().split(":") for line in open(quan_comp, encoding="utf-8")) triples = [] for label in cdict: words = label2words[label].split() if len(words) > 1: quantifier, base = words[0], "".join(words[1:]) if quantifier == "very": base = "".join(words[2:]) quantifier = words[1] if base in cdict: if words[1] in to_compara: triples.append((base, ("more", to_compara[quantifier]), tuple(label2words[base].split()), label)) else: if base in cdict: if quantifier in to_compara: # uni-gram('lighter',) triples.append((base, (to_compara[quantifier],), tuple(label2words[base].split()), label)) elif quantifier in to_more_quanti: # bigram('more','bluish') triples.append((base, ("more", to_more_quanti[quantifier]), tuple(label2words[base].split()), label)) else: if extend: # this adds more power, but not increase AUC if quantifier not in bypass_quantifier: triples.append((base, ("more", quantifier), tuple(label2words[base].split()), label)) return triples
21,373
def update_status_issue(issue, status_id, notes): """Request to change the status of a problem in a redmine project. 'issue': A hash of the issue is bound to a redmine project. 'status_id': Id status used by redmine the project. 'notes': Comments about the update. Return value: 0 - on success non zero - HTTP protocol errors are valid responses. """ values = '''{ "issue": { "status_id": "%s", "notes": "%s" } }''' % (status_id, notes) req = Request( '%s/issues/%s.json' % (_service_host_project(), issue), data=values.encode(), method='PUT') req.add_header('Content-Type', 'application/json') req.add_header('X-Redmine-API-Key', _service_access_key()) try: with urlopen(req) as context: pass return 0 if context.code == 200 else context.code except HTTPError as err: print('The server couldn\'t fulfill the request.') print('Error code: ', err.code) except URLError as err: print('We failed to reach a server.') print('Reason: ', err.reason)
21,374
def update_count_activities():
    """Store in the session variable the number of pending activities, i.e.
    those with one of the following statuses:
       0 - Created
       1 - Overdue
       2 - Pending on Partner
       3 - Pending on Client
    """
    count_activities = 0

    if session.get('user_role') == 2:
        sales_person = SalesPerson.query.filter_by(id_user = session.get('user_id')).first()
        count_activities = db.session.execute('select count(id) as c from Activity where id_sales_person=' + str(sales_person.id) + ' and id_status < 4').scalar()
    else:
        count_activities = db.session.execute('select count(id) as c from Activity where id_status < 4').scalar()

    session["count_activities"] = count_activities if count_activities > 0 else ""

    return
21,375
def _get_avgiver_epost(root: ET.Element, ns: dict) -> Optional[str]: """ Sought: the email of the submitter Can be found in a child element (<mets:note>) of an <mets:agent> with ROLE="OTHER", OTHERROLE="SUBMITTER", TYPE="INDIVIDUAL" """ try: agent = [ agent for agent in _get_agent_elements(root, ns) if ( agent.get("ROLE") == "OTHER" and agent.get("OTHERROLE") == "SUBMITTER" and agent.get("TYPE") == "INDIVIDUAL" ) ].pop() notes = agent.findall("mets:note", namespaces=ns) email = [ note.text for note in notes if "@" in note.text ].pop() return email except IndexError: return None
21,376
def clean(dir_): """Filters the dir_ directory (splits into dir_cleaned and dir_junk).""" dir_cleaned = join(DATA_DIR, dir_ + '_cleaned_final') _make_labeled_dir_structure(dir_cleaned) dir_junk = join(DATA_DIR, dir_ + '_junk_final') _make_labeled_dir_structure(dir_junk) black_list = read_lines( IMAGES_BLACKLIST_FILE, line_func=lambda l: l.rstrip() ) for class_dir in CLASSES: class_dir_abs = join(DATA_DIR, dir_, class_dir) for file_name in listdir(class_dir_abs): if not file_name.endswith('.jpg'): continue src_path = join(class_dir_abs, file_name) if _is_clean_image(black_list, src_path): dest_path = join(dir_cleaned, class_dir, file_name) else: dest_path = join(dir_junk, class_dir, file_name) copyfile(src_path, dest_path)
21,377
def xavier_init(fan_in, fan_out, constant=1):
    """ Xavier initialization of network weights """
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32, seed=np.random.randint(0, 1e9))
21,378
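# Worked check of the Xavier bound used above, in plain numpy so it runs without
# TensorFlow: samples drawn uniformly from [-sqrt(6/(fan_in+fan_out)), +sqrt(...)]
# have variance close to 2/(fan_in+fan_out). The layer sizes are made up.
import numpy as np

fan_in, fan_out = 256, 128
limit = np.sqrt(6.0 / (fan_in + fan_out))
w = np.random.uniform(-limit, limit, size=(fan_in, fan_out))
print(limit)                              # 0.125
print(w.var(), 2.0 / (fan_in + fan_out))  # both approximately 0.0052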
def _module_errors_to_junit_test_case(module_error, suit_xml_section):
    """
    Given a ModuleErrors instance, writes a testcase section to the given suite xml section
    :param module_error:
    :type module_error: ModuleErrors
    :param suit_xml_section: xml context element
    :type suit_xml_section: ET.Element
    """
    test_case = ET.SubElement(
        suit_xml_section,
        "testcase",
        name="{}".format(module_error.module_name),
        time="0.0",
    )
    for error in module_error.pylint_error_list:
        elm = ET.SubElement(
            test_case,
            "failure",
            type="{code}: {msg}".format(
                code=error.msg_id,
                msg=error.symbol,
            ),
            message="{file}:{line}".format(
                file=error.path,
                line=error.line,
            ),
        )
        # a trailing newline helps some CIs render the output more nicely
        elm.text = "{}\n".format(
            error.message.strip() if error.message else ''
        )
21,379
def test_github_actions_ci(options_baked): """The generated ci.yml file should pass a sanity check.""" ci_text = Path(".github/workflows/ci.yml").read_text() assert 'pip install -r requirements/ci.txt' in ci_text
21,380
def strToMat3(dbstr):
    """
    convert a string like e00, e01, e02, ... into a Mat3

    :param dbstr:
    :return: panda Mat3
    """

    exx = dbstr.split(',')
    exxdecimal = list(map(float, exx))
    assert len(exxdecimal) == 16
    return Mat3(exxdecimal[0], exxdecimal[1], exxdecimal[2],
                exxdecimal[4], exxdecimal[5], exxdecimal[6],
                exxdecimal[8], exxdecimal[9], exxdecimal[10])
21,381
def map2(func, *matrix): """ Maps a function onto the elements of a matrix Also accepts multiple matrices. Thus matrix addition is map2(add, matrix1, matrix2) """ matrix2 = [] for i in xrange(len(matrix[0])): row2 = [] matrix2.append(row2) for j in xrange(len(matrix[0][i])): args = [x[i][j] for x in matrix] row2.append(func(* args)) return matrix2
21,382
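# Illustrative matrix addition with map2() above; the function uses xrange, so it
# targets Python 2 (substitute range for xrange to run it on Python 3). The
# matrices are made up.
from operator import add

m1 = [[1, 2], [3, 4]]
m2 = [[10, 20], [30, 40]]
print(map2(add, m1, m2))  # [[11, 22], [33, 44]]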
def register_libtype(cls):
    """Registry of library types we may come across when parsing XML. This
    allows us to define a few helper functions to dynamically convert the
    XML into objects. See buildItem() below for an example.
    """
    LIBRARY_TYPES[cls.TYPE] = cls
    return cls
21,383
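# Hypothetical usage of register_libtype() above, assuming LIBRARY_TYPES is the
# module-level dict the decorator writes into and that this runs in that same
# module; the Movie class is illustrative only.
LIBRARY_TYPES = {}

@register_libtype
class Movie:
    TYPE = "movie"

assert LIBRARY_TYPES["movie"] is Movie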
def construct_tablepath(fmdict, prefix=''): """ Construct a suitable pathname for a CASA table made from fmdict, starting with prefix. prefix can contain a /. If prefix is not given, it will be set to "ephem_JPL-Horizons_%s" % fmdict['NAME'] """ if not prefix: prefix = "ephem_JPL-Horizons_%s" % fmdict['NAME'] return prefix + "_%.0f-%.0f%s%s.tab" % (fmdict['earliest']['m0']['value'], fmdict['latest']['m0']['value'], fmdict['latest']['m0']['unit'], fmdict['latest']['refer'])
21,384
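# Illustrative call of construct_tablepath() above with a made-up fmdict.
fmdict = {
    "NAME": "Mars",
    "earliest": {"m0": {"value": 56000.0}},
    "latest": {"m0": {"value": 56100.0, "unit": "d"}, "refer": "J2000"},
}
print(construct_tablepath(fmdict))
# ephem_JPL-Horizons_Mars_56000-56100dJ2000.tab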
def _grad_mulAux(kern,x,y,yerr,original_kernel):
    """
        _grad_mulAux() is necessary when we are dealing with multiple terms of
    sums and multiplications, for example ES*ESS + ES*ESS*WN + RQ*ES*WN,
    without having everything break apart.

        Parameters
    kern = kernel in use
    x = range of values of the independent variable (usually time)
    y = range of values of the dependent variable (the measurements)
    yerr = error in the measurements
    original_kernel = original kernel (original sum) being used

        Returns
    See _grad_mul(kernel,x,y,yerr) for more info
    """
    original_kernel = original_kernel
    cov_matrix = build_matrix(original_kernel,x,yerr)
    listof__kernels = [kern.__dict__["k2"]] #to put each kernel separately
    kernel_k1 = kern.__dict__["k1"]

    while len(kernel_k1.__dict__) == 2:
        listof__kernels.insert(0,kernel_k1.__dict__["k2"])
        kernel_k1=kernel_k1.__dict__["k1"]
    listof__kernels.insert(0,kernel_k1) #each kernel is now separated

    kernelaux1 = []; kernelaux2 = []
    for i, e in enumerate(listof__kernels):
        kernelaux1.append(listof__kernels[i])
        kernelaux2.append(_kernel_deriv(listof__kernels[i]))

    grad_result = []
    kernelaux11 = kernelaux1; kernelaux22 = kernelaux2
    ii = 0
    while ii<len(listof__kernels):
        kernelaux11 = kernelaux1[:ii] + kernelaux1[ii+1 :]
        _kernels = _np.prod(_np.array(kernelaux11))
        for ij, e in enumerate(kernelaux22[ii]):
            result = _grad_lp(kernelaux2[ii][ij]*_kernels,x,y,yerr,cov_matrix)
            grad_result.insert(0,result)
        kernelaux11 = kernelaux1;kernelaux22=kernelaux2
        ii = ii+1

    grad_result = grad_result[::-1]
    return grad_result
21,385
def get_unbiased_p_hat(number_candidates, c1, c2, p): """Get the p_hat to unbias miracle. Args: number_candidates: The number of candidates to be sampled. c1: The factor that the conditional density of z given x is proportional to if the inner product between x and z is more than gamma. c2: The factor that the conditional density of z given x is proportional to if the inner product between x and z is less than gamma. p: The probability with which privunit samples an unit vector from the shaded spherical cap associated with input (see original privunit paper). Returns: p_hat: The probability with which unbiased miracle will sample an unit vector from the shaded spherical cap associated with input. """ # Compute the fraction of candidates that lie inside the cap. beta = np.array(range(number_candidates + 1)) / number_candidates pi_in = 1 / number_candidates * (c1 / (beta * c1 + (1 - beta) * c2)) p_hat = np.sum( stats.binom.pmf(range(number_candidates + 1), number_candidates, p / c1) * range(number_candidates + 1) * pi_in) return p_hat
21,386
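# Illustrative call of get_unbiased_p_hat() above with made-up parameters; it
# assumes numpy (np) and scipy.stats (stats) are imported as in the function body.
p_hat = get_unbiased_p_hat(number_candidates=16, c1=4.0, c2=0.5, p=0.9)
print(p_hat)  # sampling probability adjusted for the finite candidate pool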
def test_send_message_with_an_invalid_recaptcha_response_400_error(client, subtests): """Test 400 error returned when reCAPTCHA response is invalid.""" for recaptcha_response in ['some_invalid_token', 'another_invalid_token']: with subtests.test(recaptcha_response=recaptcha_response): with mock.patch('requests.post', autospec=True) as mock_post: mock_json = mock_post.return_value.json mock_json.return_value = _a_site_verify_response_with( success=False, error_codes=['invalid-input-response']) response = client.post( '/messages', data=_a_message_form_with( g_recaptcha_response=recaptcha_response)) assert response.status_code == http.HTTPStatus.BAD_REQUEST assert response.content_type == 'application/json' assert json.loads(response.data) == { 'code': http.HTTPStatus.BAD_REQUEST, 'name': 'Bad Request', 'description': 'The recaptcha_response parameter value ' f'"{recaptcha_response}" was not valid.', }
21,387
def mkdir(path): """ Make a directory if it doesn't exist. :param path: directory path """ if not os.path.exists(path): os.makedirs(path)
21,388
def prox_gradf(xy, step): """Gradient step""" return xy-step*grad_f(xy)
21,389
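# Minimal sketch using prox_gradf() above for plain gradient descent on
# f(x, y) = x**2 + y**2; grad_f here is an illustrative stand-in for whatever
# module-level gradient the function expects.
import numpy as np

def grad_f(xy):
    return 2.0 * xy

xy = np.array([1.0, -2.0])
for _ in range(100):
    xy = prox_gradf(xy, step=0.1)
print(xy)  # close to [0, 0]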
def CBND(x, y, rho):
    """
    A function for computing bivariate normal probabilities.
    ::

        Alan Genz
        Department of Mathematics
        Washington State University
        Pullman, WA 99164-3113
        Email : alangenz@wsu.edu

    This function is based on the method described by ::

        Drezner, Z and G.O. Wesolowsky, (1990),
        On the computation of the bivariate normal integral,
        Journal of Statist. Comput. Simul. 35, pp. 101-107,

    with major modifications for double precision, and for ``|R|`` close to 1.

    This code was originally translated into VBA by Graeme West
    """
    W = numpy.zeros((11,4))
    XX = numpy.zeros((11,4))
    W[1][1] = 0.17132449237917
    XX[1][1] = -0.932469514203152
    W[2][1] = 0.360761573048138
    XX[2][1] = -0.661209386466265
    W[3][1] = 0.46791393457269
    XX[3][1] = -0.238619186083197
    W[1][2] = 4.71753363865118E-02
    XX[1][2] = -0.981560634246719
    W[2][2] = 0.106939325995318
    XX[2][2] = -0.904117256370475
    W[3][2] = 0.160078328543346
    XX[3][2] = -0.769902674194305
    W[4][2] = 0.203167426723066
    XX[4][2] = -0.587317954286617
    W[5][2] = 0.233492536538355
    XX[5][2] = -0.36783149899818
    W[6][2] = 0.249147045813403
    XX[6][2] = -0.125233408511469
    W[1][3] = 1.76140071391521E-02
    XX[1][3] = -0.993128599185095
    W[2][3] = 4.06014298003869E-02
    XX[2][3] = -0.963971927277914
    W[3][3] = 6.26720483341091E-02
    XX[3][3] = -0.912234428251326
    W[4][3] = 8.32767415767048E-02
    XX[4][3] = -0.839116971822219
    W[5][3] = 0.10193011981724
    XX[5][3] = -0.746331906460151
    W[6][3] = 0.118194531961518
    XX[6][3] = -0.636053680726515
    W[7][3] = 0.131688638449177
    XX[7][3] = -0.510867001950827
    W[8][3] = 0.142096109318382
    XX[8][3] = -0.37370608871542
    W[9][3] = 0.149172986472604
    XX[9][3] = -0.227785851141645
    W[10][3] = 0.152753387130726
    XX[10][3] = -7.65265211334973E-02

    if numpy.abs(rho) < 0.3:
        NG = 1
        LG = 3
    elif numpy.abs(rho) < 0.75:
        NG = 2
        LG = 6
    else:
        NG = 3
        LG = 10

    h = -x
    k = -y
    hk = h * k
    BVN = 0

    if numpy.abs(rho) < 0.925:
        if numpy.abs(rho) > 0:
            hs = (h * h + k * k) / 2.
            asr = math.asin(rho)
            for i in range(1,LG+1):
                for ISs in [-1,1]:
                    sn = math.sin(asr * (ISs * XX[i][NG] + 1) / 2)
                    BVN = BVN + W[i][NG] * numpy.exp((sn * hk - hs) / (1 - sn * sn))
            BVN = BVN * asr / (4. * numpy.pi)
        BVN = BVN + CND(-h) * CND(-k)
    else:
        if rho < 0:
            k = -k
            hk = -hk
        if numpy.abs(rho) < 1.:
            Ass = (1. - rho) * (1. + rho)
            A = numpy.sqrt(Ass)
            bs = (h - k) ** 2
            c = (4. - hk) / 8.
            d = (12. - hk) / 16.
            asr = -(bs / Ass + hk) / 2.
            if asr > -100:
                BVN = A * numpy.exp(asr) * (1 - c * (bs - Ass) * (1 - d * bs / 5.) / 3. + c * d * Ass * Ass / 5.)
            if -hk < 100:
                b = numpy.sqrt(bs)
                BVN = BVN - numpy.exp(-hk / 2.) * numpy.sqrt(2. * numpy.pi) * CND(-b / A) * b * (1. - c * bs * (1. - d * bs / 5.) / 3.)
            A = A / 2
            for i in range(1,LG+1):
                for ISs in [-1,1]:
                    xs = (A * (ISs * XX[i][NG] + 1)) ** 2
                    rs = numpy.sqrt(1 - xs)
                    asr = -(bs / xs + hk) / 2
                    if asr > -100:
                        BVN = BVN + A * W[i][NG] * numpy.exp(asr) * (numpy.exp(-hk * (1 - rs) / (2 * (1 + rs))) / rs - (1 + c * xs * (1 + d * xs)))
            BVN = -BVN / (2. * numpy.pi)
        if rho > 0.:
            BVN = BVN + CND(-max(h, k))
        else:
            BVN = -BVN
            if k > h:
                BVN = BVN + CND(k) - CND(h)
    CBND = BVN
    return CBND
21,390
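# Illustrative sanity check of CBND() above against scipy's bivariate normal CDF.
# CND is assumed to be the standard normal CDF the routine relies on and to live
# in the same module; a plain erf-based version is supplied here so the snippet
# is self-contained. The inputs are made up.
import math
from scipy.stats import multivariate_normal

def CND(x):
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

rho = 0.5
print(CBND(0.3, -0.2, rho))
print(multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, rho], [rho, 1.0]]).cdf([0.3, -0.2]))
# the two values should agree to several decimal places for |rho| < 0.925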
def prune(root: Node, copy: bool = True) -> Node: """ Prune (or simplify) the given SPN to a minimal and equivalent SPN. :param root: The root of the SPN. :param copy: Whether to copy the SPN before pruning it. :return: A minimal and equivalent SPN. :raises ValueError: If the SPN structure is not a directed acyclic graph (DAG). :raises ValueError: If an unknown node type is found. """ # Copy the SPN before proceeding, if specified if copy: root = deepcopy(root) # Check the SPN check_spn(root, labeled=True, smooth=True, decomposable=True) nodes = topological_order(root) if nodes is None: raise ValueError("SPN structure is not a directed acyclic graph (DAG)") # Build a dictionary that maps each id of a node to the corresponding node object nodes_map = dict(map(lambda n: (n.id, n), nodes)) # Proceed by reversed topological order for node in reversed(nodes): # Skip leaves if isinstance(node, Leaf): continue # Retrieve the children nodes from the mapping children_nodes = list(map(lambda n: nodes_map[n.id], node.children)) if len(children_nodes) == 1: nodes_map[node.id] = children_nodes[0] elif isinstance(node, Product): # Subsequent product nodes, concatenate the children of them children = list() for child in children_nodes: if not isinstance(child, Product): children.append(child) continue product_children = map(lambda n: nodes_map[n.id], child.children) children.extend(product_children) nodes_map[node.id].children = children elif isinstance(node, Sum): # Subsequent sum nodes, concatenate the children of them and adjust the weights accordingly # Important! This implementation take care also of directed acyclic graphs (DAGs) children_weights = defaultdict(float) for i, child in enumerate(children_nodes): if not isinstance(child, Sum): children_weights[child] += node.weights[i] continue sum_children = map(lambda n: nodes_map[n.id], child.children) for j, sum_child in enumerate(sum_children): children_weights[sum_child] += node.weights[i] * child.weights[j] children, weights = zip(*children_weights.items()) nodes_map[node.id].weights = np.array(weights, dtype=node.weights.dtype) nodes_map[node.id].children = children else: raise ValueError("Unknown node type called {}".format(node.__class__.__name__)) return assign_ids(nodes_map[root.id])
21,391
def verify_sacct(csv_str): """ Ensure that native format is vaguely valid (treat it as a |-separated csv) """ data = pandas.read_csv(io.StringIO(csv_str), sep="|") assert len(data) > 0
21,392
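# Illustrative call of verify_sacct() above with a made-up sacct-style record
# (assumes pandas and io are importable, as the function itself requires).
sample = "JobID|State|Elapsed\n123|COMPLETED|00:01:02\n"
verify_sacct(sample)  # one data row parses, so the assertion holds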
def GetRevisionAndLogs(slave_location, build_num): """Get a revision number and log locations. Args: slave_location: A URL or a path to the build slave data. build_num: A build number. Returns: A pair of the revision number and a list of strings that contain locations of logs. (False, []) in case of error. """ if slave_location.startswith('http://'): location = slave_location + '/builds/' + str(build_num) else: location = os.path.join(slave_location, str(build_num)) revision = False logs = [] fp = None try: if location.startswith('http://'): fp = urllib2.urlopen(location) contents = fp.read() revisions = re.findall(r'<td class="left">got_revision</td>\s+' '<td>(\d+)</td>\s+<td>Source</td>', contents) if revisions: revision = revisions[0] logs = [location + link + '/text' for link in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)] else: fp = open(location, 'rb') build = cPickle.load(fp) properties = build.getProperties() if properties.has_key('got_revision'): revision = build.getProperty('got_revision') candidates = os.listdir(slave_location) logs = [os.path.join(slave_location, filename) for filename in candidates if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)] except urllib2.URLError, e: logging.exception('Error reading build URL "%s": %s', location, str(e)) return False, [] except (IOError, OSError), e: logging.exception('Error reading build file "%s": %s', location, str(e)) return False, [] finally: if fp: fp.close() return revision, logs
21,393
def get_json(url, **kwargs):
    """Downloads json data and converts it to a dict"""
    raw = get(url, **kwargs)
    if raw is None:
        return None
    return json.loads(raw.decode('utf8'))
21,394
def get_text(im):
    """
    Extract the text region of the image
    """
    return im[3:24, 116:288]
21,395
def homepage(): """Display tweets""" tweet_to_db() output = [a for a in Tweet.query.order_by(desc('time_created')).all()] # to display as hyper links for tweet in output: tweet.handle = linkyfy(tweet.handle, is_name=True) tweet.text = linkyfy(tweet.text) return render_template("home.html", output=output)
21,396
def watch_list_main_get(): """ Render watch list page. Author: Jérémie Dierickx """ watchlist = env.get_template('watchlists.html') return header("Watch List") + watchlist.render(user_name=current_user.pseudo) + footer()
21,397
def input_file(inp_str): """ Parse the input string """ # Parse the sections of the input into keyword-val dictionaries train_block = ioformat.ptt.symb_block(inp_str, '$', 'training_data') fform_block = ioformat.ptt.symb_block(inp_str, '$', 'functional_form') exec_block = ioformat.ptt.symb_block(inp_str, '$', 'fortran_execution') train_dct = ioformat.ptt.keyword_dct_from_block( train_block[1], formatvals=False) fform_dct = ioformat.ptt.keyword_dct_from_block( fform_block[1], formatvals=False) exec_dct = ioformat.ptt.keyword_dct_from_block( exec_block[1], formatvals=False) # Set defaults (maybe use fancy version later if more defaults can be set) if 'Units' not in train_dct: train_dct['Units'] = DEFAULT_DCT['Units'] # Check that the dictionaries are built correctly _check_dcts(train_dct, fform_dct, exec_dct) return train_dct, fform_dct, exec_dct
21,398
def exit_on_error(number, *args): """Output on error.""" sys.stderr.write("sine_finder_version=%s\n" % __version_no__) if args: error_txt = __errorcodes__[number] % tuple(args) else: error_txt = __errorcodes__[number] sys.stderr.write(__error_msg__ % (number, error_txt)) sys.exit()
21,399