content
stringlengths
22
815k
id
int64
0
4.91M
def RemoveContacts(client, obj_store, user_id, device_id, request):
    """Remove contacts.

    Tornado-style generator coroutine: stamps the calling user's id onto the
    request, then creates and executes 'RemoveContactsOperation.Execute' as a
    server-side operation, logging the removal.  Returns an empty dict to the
    caller via gen.Return.

    NOTE(review): assumes ``request['contacts']`` is present when logging —
    confirm callers validate the request before invoking.
    """
    request['user_id'] = user_id
    # Persist and run the removal as an operation (durable execution).
    yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id,
                   'RemoveContactsOperation.Execute', request)
    logging.info('REMOVE CONTACTS: user: %d, device: %d, contact_count: %d' %
                 (user_id, device_id, len(request['contacts'])))
    raise gen.Return({})
5,332,600
def test_get_meta_strings_wrong_type():
    """Dataset.get_meta_strings must raise TypeError for a non-string key."""
    dataset = Dataset("test_dataset")
    with pytest.raises(TypeError):
        dataset.get_meta_strings(42)
5,332,601
def get_freesurfer_matrix_ras2vox():
    """Return the affine mapping RAS coordinates to voxel indices.

    This is simply the inverse of the vox2ras affine for FreeSurfer
    conformed-space volumes; see get_freesurfer_matrix_vox2ras for background
    information.

    Returns
    -------
    2D numpy array
        The affine transformation matrix, a float matrix with shape (4, 4).
    """
    vox2ras = get_freesurfer_matrix_vox2ras()
    return npl.inv(vox2ras)
5,332,602
def parse_create_table(string): """Parse the create table sql query and return metadata Args: string(sql): SQL string from a SQL Statement Returns: table_data(dict): table_data dictionary for instantiating a table """ # Parse the base table definitions table_data = to_dict(get_base_parser().parseString(string)) # Parse the columns and append to the list table_data['columns'] = list() table_data['constraints'] = list() column_position = 0 for field in table_data['raw_fields']: try: column = to_dict(get_column_parser().parseString(field)) # Add position of the column column['position'] = column_position column_position += 1 # Change fk_reference_column to string from list if FK_REFERENCE in column: column[FK_REFERENCE] = column[FK_REFERENCE][0] table_data['columns'].append(column) except ParseException: try: constraint = to_dict( get_constraints_parser().parseString(field)) table_data['constraints'].append(constraint) except ParseException: logger.error(field) raise return table_data
5,332,603
def handle_volumes(region, config):
    """Try to attach volumes.

    EBS volumes are attached first; RAID definitions (which may reference
    those volumes) are processed afterwards.
    """
    volumes = config.get("volumes", {})
    # attach ESB volumes first
    if "ebs" in volumes:
        handle_ebs_volumes(region, volumes.get("ebs"))
    # then take care of any RAID definitions
    if "raid" in volumes:
        handle_raid_volumes(volumes.get("raid"))
5,332,604
def retrieve_context_connection_connection_by_id(uuid):  # noqa: E501
    """Retrieve connection by ID

    Retrieve operation of resource: connection # noqa: E501

    :param uuid: ID of uuid
    :type uuid: str

    :rtype: Connection
    """
    # Generated stub: no backend wired up yet, return the placeholder.
    placeholder = 'do some magic!'
    return placeholder
5,332,605
def read_filenames(path):
    """Read all file names from `path` and match them against FILENAME_REGEX.

    Arguments:
        - path: path to the directory containing CSV data files.

    Returns:
        - list of tuples of every filename and regex match to the CSV
          filename format in the specified directory (match is None for
          names that do not conform).
    """
    matched = []
    for name in listdir(path):
        if isfile(join(path, name)):
            matched.append((name, FILENAME_REGEX.match(name)))
    return matched
5,332,606
def _sp_sleep_for(t: int) -> str: """Return the subprocess cmd for sleeping for `t` seconds.""" return 'python -c "import time; time.sleep({})"'.format(t)
5,332,607
def verify_show_environment(dut, verify_str_list):
    """
    Verify that every pattern in *verify_str_list* appears (case-insensitive)
    in the 'show environment' output of the device.
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    """
    command = "show environment"
    raw = st.show(dut, command, skip_tmpl=True)
    output = utils.remove_last_line_from_string(raw)
    result = True
    for pattern in verify_str_list:
        if not re.findall(pattern, output, re.IGNORECASE):
            st.error("Item '{}' is NOT found".format(pattern))
            result = False
    return result
5,332,608
def database():
    """In memory blank test database from the declarative model.

    Fixture-style generator: yields a session factory bound to a fresh
    in-memory SQLite engine pre-populated with test data, then disposes of
    the engine on teardown.
    """
    engine = create_engine("sqlite:///:memory:", future=True)
    session_maker = sessionmaker(
        autocommit=False,
        autoflush=False,
        bind=engine,
        future=True,
    )
    Base.metadata.create_all(engine)  # create our model
    __populate_test_data(session_maker())
    yield session_maker
    # Teardown: release the in-memory engine and its connections.
    engine.dispose()
5,332,609
def profile_time(trace_name, name, enabled=True, stream=None, end_stream=None):
    """Print time spent by CPU and GPU.

    Useful as a temporary context manager to find sweet spots of code
    suitable for async implementation.

    From: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/utils/profiling.py

    Usage:
        >> with profile_time('test_trace', 'test_infer') as pt:
        >>     model(inputs)

    NOTE(review): generator body — presumably decorated with
    ``@contextmanager`` at the definition site (not visible here); confirm.
    """
    # No-op passthrough when disabled or CUDA is unavailable.
    if (not enabled) or not torch.cuda.is_available():
        yield
        return
    stream = stream if stream else torch.cuda.current_stream()
    end_stream = end_stream if end_stream else stream
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    stream.record_event(start)
    try:
        cpu_start = time.monotonic()
        yield
    finally:
        cpu_end = time.monotonic()
        end_stream.record_event(end)
        # Block until the end event has been recorded so elapsed_time is valid.
        end.synchronize()
        cpu_time = (cpu_end - cpu_start) * 1000
        gpu_time = start.elapsed_time(end)
        msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
        msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
        # NOTE(review): end_stream is passed as a second positional argument
        # to print(), so the stream object is printed after the message —
        # possibly intended to be print(msg) only; confirm.
        print(msg, end_stream)
5,332,610
def plot() -> None:
    """Open the interactive plotting window.

    Warns (via click) when the periodogram has not been computed yet;
    otherwise renders the figure and opens the interactive window.
    """
    if figure.data.z2n.size == 0:
        click.secho("The periodogram was not calculated yet.", fg='yellow')
    else:
        figure.plot_figure()
        # NOTE(review): 'plt' is called as a function here — if it is
        # matplotlib.pyplot this would fail (modules are not callable);
        # presumably a project-local helper. Confirm.
        plt()
5,332,611
def get_labelset_keys():
    """get labelset keys

    Given DATA_CFG, return slideviewer labelsets: all labelsets defined under
    LABEL_SETS when USE_ALL_LABELSETS is truthy, otherwise only the single
    USE_LABELSET entry.

    Args:
        none

    Returns:
        list: a list of labelset names
    """
    cfg = ConfigSet()
    label_config = cfg.get_value(path=const.DATA_CFG + '::LABEL_SETS')
    selected = [cfg.get_value(path=const.DATA_CFG + '::USE_LABELSET')]
    if cfg.get_value(path=const.DATA_CFG + '::USE_ALL_LABELSETS'):
        return list(label_config.keys())
    return selected
5,332,612
def gen_mf_html(pypeit_file, qa_path): """ Generate the HTML for a MasterFrame set Args: pypeit_file (str): Name of the PypeIt file, no path qa_path (str): Path to the QA folder """ # TODO: Can this instead just use the pypeit file? # Read calib file calib_file = pypeit_file.replace('.pypeit', '.calib') with open(calib_file, 'r') as infile: calib_dict = yaml.load(infile, Loader=yaml.FullLoader) # Parse setup = list(calib_dict.keys())[0] cbsets = [] for key in calib_dict[setup].keys(): if key == '--': continue #if isinstance(key,str): # dets.append(int(key)) else: cbsets.append(key) # TODO -- Read in spectograph from .pypeit file and then use spectrograph.ndet dets = (1+np.arange(99)).tolist() # Generate MF file MF_filename = os.path.join('{:s}'.format(qa_path), 'MF_{:s}.html'.format(setup)) body = '' with open(MF_filename,'w') as f: # Start links = html_init(f, 'QA Setup {:s}: MasterFrame files'.format(setup)) # Loop on calib_sets for cbset in cbsets: for det in dets: # Run idval = '{:s}_{:d}_{:02d}'.format(setup, cbset, det) new_links, new_body = html_mf_pngs(idval) # Save links += new_links body += new_body # End html_end(f, body, links) # print("Wrote: {:s}".format(MF_filename))
5,332,613
def create_instrument_level_pattern(instrument_symbols: List[str]) -> str:
    """Creates a regular expression pattern to target all the instrument
    symbols in a list.

    The resulting pattern targets, within a specific DC message, the portion
    of the message containing the complete instrument symbol, for each
    instrument symbol included in the input list.

    Parameters
    ----------
    instrument_symbols: List[str]
        A list of the stable components of the futures instrument symbols.

    Returns
    -------
    str
        A regular expression pattern.
    """
    alternatives = "|".join(
        create_specific_instrument_regex(symbol)
        for symbol in instrument_symbols
    )
    return rf"({alternatives})"
5,332,614
def localtime(nist_lookup=0, localtime=DateTime.localtime,utctime=utctime):
    """ Returns the current local time as DateTime instance.

        Same notes as for utctime().

        Note: ``localtime`` and ``utctime`` are deliberately bound as
        default arguments (early binding), so this function keeps working
        even if the module-level names are later rebound or the module is
        being torn down.
    """
    return localtime(utctime(nist_lookup).gmticks())
5,332,615
def device_now():
    """Return datetime object constructed from 'now' on device.

    Shells out to ``adb shell date`` with a colon-separated format and
    parses the six fields (year..second).  On malformed output, delegates
    to ``u.error`` — presumably that raises or exits, since execution would
    otherwise fall through; confirm.
    """
    cmd = "adb shell date '+%Y:%m:%d:%H:%M:%S'"
    lines = u.docmdlines(cmd)
    line = lines.pop(0)
    if line is None:
        u.error("unable to interpret output from '%s'" % cmd)
    d = line.split(":")
    try:
        dt = datetime(int(d[0]), int(d[1]), int(d[2]),
                      int(d[3]), int(d[4]), int(d[5]))
        return dt
    except ValueError:
        u.error("unable to parse/interpret output "
                "from cmd '%s' (value %s)" % (cmd, line))
5,332,616
def copy_safe_request(request):
    """
    Copy selected attributes from a request object into a new fake request object.

    This is needed in places where thread safe pickling of the useful request data is needed.
    """
    # Only whitelisted, string-valued META entries are copied.
    safe_meta = {}
    for key in HTTP_REQUEST_META_SAFE_COPY:
        if key in request.META and isinstance(request.META[key], str):
            safe_meta[key] = request.META[key]
    payload = {
        "META": safe_meta,
        "POST": request.POST,
        "GET": request.GET,
        "FILES": request.FILES,
        "user": request.user,
        "path": request.path,
        "id": getattr(request, "id", None),  # UUID assigned by middleware
    }
    return NautobotFakeRequest(payload)
5,332,617
def sha256(firmware_filename, firmware_size=None):
    """Returns the sha256 hash of the firmware.

    Args:
        firmware_filename: path of the firmware image to hash.
        firmware_size: optional int; when supplied, a 5-byte header
            (0x00 followed by the size as 4 little-endian bytes) is hashed
            before the file contents.

    Returns:
        The 32-byte SHA-256 digest.
    """
    hasher = hashlib.sha256()
    # If firmware size is supplied, then we want a sha256 of the firmware with its header
    if firmware_size is not None:
        hasher.update(b"\x00" + firmware_size.to_bytes(4, "little"))
    # The file is opened unbuffered, so each read() is one syscall; the
    # previous 128-byte chunks made hashing large firmwares very slow.
    # 64 KiB chunks produce the identical digest far faster.
    with open(firmware_filename, "rb", buffering=0) as file:
        while True:
            chunk = file.read(65536)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.digest()
5,332,618
async def async_unload_entry(hass, entry):
    """Unload a config entry.

    Unloads every platform registered for the entry; on success, also stops
    the integration's runtime object stored under hass.data[DOMAIN].
    """
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        hass.data[DOMAIN][entry.entry_id].stop()
    return unload_ok
5,332,619
def is_serial_increased(old, new):
    """
    Return true if serial number was increased using RFC 1982 logic.

    Serials live in a 32-bit space; *new* is "greater" than *old* when the
    forward distance (new - old) mod 2**32 is strictly between 0 and 2**31
    (RFC 1982 section 3.2).  A distance of exactly 2**31 is undefined by the
    RFC and reported as False.
    """
    old, new = (int(n) for n in [old, new])
    diff = (new - old) % 2**32
    # Upper bound is 2**31 exclusive; the previous (2**31 - 1) bound wrongly
    # rejected the maximal valid increment diff == 2**31 - 1.
    return 0 < diff < 2**31
5,332,620
def generate_docs():
    """
    info: generates_docs for salt community
    :return:
    """
    # Normalise Windows backslashes to forward slashes for salt-call.
    here = os.path.dirname(os.path.realpath(__file__))
    path = '/'.join(here.split('\\'))
    os.system('salt-call --local --file-root={0} state.sls generate_docs'.format(path))
5,332,621
def svn_relpath_skip_ancestor(parent_relpath, child_relpath):
    """svn_relpath_skip_ancestor(char const * parent_relpath, char const * child_relpath) -> char const *

    SWIG-generated thin wrapper: delegates directly to the C binding in
    ``_core`` (Subversion C API function of the same name).
    """
    return _core.svn_relpath_skip_ancestor(parent_relpath, child_relpath)
5,332,622
def rnn_model(input_dim, units, activation, output_dim=29): """ Build a recurrent network for speech """ # Main acoustic input input_data = Input(name='the_input', shape=(None, input_dim)) # Add recurrent layer simp_rnn = GRU(units, activation=activation, return_sequences=True, implementation=2, name='rnn')(input_data) bn_rnn = BatchNormalization(name='bn_rnn')(simp_rnn) time_dense = TimeDistributed(Dense(output_dim))(bn_rnn) # Add softmax activation layer y_pred = Activation('softmax', name='softmax')(time_dense) # Specify the model model = Model(inputs=input_data, outputs=y_pred) model.output_length = lambda x: x print(model.summary()) return model
5,332,623
def _pad_statistic(arr, pad_width, stat_length, stat_op):
    """
    pads the array with values calculated along the given axis, used in mode:
    "maximum", "minimum", "mean"

    For each axis, the statistic `stat_op` is computed over the leading and
    trailing `stat_length` slices, tiled to `pad_width`, and concatenated
    around the array.
    """
    ndim = arr.ndim
    shape = arr.shape
    # Normalise stat_length to per-axis (before, after) pairs clipped to shape.
    if stat_length is None:
        stat_length = _make_stat_length(shape)
    else:
        stat_length = _convert_pad_to_nd(stat_length, ndim)
    stat_length = _limit_stat_length(stat_length, shape)
    # Pad one axis at a time; `arr` grows with each concatenation.
    for i in range(ndim):
        pad_before = stat_op(_slice_along_axis(arr, i, 0, stat_length[i][0]), i)
        pad_before = (F.tile(pad_before, _tuple_setitem((1,)*ndim, i, pad_width[i][0])),)
        pad_after = stat_op(_slice_along_axis(arr, i, shape[i]-stat_length[i][1], shape[i]), i)
        pad_after = (F.tile(pad_after, _tuple_setitem((1,)*ndim, i, pad_width[i][1])),)
        tensor_with_pad = pad_before + (arr,) + pad_after
        arr = concatenate(tensor_with_pad, axis=i)
    return arr
5,332,624
def register_do(mysql, json):
    """
    helper function that registers data objects into MySQL DB

    @param mysql: a mysql object for MySQL database
    @param json: metadata that contains information for data source and device
    @return: success message string
    """
    cnx = mysql.connect()
    cursor = cnx.cursor()
    dataSource = json["dataSource"]
    device = json["device"]
    deviceSummary = json["deviceSummary"]
    dataSource_arr = [dataSource["name"], int(dataSource["srcID"])]
    device_arr = [int(device["ID"]), int(device["dataSize"]), device["location"],
                  device["name"], int(device["srcID"]), device["type"]]
    deviceSummary_arr = [int(deviceSummary["ID"]), deviceSummary["accessDuration"],
                         int(deviceSummary["deviceID"])]
    # PEP 249 'format' paramstyle (used by MySQL connectors) accepts only %s
    # placeholders regardless of value type; the previous %d markers raise
    # "unsupported format character" at execute() time.
    cursor.execute("INSERT INTO dataSource (name, srcID) VALUES (%s, %s)",
                   dataSource_arr)
    cursor.execute("INSERT INTO device (ID, dataSize, location, name, srcID, type) "
                   "VALUES (%s, %s, %s, %s, %s, %s)", device_arr)
    cursor.execute("INSERT INTO deviceSummary (ID, accessDuration, deviceID) "
                   "VALUES (%s, %s, %s)", deviceSummary_arr)
    cnx.commit()
    return "data object registration success"
5,332,625
def reraise_wrapped_error(error: Exception):
    """Wraps failures with better error messages.

    Args:
      error: The exception. We must be inside a raise.

    Raises:
      ImportError: Typically if there is a version mismatch.
    """
    # 'undefined symbol' in a loader error usually indicates an ABI/version
    # mismatch; re-raise it as an ImportError carrying guidance while
    # preserving the original traceback via six.reraise.
    if 'undefined symbol' in str(error).lower():
        six.reraise(ImportError, ImportError('%s\nOrignal error:\n%s' % (
            UNDEFINED_SYMBOL_ERROR_MESSAGE, error)), sys.exc_info()[2])
    raise error
5,332,626
def split_amount(amount, splits, places=2):
    """Return list of ``splits`` amounts where sum of items equals ``amount``.

    >>> from decimal import Decimal
    >>> split_amount(Decimal('12'), 1)
    [Decimal('12.00')]
    >>> split_amount(Decimal('12'), 2)
    [Decimal('6.00'), Decimal('6.00')]

    Amounts have a max of ``places`` decimal places. Last amount in the list
    may not be the same as others (will always be lower than or equal to
    others).

    >>> split_amount(Decimal('100'), 3)
    [Decimal('33.34'), Decimal('33.34'), Decimal('33.32')]
    >>> split_amount(Decimal('100'), 3, 4)
    [Decimal('33.3334'), Decimal('33.3334'), Decimal('33.3332')]
    >>> split_amount(Decimal('12'), 7)  # Doctest: +ELLIPSIS
    [Decimal('1.72'), ..., Decimal('1.72'), ..., Decimal('1.68')]
    >>> split_amount(Decimal('12'), 17)  # Doctest: +ELLIPSIS
    [Decimal('0.71'), ..., Decimal('0.71'), Decimal('0.64')]
    """
    # Quantum for the requested number of decimal places, e.g. 0.01 for 2.
    one = decimal.Decimal(10) ** -places
    amount = amount.quantize(one)
    # Round each equal share UP, then let the last share absorb the
    # remainder so the total stays exact.
    with decimal.localcontext() as decimal_context:
        decimal_context.rounding = decimal.ROUND_UP
        upper_split = (amount / splits).quantize(one)
        splitted_amounts = [upper_split] * (splits - 1)
        lower_split = amount - sum(splitted_amounts)
        splitted_amounts.append(lower_split)
        return splitted_amounts
5,332,627
def shift_compare_date(df, date_field, smaller_eq_than_days=1, compare_with_next=False):
    """
    ATENTION: This Dataframe need to be sorted!!!

    Return a boolean Series marking rows whose date in `date_field` lies
    within `smaller_eq_than_days` days of the neighbouring row's date
    (next row when `compare_with_next`, else previous row).

    NOTE(review): the next-row branch uses a strict ``> 0 days`` lower bound
    while the previous-row branch uses ``>= 0 days`` — confirm this
    asymmetry (same-day pairs excluded only in the forward comparison) is
    intentional.
    """
    from datetime import timedelta
    if compare_with_next:
        s = ( (df[date_field].shift(-1) - df[date_field] ) <= timedelta(days=smaller_eq_than_days) ) & \
            ( (df[date_field].shift(-1) - df[date_field]) > timedelta(days=0) )
    else:
        s = ( (df[date_field] - df[date_field].shift(1) ) <= timedelta(days=smaller_eq_than_days) ) & \
            ( (df[date_field] - df[date_field].shift(1)) >= timedelta(days=0) )
    return s
5,332,628
def format_alleles(variant):
    """Gets a string representation of the variant's alleles.

    Args:
      variant: nucleus.genomics.v1.Variant.

    Returns:
      A string ref_bases/alt1,alt2 etc.
    """
    alts = ','.join(variant.alternate_bases)
    return '{}/{}'.format(variant.reference_bases, alts)
5,332,629
def _validate_inputs(input_list, input_names, method_name):
    """
    This method will validate the inputs of other methods.

    input_list is a list of the inputs passed to a method.

    input_name is a list of the variable names associated with
    input_list

    method_name is the name of the method whose input is being validated.

    _validate_inputs will verify that all of the inputs in input_list are:

    1) of the same type
    2) either numpy arrays or instances of numbers.Number (floats or ints)
    3) if they are numpy arrays, they all have the same length

    If any of these criteria are violated, a RuntimeError will be raised

    returns True if the inputs are numpy arrays; False if not
    """
    # The first argument fixes the expected type for all the rest.
    if isinstance(input_list[0], np.ndarray):
        desired_type = np.ndarray
    elif isinstance(input_list[0], numbers.Number):
        desired_type = numbers.Number
    else:
        raise RuntimeError("The arg %s input to method %s " % (input_names[0], method_name)
                           + "should be either a number or a numpy array")
    # Collect every argument that disagrees with the expected type so the
    # error message can name all offenders at once.
    valid_type = True
    bad_names = []
    for ii, nn in zip(input_list, input_names):
        if not isinstance(ii, desired_type):
            valid_type = False
            bad_names.append(nn)
    if not valid_type:
        msg = "The input arguments:\n"
        for nn in bad_names:
            msg += "%s,\n" % nn
        msg += "passed to %s " % method_name
        msg += "need to be either numbers or numpy arrays\n"
        msg += "and the same type as the argument %s" % input_names[0]
        msg += "\n\nTypes of arguments are:\n"
        for name, arg in zip(input_names, input_list):
            msg += '%s: %s\n' % (name, type(arg))
        raise RuntimeError(msg)
    # Arrays must additionally agree in length.
    if desired_type is np.ndarray:
        same_length = True
        for ii in input_list:
            if len(ii) != len(input_list[0]):
                same_length = False
        if not same_length:
            raise RuntimeError("The arrays input to %s " % method_name
                               + "all need to have the same length")
    if desired_type is np.ndarray:
        return True
    return False
5,332,630
def install(app=None, timeout=0.02, engine=None):
    """
    Creates a :class:`~PySide.QtCore.QTimer` instance that will be triggered
    continuously to call :func:`Engine.poll() <pants.engine.Engine.poll>`,
    ensuring that Pants remains responsive.

    =========  ========  ============
    Argument   Default   Description
    =========  ========  ============
    app        None      *Optional.* The :class:`~PySide.QtCore.QCoreApplication` to attach to. If no application is provided, it will attempt to find an existing application in memory, or, failing that, create a new application instance.
    timeout    ``0.02``  *Optional.* The maximum time to wait, in seconds, before running :func:`Engine.poll() <pants.engine.Engine.poll>`.
    engine               *Optional.* The :class:`pants.engine.Engine` instance to use.
    =========  ========  ============
    """
    global timer
    global _timeout
    global _engine
    _engine = engine or Engine.instance()
    _engine._install_poller(_Qt())
    # Find an existing Qt application, or create one to host the timer.
    if app is None:
        app = QCoreApplication.instance()
    if app is None:
        app = QCoreApplication([])
    # QTimer intervals are in milliseconds.
    _timeout = timeout * 1000
    timer = QTimer(app)
    timer.timeout.connect(do_poll)
    timer.start(_timeout)
5,332,631
def server_base_url(environ):
    """
    Using information in tiddlyweb.config, construct
    the base URL of the server, sans the trailing /.
    """
    host = server_host_url(environ)
    prefix = _server_prefix(environ)
    return '%s%s' % (host, prefix)
5,332,632
def calc_ctrlg_ratio(rpl: sc2reader.resources.Replay, pid: int) -> dict[str, float]:
    """Calculates the ratio between `ControlGroupEvents` and the union of the
    `CommandEvents`, `SelectionEvents` and `ControlGroupCommand` sets to
    quantify the players' level of awareness and use of this tactical feature.

    *Args*
        - rpl (sc2reader.resources.Replay)
            The replay being analysed.
        - pid (int)
            In-game player ID of the player being considered in the analysis.

    *Returns*
        - (dict[str, float])
    """
    # NOTE: event pids are offset by one relative to in-game player IDs,
    # hence the (pid - 1) comparisons below.  Seconds are deduplicated via
    # sets, so the ratio counts distinct seconds with activity, not raw events.
    command_secs = {e.second for e in rpl.events
                    if isinstance(e, sc2reader.events.game.CommandEvent)
                    and e.pid == (pid - 1)}
    select_secs = {e.second for e in rpl.events
                   if isinstance(e, sc2reader.events.game.SelectionEvent)
                   and e.pid == (pid - 1)}
    ctrlg_secs = {e.second for e in rpl.events
                  if isinstance(e, sc2reader.events.game.ControlGroupEvent)
                  and e.pid == (pid - 1)}
    total_counted_events = len(command_secs | select_secs | ctrlg_secs)
    # Guard against division by zero when the player produced no events.
    if not total_counted_events:
        return {"ctrlg_ratio": 0}
    return {"ctrlg_ratio": len(ctrlg_secs)/total_counted_events}
5,332,633
def data_exists(date, hour=None):
    """ Checks if there is a directory with daily data files for given date and hour(s)

    Parameters
    ----------
    date: str
        Expected date format is yyyy/mm/dd
    hour: int or array-like, default None
        Specific hour(s) to check, has to be in the range of 0-23

    Returns
    -------
    : bool, or array of bools
        If all hours exist returns True. If some of them do not exist returns a bool array.
    """
    if hour is None:
        file_path = get_day_folder_path(date)
        if os.path.exists(file_path):
            return True
    else:
        # get hours with data for relevant date
        data_hours = get_hours_with_data(date)
        # bool array - for every hour True data if data exists, otherwise False
        mask_hours = np.isin(hour, data_hours)
        # if all requested hours exist return True, if only some of them return an array
        if mask_hours.all():
            return True
        else:
            return mask_hours
    # Reached only when hour is None and the day folder does not exist.
    return False
5,332,634
def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None): """ Main routine for plotting a single roccurve """ # Get a default ax if none is given if ax is None: import matplotlib.pyplot as plt fig = plt.figure(figsize=(8,8)) ax = fig.gca() # Plot the base line ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray') # Plot the single roccurve line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax) line.set_label(bkgs[0].get_category()) # Plot settings ax.set_xlim(0.0, 1.05) ax.set_ylim(0.0, 1.05) ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE) ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE) ax.legend(fontsize=DEFAULT_FONTSIZE) return ax
5,332,635
def clear_screen(screen: pg.Surface, color=BACKGROUND) -> None:
    """Clears screen i.e. draws a rectangle that covers the whole
    <screen> of color <BACKGROUND>"""
    full_window = (0, 0, WIDTH, HEIGHT)
    pg.draw.rect(screen, color, full_window)
5,332,636
def draw_normal_surface(pcd, scale, estimation_params=None):
    """Draw and return a mesh of arrows of normal vectors for each point in the given cloud

    Parameters
    ----------
    pcd : o3d.geometry.PointCloud
        Input point cloud
    scale : float
        Scale of the default arrow which is 1 meter length
    estimation_params : dict, optional
        Normal estimation parameters if input does not contain normals, by default None

    Returns
    -------
    o3d.geometry.TriangleMesh
        Collection of normal arrows as a single triangle mesh

    Raises
    ------
    ValueError
        If normals must be estimated but no estimation parameters were given.
    """
    if len(pcd.normals) != len(pcd.points):
        # Previously **estimation_params crashed with an opaque TypeError
        # when the default None was used; fail with a clear message instead.
        if estimation_params is None:
            raise ValueError(
                "point cloud has no normals; estimation_params is required "
                "to estimate them")
        pcd.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(**estimation_params))
    arrow_params = get_default_arrow(scale)
    normal_surface = None
    # Accumulate one arrow mesh per (point, normal) pair.
    pairs = zip(np.asarray(pcd.points), np.asarray(pcd.normals))
    for point, normal in tqdm(pairs, total=len(pcd.points), ncols=100):
        arrow = draw_arrow(point, normal, (0, 1, 0), arrow_params)
        if normal_surface is None:
            normal_surface = arrow
        else:
            normal_surface += arrow
    return normal_surface
5,332,637
def test_where_with_kwargs(CTX):
    """
    Tests that subclasses of a class can be stored in the same collection.

    Queries CityM.where(country="USA") and checks that each returned object
    (including MunicipalityM / StandardCityM subclasses) serializes to the
    expected dict.
    :return:
    """
    expected_dict = {
        'Washington D.C.': {
            'cityName': 'Washington D.C.',
            'country': 'USA',
            'capital': True,
            'obj_type': "MunicipalityM",
            'doc_id': 'DC',
            'doc_ref': 'CityM/DC'
        },
        'San Francisco': {
            'cityName': 'San Francisco',
            'cityState': 'CA',
            'country': 'USA',
            'capital': False,
            'regions': ['west_coast', 'norcal'],
            'obj_type': "StandardCityM",
            'doc_id': 'SF',
            'doc_ref': 'CityM/SF'
        },
        'Los Angeles': {
            'cityName': 'Los Angeles',
            'cityState': 'CA',
            'country': 'USA',
            'capital': False,
            'regions': ['west_coast', 'socal'],
            'obj_type': "StandardCityM",
            'doc_id': 'LA',
            'doc_ref': 'CityM/LA'
        }
    }
    # Index query results by city name so assertions are order-independent.
    res_dict = dict()
    for obj in CityM.where(country="USA"):
        d = obj.to_dict()
        res_dict[obj.city_name] = d
    assert res_dict['Washington D.C.'] == expected_dict['Washington D.C.']
    assert res_dict['San Francisco'] == expected_dict['San Francisco']
    assert res_dict['Los Angeles'] == expected_dict['Los Angeles']
5,332,638
def texture_from_clusters(clusters):
    """ Compute the GLCM texture properties from image clusters.

    :param clusters: clusters of pixels representing sections of the image
    :returns: DataFrame -- of texture features for every cluster.
    """
    # 8 orientations over the half circle, pixel distance fixed at 1.
    thetas = np.arange(0, np.pi, np.pi/8)
    props = ['contrast', 'dissimilarity', 'homogeneity', 'energy']
    tex_features = []
    for i, cluster in enumerate(clusters):
        # Column names carry the 1-based cluster index, e.g. contrast_cluster_1.
        prop_suffix = '_cluster_%d' % (i+1)
        col_names = [name + prop_suffix for name in props]
        features = glcm_features(cluster, [1], thetas, props)
        # compute mean across all orientations
        features = np.mean(features, axis=2)
        df = pd.DataFrame(features.T, columns=col_names)
        tex_features.append(df)
    # One row of features per cluster, concatenated column-wise.
    return pd.concat(tex_features, axis=1)
5,332,639
def parse_dict(input_data):
    """Return a rules dict of the format:
    {
        'light red': [(1, 'bright white'), (2, 'muted yellow')],
        'dark orange': [(3, bright white), (4, muted yellow)],
        'faded blue': [(0, 'bags')]
    }
    """
    rules = dict()
    for raw_line in input_data.split('\n'):
        outer, contents = raw_line.strip().split(' bags contain ')
        parts = [chunk.split(' ') for chunk in contents.split(", ")]
        # "no other bags." marks an empty bag.
        if 'no' in parts[0]:
            rules[outer] = [(0, 'bags')]
        else:
            rules[outer] = [(int(p[0]), ' '.join(p[1:3])) for p in parts]
    return rules
5,332,640
def write_table_header(op2_file, fascii, table_name):
    """
    Writes the beginning of an op2 table

    Parameters
    ----------
    op2_file : file
        the op2 file object
    fascii : file
        text file receiving a human-readable trace of what was written
    table_name : str
        the 8-character table name to write
    """
    # Record marker layout: 4, 2, 4, then the 8-byte name framed by 8s.
    header_values = [
        4, 2, 4,
        8, table_name.encode('ascii'), 8,
        #4, 0, 4,
    ]
    assert len(table_name) == 8, table_name
    packer = Struct('<4i 8s i')
    op2_file.write(packer.pack(*header_values))
    fascii.write('%s header0 = %s\n' % (table_name, header_values))
5,332,641
# NOTE(review): Python 2 code (print statements) — kept as-is; porting to
# Python 3 would need print() and print-function semantics throughout.
def extract_vars(samples_file_name,n_burnin,v_names,debug,stride=1):
    """From a file with samples in ascii format, with the first line containing
    the label for each column, extract the columns with the labels in v_names
    and return them in a numpy array. Remove n_burnin samples from the top.
    Only read one in every stride number of lines after that."""
    # Open text file with all samples,
    samples_file = open(samples_file_name,"r")
    #sample_lines = samples_file.readlines()
    #samples_file.close()
    # Extract first line with the column labels and find the column
    # numbers corresponding to the variables of interest.
    #labels_line = sample_lines[0].rstrip('\n')
    labels_line = samples_file.readline().rstrip('\n')
    col_labels = [lbl for lbl in labels_line.split()]
    v_indices = []
    for s_v in v_names:
        try:
            i_v = col_labels.index(s_v)
            v_indices.append(i_v)
        except ValueError:
            print "Variable", s_v, "is not found in the list of labels", col_labels
            sys.exit(1)
    if (debug > 0):
        print "Column labels in file",samples_file_name,"are:",col_labels
        for i_v in range(len(v_names)):
            print "The column number of",v_names[i_v],"is:",v_indices[i_v]
    # Read subsequent lines, leaving out the first n_burnin, and only one
    # in every stride lines after that
    samples_list = []
    line_no = 0
    done = 0
    while not done:
        line = samples_file.readline()
        if (line == ""):
            done = 1
        else:
            line_no += 1
            if (line_no > n_burnin and (line_no - n_burnin) % stride == 0):
                records = line.split()
                num_records = [float(s) for s in records]
                samples_list.append(num_records)
    # Close the file
    samples_file.close()
    # Remove the last line if it has a value < 0 (i.e. -1) in the
    # acceptance_prob column
    try:
        i_ap = col_labels.index("acceptance_prob")
    except ValueError:
        i_ap = -1
    # If this is a file with acceptance probabilities
    if (i_ap >= 0):
        # And the last line has a negative acceptance probability
        if(samples_list[-1][i_ap] < 0):
            # Remove the last line
            del samples_list[-1]
            if (debug > 0):
                print "The last sample line has been deleted as it contained the MAP values"
    # Convert list to array
    steady_samples = np.array(samples_list)
    # Remove burn-in samples from the top
    #if (n_burnin > 0):
    #    steady_samples = all_samples[n_burnin:,:]
    #else:
    #    steady_samples = all_samples
    #if (debug > 0):
    #    print "Removed", n_burnin, "burn-in samples"
    # Extract all columns of interest
    samples_cols = []
    for i_v in v_indices:
        samples_cols.append(steady_samples[:,i_v])
    samples = np.array(samples_cols).T
    if (debug > 0):
        print "Shape of samples array:",samples.shape
    n_samples = len(samples[:,0])
    n_vars = len(samples[0,:])
    if (debug > 0):
        print "Read in", n_samples, "regular samples of", n_vars, "variables from file", samples_file_name
    return samples
5,332,642
def distance_without_normalise(bin_image):
    """
    Takes a binary (labelled) image and returns a per-label distance
    transform version of it, as uint8.
    """
    res = np.zeros_like(bin_image)
    # Transform each label independently so distances never cross labels.
    for label in range(1, bin_image.max() + 1):
        mask = np.zeros_like(bin_image)
        mask[bin_image == label] = 1
        transformed = distance_transform_cdt(mask)
        res[bin_image == label] = transformed[bin_image == label]
    return res.astype('uint8')
5,332,643
def classify_sentiment(rows_per_file: int, senti4sd_pool_root: str, inpath: str, outpath: str):
    """Classify sentiment in the inpath file using any classificationTask.sh
    scripts found in the directory tree starting from senti4sd_pool_root.
    Result is placed at outpath.

    The input file will be split in subfiles of size rows_per_file to avoid
    running out of memory. This is a machine- and JVM-dependent figure and
    the optimal value will vary across setups.
    """
    classifiers = find_classifiers(senti4sd_pool_root)
    # Split -> classify each chunk -> stitch predictions back together.
    # All intermediates live in a temp dir that is removed on exit.
    with tempfile.TemporaryDirectory() as tmpdir:
        sources = split_file(rows_per_file, inpath, dir=tmpdir)
        dests = [
            tempfile.NamedTemporaryFile(dir=tmpdir, delete=False).name
            for _ in range(len(sources))
        ]
        _classify_all(sources, dests, classifiers)
        concatenate_predictions(dests, outpath)
5,332,644
def list_inventory():
    """ Returns all of the Inventory

    Filters by the ``category`` or ``name`` query parameter when given
    (category takes precedence); otherwise returns everything, serialized
    as a JSON list with HTTP 200.

    NOTE(review): the ``condition``, ``count`` and ``available`` query
    parameters are read but never used — confirm whether filtering on them
    was intended.
    """
    app.logger.info('Request for inventory list')
    inventory = []
    category = request.args.get('category')
    name = request.args.get('name')
    condition = request.args.get('condition')
    count = request.args.get('count')
    available = request.args.get('available')
    if category:
        inventory = Inventory.find_by_category(category)
    elif name:
        inventory = Inventory.find_by_name(name)
    else:
        inventory = Inventory.all()
    results = [inventory.serialize() for inventory in inventory]
    return make_response(jsonify(results), status.HTTP_200_OK)
5,332,645
def parse_args():
    """Parse command line arguments.

    Returns:
        argparse.Namespace with input_dir, output_dir, num_processes (int)
        and compression attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input-dir',
                        help="Directory which contains the input data",
                        required=True)
    parser.add_argument('-o', '--output-dir',
                        help="Directory which will hold the output data",
                        required=True)
    # type=int: without it an explicit "-p 8" arrives as the string "8"
    # while the default stays an int, breaking downstream arithmetic.
    parser.add_argument('-p', '--num-processes', default=4, type=int,
                        help="Number of processes to spawn for file conversion")
    parser.add_argument('-c', '--compression', default=None,
                        help="Compression Type.")
    return parser.parse_args()
5,332,646
def is_instance_method(obj):
    """Checks if an object is a bound method on an instance.

    Returns False for plain functions, unbound methods, and methods bound
    to a class (classmethods); True only for methods bound to an instance.
    """
    if not isinstance(obj, MethodType):
        return False  # Not a method
    if obj.__self__ is None:
        return False  # Method is not bound
    # A classmethod's __self__ is the class itself.  The previous check
    # ("... or hasattr(obj.__self__, '__class__') and obj.__self__.__class__")
    # was always truthy, so EVERY bound method was misreported as a
    # classmethod and this function always returned False.
    if isinstance(obj.__self__, type):
        return False  # Method is a classmethod
    return True
5,332,647
def _clean_empty_and_duplicate_authors_from_grobid_parse(authors: List[Dict]) -> List[Dict]: """ Within affiliation, `location` is a dict with fields <settlement>, <region>, <country>, <postCode>, etc. Too much hassle, so just take the first one that's not empty. """ # stripping empties clean_authors_list = [] for author in authors: clean_first = author['first'].strip() clean_last = author['last'].strip() clean_middle = [m.strip() for m in author['middle']] clean_suffix = author['suffix'].strip() if clean_first or clean_last or clean_middle: author['first'] = clean_first author['last'] = clean_last author['middle'] = clean_middle author['suffix'] = clean_suffix clean_authors_list.append(author) # combining duplicates (preserve first occurrence of author name as position) key_to_author_blobs = {} ordered_keys_by_author_pos = [] for author in clean_authors_list: key = (author['first'], author['last'], ' '.join(author['middle']), author['suffix']) if key not in key_to_author_blobs: key_to_author_blobs[key] = author ordered_keys_by_author_pos.append(key) else: if author['email']: key_to_author_blobs[key]['email'] = author['email'] if author['affiliation'] and (author['affiliation']['institution'] or author['affiliation']['laboratory'] or author['affiliation']['location']): key_to_author_blobs[key]['affiliation'] = author['affiliation'] dedup_authors_list = [key_to_author_blobs[key] for key in ordered_keys_by_author_pos] return dedup_authors_list
5,332,648
def save_weights(weights):
    """Persist the weight array after processing a video.

    Any existing file at ``WEIGHTS_DIR`` is removed first, then the weights
    are written there with ``np.save``.  NOTE(review): despite its name,
    ``WEIGHTS_DIR`` is treated as a *file* path here (``os.remove`` /
    ``np.save``) that presumably lives under the ``weights`` directory —
    confirm against the module-level constant.

    Parameters
    ----------
    weights : array-like
        Weights to save.
    """
    if os.path.exists(WEIGHTS_DIR):
        os.remove(WEIGHTS_DIR)
    # Replaces the old `try: os.stat(...) / bare except: os.mkdir(...)`
    # with the explicit, race-free stdlib idiom for "create if missing".
    os.makedirs('weights', exist_ok=True)
    np.save(WEIGHTS_DIR, weights)
    print("Weights saved successfully!")
5,332,649
def get_distutils_display_options():
    """
    Returns a set of all the distutils display options in their long and
    short forms.  These are the setup.py arguments such as --name or
    --version which print the project's metadata and then exit.

    Returns
    -------
    opts : set
        The long and short form display option arguments, including the
        - or --
    """
    # -h/--help are handled by optparse and therefore not listed in
    # Distribution.display_options; seed the set with them.
    opts = {'-h', '--help'}
    for option in Distribution.display_options:
        opts.add('--' + option[0])
        if option[1]:
            opts.add('-' + option[1])

    # This isn't the greatest approach to hardcode these commands.
    # However, there doesn't seem to be a good way to determine
    # whether build *will be* run as part of the command at this
    # phase.
    display_commands = {'clean', 'register', 'setopt', 'saveopts',
                        'egg_info', 'alias'}
    return opts | display_commands
5,332,650
def get_followers(api, user_id):
    """Return the full list of followers of *user_id*, following pagination.

    Bug fixed: the old code defaulted the pagination cursor to ``''`` when
    ``'next_max_id'`` was absent from the response.  Since ``''`` is not
    ``None``, the ``while`` loop never terminated once the API stopped
    returning a cursor.  Using ``.get()`` with its ``None`` default ends
    the loop on the last page.

    :param api: API client exposing ``getUserFollowers`` and ``LastJson``
    :param user_id: id of the user whose followers are fetched
    :return: list of follower records
    """
    followers = []
    next_max_id = ''
    while next_max_id is not None:
        api.getUserFollowers(user_id, maxid=next_max_id)
        followers.extend(api.LastJson.get('users', []))
        # None (key absent) signals the final page.
        next_max_id = api.LastJson.get('next_max_id')
    return followers
5,332,651
def inference(model_path, json_path, q='question', ref='reference answer', stud='student answers',
              result_path='results.json'):
    """
    Generate feedback to student answers and save to file
    :param model_path: string / path to model
    :param json_path: string / path to json file with data
    :param q: string / json key for test question
    :param ref: string / json key for reference answer
    :param stud: string / json key for student answers
    :param result_path: string / path to save location
    :return: None
    """
    with open(json_path) as f:
        data = json.load(f)
    ckpt = LitSAFT5.load_from_checkpoint(model_path)
    tokenizer = ckpt.tokenizer
    question, reference, answers = data[q], data[ref], data[stud]
    # determine mode from model path
    # NOTE(review): assumes the 3rd path component encodes the run config as
    # underscore-separated tokens, e.g. "wq_<label>_..._<lang>" — confirm
    # against the training script's checkpoint layout.
    mode = os.path.normpath(model_path).split(os.sep)[2].split('_')
    if mode[0] == 'wq':
        # "with question" variant: label and language shift one slot right
        with_question, label, language = True, mode[1], mode[3]
    else:
        with_question, label, language = False, mode[0], mode[2]
    # preprocess text: build one lowercase T5-style prompt per student answer
    texts = []
    for ans in answers:
        if language == "en":
            if with_question:
                text = "justify and grade: question: " + question + " student: " + ans + " reference: " + reference
            else:
                text = "justify and grade: student: " + ans + " reference: " + reference
        elif language == "ger":
            if with_question:
                text = "Erkläre und bewerte: " + question + " Antwort: " + ans + " Lösung: " + reference
            else:
                text = "Erkläre und bewerte: Antwort: " + ans + " Lösung: " + reference
        else:
            raise ValueError("language parameter only accepts strings \'ger\' and \'en\' for German or "
                             "English respectively.")
        texts.append(text.lower())
    tokenized = tokenizer(texts, padding=True, truncation=True, max_length=256, return_tensors="pt")
    # generate feedback on the GPU, batch by batch
    model = ckpt.model
    model.eval()
    model.cuda()
    generated_feedback = []
    for b in tqdm.tqdm(batch(tokenized, batch_size=ckpt.batch_size),
                       total=math.ceil(len(tokenized.input_ids) / ckpt.batch_size)):
        generated_feedback += [tokenizer.decode(x, skip_special_tokens=True) for x in ckpt.model.generate(
            input_ids=b['input_ids'].cuda(),
            attention_mask=b['attention_mask'].cuda())]
    # split the generated text into the label column and the free-text feedback
    df = pd.DataFrame(columns=['Question', 'Reference Answer', 'Student Answer', label.capitalize(), 'Feedback'])
    if label == 'score':
        # first token of the generation is the numeric score
        df[label.capitalize()] = [x.split()[0] for x in generated_feedback]
        df['Feedback'] = extract_model_pred(generated_feedback)
    elif label == 'ver':
        df[label.capitalize()] = extract_label(generated_feedback)
        df['Feedback'] = extract_pred(generated_feedback)
    df['Question'], df['Reference Answer'], df['Student Answer'] = question, reference, answers
    df.to_json(result_path)
5,332,652
def main(dataset='all'):
    """Collect stroke-count statistics per symbol from the write-math DB.

    For each formula, fetches all accepted raw recordings, logs stroke-count
    statistics, reports outlier recordings whose stroke count is not among the
    modes, and writes the per-symbol stroke-count distribution to a YAML file.

    Parameters
    ----------
    dataset : string
        Either 'all' or a path to a yaml symbol file.
    """
    cfg = utils.get_database_configuration()
    mysql = cfg['mysql_online']
    connection = pymysql.connect(host=mysql['host'], user=mysql['user'], passwd=mysql['passwd'],
                                 db=mysql['db'], cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    # TODO: no formulas, only single-symbol ones.
    formulas = get_formulas(cursor, dataset)
    prob = {}
    # Go through each formula and download every raw_data instance
    for formula in formulas:
        stroke_counts = []
        recordings = []
        # Only clean recordings: no wild points, no corrections applied.
        sql = (("SELECT `wm_raw_draw_data`.`id`, `data`, `is_in_testset`, "
                "`wild_point_count`, `missing_line`, `user_id`, "
                "`display_name` "
                "FROM `wm_raw_draw_data` "
                "JOIN `wm_users` ON "
                "(`wm_users`.`id` = `wm_raw_draw_data`.`user_id`) "
                "WHERE `accepted_formula_id` = %s "
                "AND wild_point_count=0 "
                "AND has_correction=0 "
                # "AND `display_name` LIKE 'MfrDB::%%'"
                ) % str(formula['id']))
        cursor.execute(sql)
        raw_datasets = cursor.fetchall()
        logging.info("%s (%i)", formula['formula_in_latex'], len(raw_datasets))
        for raw_data in raw_datasets:
            try:
                handwriting = HandwrittenData(raw_data['data'], formula['id'], raw_data['id'],
                                              formula['formula_in_latex'], raw_data['wild_point_count'],
                                              raw_data['missing_line'], raw_data['user_id'])
                stroke_counts.append(len(handwriting.get_pointlist()))
                recordings.append(handwriting)
            except Exception as e:
                # Malformed recordings are logged and skipped, not fatal.
                logging.info("Raw data id: %s", raw_data['id'])
                logging.info(e)
        if len(stroke_counts) > 0:
            logging.info("\t[%i - %i]", min(stroke_counts), max(stroke_counts))
            median = numpy.median(stroke_counts)
            logging.info("\tMedian: %0.2f\tMean: %0.2f\tstd: %0.2f", median, numpy.mean(stroke_counts),
                         numpy.std(stroke_counts))
            # Make prob: stroke-count histogram, most common counts first
            s = sorted(Counter(stroke_counts).items(), key=lambda n: n[1], reverse=True)
            key = formula['formula_in_latex']
            prob[key] = {}
            for stroke_nr, count in s:
                prob[key][stroke_nr] = count
            # Outliers: recordings whose stroke count differs from the modes
            modes = get_modes(stroke_counts)
            logging.info("\tModes: %s", modes)
            exceptions = []
            for rec in recordings:
                if len(rec.get_pointlist()) not in modes:
                    url = (("http://www.martin-thoma.de/"
                            "write-math/view/?raw_data_id=%i - "
                            "%i strokes") % (rec.raw_data_id, len(rec.get_pointlist())))
                    dist = get_dist(len(rec.get_pointlist()), modes)
                    exceptions.append((url, len(rec.get_pointlist()), dist))
            print_exceptions(exceptions, max_print=10)
        else:
            logging.debug("No recordings for symbol "
                          "'http://www.martin-thoma.de/"
                          "write-math/symbol/?id=%s'.", formula['id'])
    write_prob(prob, "prob_stroke_count_by_symbol.yml")
5,332,653
def plot_graph_embedding(y_emb, labels, adj, line_alpha=0.2, s=7, title=""):
    """
    Plots the visualization of graph-structured data

    Args:
        y_emb (np.array): low dimensional map of data points, matrix of size n x 2
        labels (np.array): underlying class labels, matrix of size n x 1
        adj (scipy csr matrix): adjacency matrix
        line_alpha (float): alpha applied to the edge line collection
        s (int): marker size for the node scatter plot
        title (str): axis title

    Side effects: saves a PNG named from the module-level ``args`` options
    under ``weights/`` and calls ``plt.show()``.
    """
    labels = np.array([int(l) for l in labels])
    # adj = sp.coo_matrix(adj)
    # COO format gives direct access to (row, col) index arrays for the edges.
    adj = adj.tocoo()
    colormap = np.array(Category20_20 + Category20b_20 + Accent8)
    f, ax = plt.subplots(1, sharex='col', figsize=(6, 4), dpi=800)
    ax.set_axis_off()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.set_title(title)
    # Plot edges: endpoints of every edge as coordinate pairs
    p0 = y_emb[adj.row, :]
    p1 = y_emb[adj.col, :]
    p_0 = [tuple(row) for row in p0]
    p_1 = [tuple(row) for row in p1]
    classA = labels[adj.row]
    classB = labels[adj.col]
    # Same-class edges get the class color; cross-class edges map to index -1
    # (the last colormap entry).
    mask = classA == classB
    edge_colormask = mask * (classA + 1) - 1
    lines = list(zip(p_0, p_1))
    lc = mc.LineCollection(lines, linewidths=0.5, colors=colormap[edge_colormask])
    lc.set_alpha(line_alpha)
    ax.add_collection(lc)
    ax.scatter(y_emb[:, 0], y_emb[:, 1], s=s, c=colormap[labels])
    ax.margins(0.1, 0.1)
    plt.autoscale(tight=True)
    plt.tight_layout()
    # Output file name is derived from module-level CLI args.
    plt.savefig('weights/' + args.dataset + '_' + args.alg_type + '_' + args.adj_type + '_' + str(args.sub_version) + '_vis.png')
    plt.show()
5,332,654
def map(v, ds, de, ts, te):
    """\
    Linearly map the value v from the source range [ds, de] to the
    corresponding value in the target range [ts, te].

    NOTE(review): this shadows the builtin ``map`` within this module.
    """
    fraction = (v - ds) / (de - ds)
    return ts + (te - ts) * fraction
5,332,655
def get_Q_body(hs_type, Theta_SW_hs):
    """Casing heat loss of a hot-water heating heat source unit (eq. 2).

    Args:
        hs_type (str): type of the hot-water heating heat source unit
        Theta_SW_hs (ndarray): supply (outgoing) water temperature of the unit

    Returns:
        Casing heat loss of the unit; a constant hourly value for
        conventional oil units (eq. 2a), or an array derived from the supply
        water temperature for latent-heat-recovery units (eq. 2b).

    Raises:
        ValueError: if hs_type is not a recognized unit type.
    """
    conventional_types = ('石油従来型暖房機', '石油従来型温水暖房機',
                          '石油従来型給湯温水暖房機', '不明')
    latent_recovery_types = ('石油潜熱回収型暖房機', '石油潜熱回収型温水暖房機',
                             '石油潜熱回収型給湯温水暖房機')
    if hs_type in conventional_types:
        # (2a): fixed loss, replicated for every hour of the year
        return [234 * 3600 * 10 ** (-6)] * (24 * 365)
    if hs_type in latent_recovery_types:
        # (2b): loss is linear in the supply water temperature
        return (5.3928 * Theta_SW_hs - 71.903) * 3600 * 10 ** (-6)
    raise ValueError(hs_type)
5,332,656
def softmax(inputs):
    """
    Calculate the softmax for the given inputs (array).

    Subtracting the maximum before exponentiating keeps the computation
    numerically stable: softmax is invariant under a constant shift, and the
    previous direct ``np.exp(inputs)`` overflowed for large input values.

    :param inputs: array-like of real values
    :return: ndarray of the same length, non-negative, summing to 1
    """
    inputs = np.asarray(inputs, dtype=float)
    exps = np.exp(inputs - np.max(inputs))
    return exps / exps.sum()
5,332,657
def get_config_list(ranking, ckpt_path2is_3class):
    """Assemble a model list for a specific task based on the ranking.

    In addition to bundling information about the ckpt_path and whether to
    model_uncertainty, the config_list also lists the value of the metric
    to aid debugging.

    Args:
        ranking (list): list containing (Path, float), corresponding to
            checkpoint-metric pairs ranked from best to worst by metric value
        ckpt_path2is_3class (dict): mapping from ckpt_path to is_3class
            (whether to model_uncertainty)

    Returns:
        config_list (list): list bundling information about ckpt_path,
            model_uncertainty, and metric value
    """
    return [
        {'ckpt_path': str(ckpt_path),
         'is_3class': ckpt_path2is_3class[ckpt_path],
         'value': value}
        for ckpt_path, value in ranking
    ]
5,332,658
def _get_turn_angle(start_angle, target_angle):
    """
    Difference in angle in the range -180 to +180 (where negative is
    counter clockwise)

    Parameters
    ----------
    start_angle, target_angle : float
        Angles in degrees.

    Returns
    -------
    float
        difference in angle, wrapped into (-180, +180] by _map_to_pm180.
    """
    return _map_to_pm180(target_angle - start_angle)
5,332,659
def load(fname):
    """Load symbol from a JSON file.

    You can also use pickle to do the job if you only work on python.
    The advantage of load/save is the file is language agnostic.
    This means the file saved using save can be loaded by other language
    binding of mxnet.
    You also get the benefit being able to directly load/save from cloud
    storage(S3, HDFS)

    Parameters
    ----------
    fname : str
        The name of the file, examples:

        - `s3://my-bucket/path/my-s3-symbol`
        - `hdfs://my-bucket/path/my-hdfs-symbol`
        - `/path-to/my-local-symbol`

    Returns
    -------
    sym : Symbol
        The loaded symbol.

    Raises
    ------
    TypeError
        If fname is not a string.

    See Also
    --------
    Symbol.save : Used to save symbol into file.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname need to be string')
    # Allocate an empty C handle and let the C API populate it from the file.
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
    return Symbol(handle)
5,332,660
def filter_pairs(pairs):
    """Return only the pairs for which filter_pair(pair) is truthy."""
    return list(filter(filter_pair, pairs))
5,332,661
def step_impl(context):
    """Behave step: assert that the car in the scenario context is compliant.

    :type context: behave.runner.Context
    """
    # `is True` (not plain truthiness) also rejects non-boolean flag values.
    assert context.is_compliant is True
    logger.info("The car is compliant")
5,332,662
def merge(list_geo, npts=5):
    """
    Merge a list of cad_geometries, left to right, updating
    internal/external faces and connectivities along the way.

    Args:
        list_geo: a list of cad_geometries
        npts: number of points passed through to each pairwise merge

    Returns:
        a single merged cad_geometry
    """
    merged, *remaining = list_geo
    for geometry in remaining:
        merged = merged.merge(geometry, npts=npts)
    return merged
5,332,663
async def test_internal_sketch_name(
    command,
    context,
    node_before,
    sketch_name,
    gateway,
    message_schema,
):
    """Test internal sketch name command.

    Listens for the first message emitted by the gateway, checks its
    serialized form matches the expected command, then verifies that every
    node picked up the sketch name.  All parameters are pytest fixtures.
    """
    with context:
        # Only the first emitted message is checked.
        async for msg in gateway.listen():
            assert message_schema.dump(msg) == command
            break
    for node in gateway.nodes.values():
        assert node.sketch_name == sketch_name
5,332,664
def save_upstream(client, stage, data):
    """
    Put a bunch of param values into AWS SSM under the stage's prefix,
    yielding each (key, value) pair as it is stored.

    Note: this is a generator — callers must iterate it for the
    put_parameter calls to actually happen.
    """
    prefix = f"/jobbergate-api/{stage}"
    for key, value in data.items():
        client.put_parameter(
            Name=f"{prefix}/{key}",
            Value=value,
            Type="String",
            Overwrite=True,
        )
        yield (key, value)
5,332,665
def imshow(image: Imagelike, module: str = None, **kwargs):
    """Show the given image on a display and return that display.

    If ``image`` is None, the current display is closed instead.

    Fixed: the return annotation claimed ``-> None`` although the function
    always returns the display object.

    FIXME[todo]: Showing an image can be done in different ways:
    - blocking=True: the execution of the main program is blocked.  The
      display will run an event loop to guarantee a responsive GUI
      behaviour.  Blocking may stop on different occassions
      - when the display window is closed (either by GUI or
        programmatically)
      - after some timeout (the display window may then either close or
        switch into non-blocking mode, or stay open and unresponsive.
        the last should only be used, if a new image will be shown by the
        caller immediately after she regained control)
    - blocking=False: the execution of the main program is continued.  The
      image display may start some background thread to ensure a
      responsive GUI behaviour
    - unblock: the unblock option specifies what should happen, when a
      blocking display ends its block:
      'close': close the display window
      'show': continue showing the image in non-blocking mode.
      'freeze': continue showing the image but without event loop, leaving
      a frozen (unresponsive) image display window.  The caller is
      responsible for dealing with this window (either closing it or
      showing some new image).
    """
    display = get_display(module=module)
    if image is None:
        # No image: interpret the call as a request to close the display.
        display.close()
    else:
        display.show(image, **kwargs)
    return display
5,332,666
def update(frame_number):
    """Per-frame animation callback with a random stopping condition.

    The animation carries on and updates each frame as the agents move, eat
    and interact with other agents in the environment.  It stops (via the
    module-level ``carry_on`` flag) when a random draw falls below 0.1, and
    the final environment/agent state is printed as the stopping condition.

    Parameters
    ----------
    frame_number : int
        Frame index supplied by matplotlib's animation machinery (unused).
    """
    fig.clear()
    global carry_on  # The global variable set before is passed here
    # If it gets a random number below 0.1 it stops: random.random()
    # generates a float between 0 and 1.
    if random.random() < 0.1:
        carry_on = False
        print("stopping condition")  # Prints the final condition of the environment
    # Agents behaviour: movement, eating and sharing, iterated
    # num_of_iterations times per frame.
    for j in range(num_of_iterations):
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share_with_neighbours(neighbourhood)
    # Create plot of agents in environment
    matplotlib.pyplot.xlim(0, 99)  # Set the x-axis, i.e. it will go from 0 to 99
    matplotlib.pyplot.ylim(0, 99)  # Set the y-axis, from 0 to 99
    matplotlib.pyplot.imshow(environment)  # Display the image of environment
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i].x, agents[i].y)
5,332,667
def terminate_processes_matching_cmd_line(match_strings, kill=False, exclude_strings=None):
    """Terminates processes matching particular command line (case sensitive).

    Args:
        match_strings: a string or list of strings; a process is targeted if
            any of them occurs in its joined command line.
        kill: passed through to terminate_process (force-kill vs terminate).
        exclude_strings: substrings that exempt a process from termination;
            defaults to ['butler.py', 'reproduce.sh'] (see comment below).
    """
    if exclude_strings is None:
        # By default, do not terminate processes containing butler.py. This is
        # important so that the reproduce tool does not terminate itself, as the
        # rest of its command line may contain strings we usually terminate such
        # as paths to build directories.
        exclude_strings = ['butler.py', 'reproduce.sh']
    if isinstance(match_strings, str):
        match_strings = [match_strings]
    for process in psutil.process_iter():
        try:
            process_info = process.as_dict(attrs=['cmdline', 'pid'])
            process_cmd_line = process_info['cmdline']
            if not process_cmd_line:
                continue
            process_path = ' '.join(process_cmd_line)
        except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
            # Process vanished or is inaccessible - skip it.
            continue
        if any(x in process_path for x in match_strings):
            if not any([x in process_path for x in exclude_strings]):
                terminate_process(process_info['pid'], kill)
5,332,668
def test_atomic_language_max_length_4_nistxml_sv_iv_atomic_language_max_length_5_1(mode, save_output, output_format):
    """
    Type atomic/language is restricted by facet maxLength with value 11.

    Generated NIST conformance test: binds the schema/instance pair and
    checks the generated bindings round-trip under the given mode and
    output format.
    """
    assert_bindings(
        schema="nistData/atomic/language/Schema+Instance/NISTSchema-SV-IV-atomic-language-maxLength-5.xsd",
        instance="nistData/atomic/language/Schema+Instance/NISTXML-SV-IV-atomic-language-maxLength-5-1.xml",
        class_name="NistschemaSvIvAtomicLanguageMaxLength5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,332,669
def download_data():
    """Download the Chandra observation data for the module-level OBS_ID.

    Runs the ``download_chandra_obsid`` command unless PATH already exists,
    in which case the download is skipped with a log message.
    """
    if not PATH.exists():
        command = ["download_chandra_obsid", f"{OBS_ID}"]
        execute_command(command=command)
    else:
        log.info(f"Skipping download, {PATH} already exists.")
5,332,670
def prepare_commonvoice(
    corpus_dir: Pathlike,
    output_dir: Pathlike,
    languages: Union[str, Sequence[str]] = "auto",
    splits: Union[str, Sequence[str]] = COMMONVOICE_DEFAULT_SPLITS,
    num_jobs: int = 1,
) -> Dict[str, Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]]:
    """
    Returns the manifests which consist of the Recordings and Supervisions.
    When all the manifests are available in the ``output_dir``, it will simply
    read and return them.

    This function expects the input directory structure of::

        >>> metadata_path = corpus_dir / language_code / "{train,dev,test}.tsv"
        >>> # e.g. pl_train_metadata_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/train.tsv"
        >>> audio_path = corpus_dir / language_code / "clips"
        >>> # e.g. pl_audio_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/clips"

    Returns a dict with 3-level structure (lang -> split -> manifest-type)::

        >>> {'en/fr/pl/...': {'train/dev/test': {'recordings/supervisions': manifest}}}

    :param corpus_dir: Pathlike, the path to the downloaded corpus.
    :param output_dir: Pathlike, the path where to write the manifests.
    :param languages: 'auto' (prepare all discovered data) or a list of language codes.
    :param splits: by default ``['train', 'dev', 'test']``, can also include
        ``'validated'``, ``'invalidated'``, and ``'other'``.
    :param num_jobs: How many concurrent workers to use for scanning of the audio files.
    :return: a dict with manifests for all specified languagues and their train/dev/test splits.
    """
    if not is_module_available("pandas"):
        raise ValueError(
            "To prepare CommonVoice data, please 'pip install pandas' first."
        )
    if num_jobs > 1:
        # Parallel prep not implemented yet; proceed single-threaded.
        warnings.warn(
            "num_jobs>1 currently not supported for CommonVoice data prep;"
            "setting to 1."
        )

    corpus_dir = Path(corpus_dir)
    assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
    assert output_dir is not None, (
        "CommonVoice recipe requires to specify the output "
        "manifest directory (output_dir cannot be None)."
    )
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    if languages == "auto":
        # Discover languages by intersecting known codes with subdirectories.
        languages = set(COMMONVOICE_LANGS).intersection(
            path.name for path in corpus_dir.glob("*")
        )
        if not languages:
            raise ValueError(
                f"Could not find any of CommonVoice languages in: {corpus_dir}"
            )
    elif isinstance(languages, str):
        languages = [languages]

    manifests = {}

    for lang in tqdm(languages, desc="Processing CommonVoice languages"):
        logging.info(f"Language: {lang}")
        lang_path = corpus_dir / lang

        # Maybe the manifests already exist: we can read them and save a bit of preparation time.
        # Pattern: "cv_recordings_en_train.jsonl.gz" / "cv_supervisions_en_train.jsonl.gz"
        lang_manifests = read_cv_manifests_if_cached(
            output_dir=output_dir, language=lang
        )

        for part in splits:
            logging.info(f"Split: {part}")
            if part in lang_manifests:
                logging.info(
                    f"CommonVoice language: {lang} already prepared - skipping."
                )
                continue
            recording_set, supervision_set = prepare_single_commonvoice_tsv(
                lang=lang,
                part=part,
                output_dir=output_dir,
                lang_path=lang_path,
            )
            lang_manifests[part] = {
                "supervisions": supervision_set,
                "recordings": recording_set,
            }

        manifests[lang] = lang_manifests

    return manifests
5,332,671
def rmv_cmd(trgt):
    """Remove target with force (``rm -rf``).

    Raises a generic Exception when the rm exit code is nonzero.
    NOTE(review): the error message interpolates this file's directory
    concatenated with ``trgt`` (no path separator), which only reads
    correctly if trgt starts with '/' or is meant to be relative to this
    file — confirm against callers.
    """
    result = run_command(["rm", "-rf", trgt])
    if result.returncode != 0:
        raise Exception("Removing {} before testing failed!".
                        format(os.path.dirname(os.path.realpath(__file__)) + trgt))
5,332,672
def plot_array_trans(pdata, a, copy=False):
    """
    Warning!!!
    ----------
    Latest Information: 22/05/2012 this is deprecated and
    plot_array_transg is used instead.

    Purpose:
    --------
    Transform array according to speficication in list a.
    return a copy if copy is True.

    Parameters
    ----------
    pdata : array
        Data to be transformed (modified in place unless copy=True).
    a : list of length 3
        [low_range, mid_range, high_range]; low/high may be scalars or
        tuples, mid_range must have at least 2 elements (its spacing fixes
        the plot interval).
    copy : bool
        When True, operate on a deep copy and leave pdata untouched.

    Returns
    -------
    (pdata_trans, levels, lab): transformed array, plot-space level
    boundaries, and the original labels for those levels.

    Example:
    --------
    >>> b=np.arange(-9,9.1,0.5)
    >>> pdata=np.ones((37,37))
    >>> for i in range(37): pdata[i]=b
    >>> a=[(-9, -4), (-1, -0.5, 0, 0.5, 1), (4, 9)]
    >>> plot_array_trans(pdata,a)
    In [104]: plot_array_trans(pdata,a)
    Out[104]:
    (array([[-2.  , -1.95, -1.9 , ...,  1.9 ,  1.95,  2.  ],
            [-2.  , -1.95, -1.9 , ...,  1.9 ,  1.95,  2.  ],
            [-2.  , -1.95, -1.9 , ...,  1.9 ,  1.95,  2.  ],
            ...,
            [-2.  , -1.95, -1.9 , ...,  1.9 ,  1.95,  2.  ],
            [-2.  , -1.95, -1.9 , ...,  1.9 ,  1.95,  2.  ],
            [-2.  , -1.95, -1.9 , ...,  1.9 ,  1.95,  2.  ]]),
     [-2.0, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2.0],
     [-9, -4, -1, -0.5, 0, 0.5, 1, 4, 9])
    """
    if copy:
        pdata_trans = pcopy.deepcopy(pdata)
    else:
        pdata_trans = pdata
    low_range = a[0]
    mid_range = a[1]
    high_range = a[2]
    if len(mid_range) == 1:
        raise ValueError('there is only 1 element in middle range!')
    else:
        # mid_range spacing defines the uniform interval in plot space
        interval = mid_range[1] - mid_range[0]
    #
    # Build plot-space positions for the low range: evenly spaced below
    # mid_range[0] at the mid-range interval.
    if isinstance(low_range, tuple):
        low_range_plot = pcopy.deepcopy(list(low_range))
    else:
        low_range_plot = pcopy.deepcopy(list([low_range]))
    for i in range(len(low_range_plot)):
        low_range_plot[i] = mid_range[0] - interval * (len(low_range_plot) - i)
    # Same for the high range: evenly spaced above mid_range[-1].
    if isinstance(high_range, tuple):
        high_range_plot = pcopy.deepcopy(list(high_range))
    else:
        high_range_plot = pcopy.deepcopy(list([high_range]))
    for i in range(len(high_range_plot)):
        high_range_plot[i] = mid_range[-1] + interval * (i + 1)
    # Piecewise-linearly remap each low-range segment from data space to
    # the equally-spaced plot space (iterating segments from the top down).
    if len(low_range_plot) == 1:
        pdata_trans = arraylintrans(pdata_trans, (low_range, mid_range[0]), (low_range_plot[0], mid_range[0]))
    else:
        for i in range(len(low_range_plot))[::-1]:
            if i != len(low_range_plot) - 1:
                pdata_trans = arraylintrans(pdata_trans, (low_range[i], low_range[i + 1]),
                                            (low_range_plot[i], low_range_plot[i + 1]))
            else:
                pdata_trans = arraylintrans(pdata_trans, (low_range[i], mid_range[0]),
                                            (low_range_plot[i], mid_range[0]))
    # Remap the high-range segments the same way, bottom up.
    if len(high_range_plot) == 1:
        pdata_trans = arraylintrans(pdata_trans, (mid_range[-1], high_range), (mid_range[-1], high_range_plot[0]))
    else:
        for i in range(len(high_range_plot)):
            if i == 0:
                pdata_trans = arraylintrans(pdata_trans, (mid_range[-1], high_range[0]),
                                            (mid_range[-1], high_range_plot[0]))
            else:
                pdata_trans = arraylintrans(pdata_trans, (high_range[i - 1], high_range[i]),
                                            (high_range_plot[i - 1], high_range_plot[i]))
    # Normalize scalar ranges to lists so labels concatenate cleanly.
    if not hasattr(low_range, '__iter__'):
        low_range = list([low_range])
    if not hasattr(high_range, '__iter__'):
        high_range = list([high_range])
    levtemp = [low_range_plot, mid_range, high_range_plot]
    levels = [j for i in levtemp for j in i]
    labtemp = [low_range, mid_range, high_range]
    lab = [j for i in labtemp for j in i]
    return pdata_trans, levels, lab
5,332,673
def heartbeat_handler():
    """
    Triggers SPM heartbeat and fires power on event when booting.

    Reads the power state from the SPM connection, and on state changes
    fires the matching system/power/* events through edmp.  The heartbeat
    (conn.noop) is always sent, even if event processing fails.
    """
    try:
        # Check if not in state on
        if context["state"] != "on":
            # Get current status
            res = conn.status()
            old_state = context["state"]
            new_state = res["last_state"]["up"]
            # On first time only (no state recorded yet)
            if old_state == None:
                # Trigger last off state event if last boot time is found
                try:
                    boot_time = __salt__["rpi.boot_time"]().get("value", None)
                    if boot_time == None:
                        log.warning("Last boot time could not be determined")
                    else:
                        # Last boot time is considered identical to last power off time because of 'fake-hwclock'
                        edmp.trigger_event({
                            "timestamp": boot_time
                        }, "system/power/last_off")
                except:
                    # Best-effort: a failed last-off event must not block the heartbeat.
                    log.exception("Failed to trigger last system off event")
                # Trigger recover state event when powered down unexpectedly
                if res["last_trigger"]["down"] not in ["none", "rpi"]:
                    edmp.trigger_event({
                        "trigger": res["last_trigger"]["down"]
                    }, "system/power/recover")
            # Check if state has changed
            if old_state != new_state:
                context["state"] = new_state
                # Trigger state event ("booting" gets an underscore prefix)
                edmp.trigger_event({
                    "trigger": res["last_trigger"]["up"]
                }, "system/power/{:}{:}".format("_" if new_state in ["booting"] else "", new_state))
    finally:
        # Trigger heartbeat as normal
        conn.noop()
5,332,674
def process_generate_api_token_data(post_data):
    """
    Map token-generation form POST data to (user, expiration) records.

    This expects the post_data to contain an array called ``user_to_form``.
    Each item in this array is of the form:

    .. code-block:: python

        '<UserID>.<form_prefix>' (i.e. '1.form-0')

    Each form then may add two form data key-value pairs:

    .. code-block:: python

        '<form_prefix>-expiration_date': '<date>'
        (i.e. 'form-0-expiration_date': '2021-06-04')

    Returns a list of dicts with keys 'user' (User instance) and 'expires'
    (parsed expiration date).
    """
    user_to_form_pairs = [pair.split('.') for pair in post_data.getlist('user_to_form')]
    user_form_data = []
    for user_id, form_prefix in user_to_form_pairs:
        user = User.objects.get(UserID=user_id)
        # Collect only the form fields belonging to this user's form prefix.
        form_data = dict_filter_keys_start_with(form_prefix, post_data)
        date_str = '-'.join([form_data.get('ExpirationDate_year', ''),
                             form_data.get('ExpirationDate_month', ''),
                             form_data.get('ExpirationDate_day', '')])
        expiration_date = set_date_from_str(date_str=date_str)
        user_form_data.append({'user': user, 'expires': expiration_date})
    return user_form_data
5,332,675
def dummyfunc1():
    """A placeholder that does nothing.

    Returns
    -------
    Nothing!

    Examples
    --------
    Nada
    """
    return None
5,332,676
def add_vcolor(hemis, mesh=None, name='color'):
    """Apply per-vertex colors (`hemis`) to a Blender mesh.

    `hemis` is either a color array or a 2-element sequence from which the
    entry matching the mesh's vertex count is chosen.  Colors may be scalars
    (replicated across RGB) or RGB triples.

    Bug fixed: the old-Blender RGB branch iterated ``for i in len(vcolor):``
    which raises TypeError (int is not iterable); it now iterates the faces
    like its scalar sibling branch.

    Parameters
    ----------
    hemis : sequence
        Colors to apply (see above).
    mesh : bpy mesh, str, or None
        Target mesh; None uses the active object's mesh, a str is looked up
        in bpy.data.meshes.
    name : str
        Name for the new vertex color layer.

    Returns
    -------
    The created vertex color layer.
    """
    from bpy import context as C
    from bpy import data as D
    if mesh is None:
        mesh = C.scene.objects.active.data
    elif isinstance(mesh, str):
        mesh = D.meshes[mesh]
    bpy.ops.object.mode_set(mode='OBJECT')
    color = hemis
    if len(hemis) == 2:
        # Pick whichever hemisphere's array matches this mesh's vertex count.
        color = hemis[0]
        if len(mesh.vertices) == len(hemis[1]):
            color = hemis[1]
    vcolor = mesh.vertex_colors.new(name)
    if hasattr(mesh, "loops"):
        # Modern Blender: colors are stored per loop; map loop -> vertex.
        loopidx = [0] * len(mesh.loops)
        mesh.loops.foreach_get('vertex_index', loopidx)
        if not isinstance(color[0], (list, tuple)):
            # Scalar per vertex: replicate into an RGB triple.
            for i, j in enumerate(loopidx):
                vcolor.data[i].color = [color[j]] * 3
        else:
            for i, j in enumerate(loopidx):
                vcolor.data[i].color = color[j]
    else:
        # older blender version, need to iterate faces instead
        print("older blender found...")
        if not isinstance(color[0], (list, tuple)):
            for i in range(len(mesh.faces)):
                v = mesh.faces[i].vertices
                vcolor.data[i].color1 = [color[v[0]]] * 3
                vcolor.data[i].color2 = [color[v[1]]] * 3
                vcolor.data[i].color3 = [color[v[2]]] * 3
        else:
            # FIX: was `for i in len(vcolor):` (TypeError); iterate faces
            # exactly like the scalar branch above.
            for i in range(len(mesh.faces)):
                v = mesh.faces[i].vertices
                vcolor.data[i].color1 = color[v[0]]
                vcolor.data[i].color2 = color[v[1]]
                vcolor.data[i].color3 = color[v[2]]
    print("Successfully added vcolor '%s'" % name)
    return vcolor
5,332,677
def gridSeach(model, parameters, features, response, train, test):
    """
    Perform a grid search over the parameter space, refitting the model on
    the best parameter combination (by r2 on the test split).

    It is simplistic and only allows certain range of values.  If there is
    a parameter in the models that needs to be a list it has to be modified
    (``hidden_layer_sizes`` is special-cased below).

    Fixed for modern pandas: ``DataFrame.append`` (removed in pandas 2.x)
    and ``.ix`` (removed long ago) are replaced with a row list plus
    ``idxmax``/``.loc``.

    Returns
    -------
    (model, model_matrix) : the refit model and a DataFrame with one row per
        parameter combination plus its 'r2' score.
    """
    import itertools
    import pandas as pd

    names = sorted(parameters)
    combinations = list(itertools.product(*(parameters[name] for name in names)))
    names.append('r2')

    rows = []
    for combo in combinations:
        # zip truncates at len(combo), so 'r2' never receives a value here.
        params = dict(zip(names, combo))
        model = model.set_params(**params)
        model.fit(features[train], response[train])
        row = dict(params)
        row['r2'] = model.score(features[test], response[test])
        if 'hidden_layer_sizes' in row:
            # Stored wrapped in a list so the DataFrame cell holds the tuple.
            row['hidden_layer_sizes'] = [row['hidden_layer_sizes']]
        rows.append(row)
    model_matrix = pd.DataFrame(rows, columns=names)

    # Best combination: row with maximal r2, excluding the r2 column itself.
    best_row = model_matrix.loc[model_matrix['r2'].idxmax(), model_matrix.columns[:-1]]
    best = dict(best_row)
    if 'hidden_layer_sizes' in best:
        best['hidden_layer_sizes'] = best['hidden_layer_sizes'][0]
    if 'n_neighbors' in best:
        best['n_neighbors'] = int(best['n_neighbors'])
    model = model.set_params(**best)
    model.fit(features[train], response[train])
    return (model, model_matrix)
5,332,678
def cvt_lambdef(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """lambdef: 'lambda' [varargslist] ':' test

    Converts a lambda grammar node into a FuncDefStmt with an omitted
    return type; the lambda body is cooked inside a fresh function context
    so its bindings are scoped to the lambda.

    NOTE(review): children[0] is the 'lambda' keyword leaf and is converted
    in a binding context to serve as the function's name node — confirm
    this is the intended naming scheme for lambdas.
    """
    assert ctx.is_REF, [node]
    name = xcast(ast_cooked.NameBindsNode, cvt(node.children[0], ctx.to_BINDING()))
    ctx_func = new_ctx_from(ctx)
    if len(node.children) == 4:
        # 'lambda' varargslist ':' test
        parameters = xcast(ast_cooked.BareTypedArgsListNode, cvt(node.children[1], ctx_func))
        suite = cvt(node.children[3], ctx_func)
    else:
        # 'lambda' ':' test  (no parameters)
        parameters = ast_cooked.BareTypedArgsListNode(args=[])
        suite = cvt(node.children[2], ctx_func)
    return ast_cooked.FuncDefStmt(name=name,
                                  parameters=parameters.args,
                                  return_type=ast_cooked.OMITTED_NODE,
                                  suite=suite,
                                  scope_bindings=ctx_func.scope_bindings)
5,332,679
def tan(x) -> None:
    """Take an angle in radians and compute its tangent.

    Example: tan(60 degrees) - math.tan(math.radians(60)) == 1.732051
    """
    ...
5,332,680
def ischapter_name(text_str):
    """Return True if the string looks like a Chinese chapter heading.

    Matches headings such as "第一章 ...": the character 第, up to nine
    characters for the number, then one of 章/节/回/卷/集/部/篇.
    """
    pattern = r'^第(.{1,9})([章节回卷集部篇])(\s*)(.*)'
    return re.match(pattern, text_str) is not None
5,332,681
def calc_mean_onbit_density(bitsets, number_of_bits):
    """Calculate the mean density of bits that are on in bitsets collection.

    Args:
        bitsets (list[pyroaring.BitMap]): List of fingerprints
        number_of_bits: Number of bits for all fingerprints

    Returns:
        float: Mean on bit density
    """
    total_onbits = fsum(len(bitset) for bitset in bitsets)
    mean_onbits = total_onbits / float(len(bitsets))
    return float(mean_onbits / number_of_bits)
5,332,682
def logger_client():
    """Authentification and service delivery from gmail API.

    Loads OAuth credentials from token.pickle if present, refreshing or
    re-running the local OAuth flow as needed, then returns a Gmail v1
    service client.

    Returns:
        googleapiclient service object for the Gmail API.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens,
    # and is created automatically when the authorization flow
    # completes for the first time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                CREDENTIALS, SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('gmail', 'v1', credentials=creds)
    return service
5,332,683
def get_parameters(path: str) -> List[Dict[str, Any]]:
    """
    Retrieve parameters from AWS SSM Parameter Store, following pagination.
    Decrypts any encrypted parameters.

    Relies on the appropriate environment variables to authenticate against
    AWS:
    https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html

    Fixed: `next_token` was annotated Optional[bool] although it holds str
    continuation tokens; the True-sentinel loop is replaced by an explicit
    loop that follows NextToken until it is absent.
    """
    ssm = boto3.client("ssm")
    parameters: List[Dict[str, Any]] = []
    kwargs: Dict[str, Any] = {"Path": path, "Recursive": False, "WithDecryption": True}
    while True:
        response = ssm.get_parameters_by_path(**kwargs)
        parameters.extend(response.get("Parameters", []))
        next_token = response.get("NextToken")
        if next_token is None:
            break  # last page reached
        kwargs["NextToken"] = next_token
    return parameters
5,332,684
def normalizeUrl(url):
    """Normalize a URL's path via furl; returns the normalized URL string.

    ParseResult(scheme='https', netloc='www.tWitch.tv',
    path='/ludwig/clip/MoldyNiceMarjoramCharlieBitMe-6EbApxzSGbjacptE',
    params='', query='a=b&c=d', fragment='')

    Wish I could convert clips like this:
    https://www.twitch.tv/ludwig/clip/MoldyNiceMarjoramCharlieBitMe-6EbApxzSGbjacptE
    To ones like this:
    https://clips.twitch.tv/MoldyNiceMarjoramCharlieBitMe-6EbApxzSGbjacptE
    """
    f = furl(url)
    f.path.normalize()
    # Twitch clip rewriting is detected but not implemented yet (see TODO).
    if f.host == 'www.twitch.tv' or f.host == 'twitch.tv':
        m = re.match('^/[^/]+/clip/[^/]+.*$', str(f.path))
        if m is not None:
            # TODO: Yeah
            pass
    return f.url
5,332,685
def _getPVGIS(lat, lon):
    """Fetch a typical meteorological year (TMY) dataset from PVGIS.

    ------
    inputs
    ------
    lat : latitude in decimal degrees, south is negative.
    lon : longitude in decimal degrees, west is negative.

    -------
    returns
    -------
    TMY as a dataframe with datetime index, containing 9 timeseries:
    temperature, humidity, global horizontal, beam normal, diffuse
    horizontal, infrared horizontal, wind speed, wind direction and
    pressure, plus the query coordinates ('lat'/'lon').

    From PVGIS [https://ec.europa.eu/jrc/en/PVGIS/tools/tmy]: a TMY holds
    one value per hour for a full year, selected from 10+ years of hourly
    data following ISO 15927-4. The radiation DB is the default for the
    location (PVGIS-SARAH, PVGIS-NSRDB or PVGIS-ERA5); the other
    meteorological variables come from the ERA-Interim reanalysis.
    """
    outputformat = "json"
    request_url = f"https://re.jrc.ec.europa.eu/api/tmy?lat={lat}&lon={lon}&outputformat={outputformat}"
    response = requests.get(request_url)
    if response.status_code != 200:
        raise ValueError("API get request not succesfull, check your input")
    # Parse the hourly payload and let the private helper set the datetime
    # index.
    hourly = pd.DataFrame(response.json()['outputs']['tmy_hourly'])
    tmy = _tmy_dateparser(hourly)
    # Keep the query coordinates alongside the data for off-line /
    # in-session storage.
    tmy['lat'] = lat
    tmy['lon'] = lon
    # Shorten the verbose PVGIS column names for the common variables.
    tmy.columns = ['T', *tmy.columns[1:6].values, 'WS', 'WD', 'SP', 'lat', 'lon']
    return tmy
5,332,686
def get_metric_key_samples(metricDict, metricNames, keyVal="means"):
    """
    Returns a dictionary of samples for the given metric names, restricted
    to a single summary key.

    Args:
        metricDict (dict): Dictionary of sampled metrics
        metricNames (list): Names of the keys of the metric to return
        keyVal (str): The value of the key for which data is to be
            extracted. Must be one of
            {"mins", "maxs", "means", "vars", "sums"}

    Returns:
        Dictionary mapping metric name to the samples stored under keyVal

    Raises:
        ValueError: If keyVal is not a recognised summary key.
    """
    valid_keys = ("mins", "maxs", "means", "vars", "sums")
    # Validate with an explicit raise rather than assert: asserts are
    # stripped when Python runs with -O.
    if keyVal not in valid_keys:
        raise ValueError(
            "keyVal must be one of {}, got {!r}".format(valid_keys, keyVal)
        )
    retDict = get_metric_samples(metricDict, metricNames)
    # Replace each metric's full summary dict with just the requested key.
    for key in retDict:
        retDict[key] = retDict[key][keyVal]
    return retDict
5,332,687
def apply_nonbonded(nodes, scaling=1.0, suffix=""):
    """Compute the nonbonded (9-6 Lennard-Jones) energy term for *nodes*.

    Reads position, sigma and epsilon from ``nodes.data`` (the parameter
    keys are suffixed with *suffix*) and returns a dict mapping
    ``"u" + suffix`` to the scaled energy.
    """
    # TODO: should this be 9-6 or 12-6?
    energy = esp.mm.nonbonded.lj_9_6(
        x=nodes.data["x"],
        sigma=nodes.data["sigma%s" % suffix],
        epsilon=nodes.data["epsilon%s" % suffix],
    )
    return {"u%s" % suffix: scaling * energy}
5,332,688
def swapi_films(episode):
    """Fetch a film resource from the Star Wars API.

    :param episode: film identifier appended to the ``films/`` endpoint
    :return: the raw ``requests`` response object
    """
    endpoint = SWAPI_API + 'films/' + str(episode)
    return requests.get(endpoint)
5,332,689
def prepare_qualifications(request, bids=None, lotId=None):
    """ creates Qualification for each eligible Bid (or lot value)

    For multi-lot tenders, one pending qualification is created per
    pending lotValue whose relatedLot is an active lot (optionally
    filtered to a single *lotId*). For single-lot tenders, one pending
    qualification is created per pending bid.

    Args:
        request: request whose validated data holds the tender.
        bids: bids to qualify; any falsy value (None or an empty list)
            falls back to all tender bids, matching the original
            behaviour of the old ``bids=[]`` mutable default.
        lotId: optional lot id to restrict qualification creation to.

    Returns:
        List of the ids of the newly created qualifications.
    """

    def _add(tender, new_ids, data):
        # Build, timestamp and register a single pending qualification.
        qualification = Qualification(data)
        qualification.date = get_now()
        tender.qualifications.append(qualification)
        new_ids.append(qualification.id)

    new_qualifications = []
    tender = request.validated["tender"]
    if not bids:
        bids = tender.bids
    if tender.lots:
        active_lots = {lot.id for lot in tender.lots if lot.status == "active"}
        for bid in bids:
            if bid.status in ("invalid", "deleted"):
                continue
            for lotValue in bid.lotValues:
                if lotValue.status != "pending" or lotValue.relatedLot not in active_lots:
                    continue
                # When a specific lot is requested, skip the others; when
                # it matches, lotValue.relatedLot == lotId, so using
                # relatedLot covers both original branches.
                if lotId and lotValue.relatedLot != lotId:
                    continue
                _add(
                    tender,
                    new_qualifications,
                    {"bidID": bid.id, "status": "pending", "lotID": lotValue.relatedLot},
                )
    else:
        for bid in bids:
            if bid.status == "pending":
                _add(tender, new_qualifications, {"bidID": bid.id, "status": "pending"})
    return new_qualifications
5,332,690
def int_format(x):
    """
    Format an integer into a String:

    - upcast to a (u)int64
    - size a character buffer from the digit count
    - render with snprintf
    """
    value = upcast(x)
    # One extra char for the trailing NUL terminator.
    buf = flypy.runtime.obj.core.newbuffer(flypy.types.char, ndigits(value) + 1)
    formatting.sprintf(buf, getformat(value), value)
    return flypy.types.String(buf)
5,332,691
def _resolve_atomtypes(topology_graph, typemap): """Determine the final atomtypes from the white- and blacklists.""" atoms = { atom_idx: data for atom_idx, data in topology_graph.atoms(data=True) } for atom_id, atom in typemap.items(): atomtype = [ rule_name for rule_name in atom["whitelist"] - atom["blacklist"] ] if len(atomtype) == 1: atom["atomtype"] = atomtype[0] elif len(atomtype) > 1: raise FoyerError( "Found multiple types for atom {} ({}): {}.".format( atom_id, atoms[atom_id].atomic_number, atomtype ) ) else: raise FoyerError( "Found no types for atom {} ({}).".format( atom_id, atoms[atom_id].atomic_number ) )
5,332,692
def main():
    """Entry point: create the Spiral window and start the arcade loop."""
    app = Spiral()
    app.setup()
    arcade.run()
5,332,693
def pyccparser2cbmc(srcfile, libs):
    """
    Transforms the result of a parsed file from pycparser to a valid cbmc
    input.

    Args:
        srcfile: Path of the source file to transform.
        libs: Library header names to re-insert as ``#include`` directives.

    Returns:
        Path of the rewritten file (``<srcfile>_cbmc.c``).
    """
    with open(srcfile, "r") as fd:
        src = fd.read()

    # Replace the definition of __VERIFIER_error with the one for CBMC,
    # marked non-returning so CBMC treats calls as terminal.
    # (The original also computed re.search(...).pos for a commented-out
    # debug print; that dead code is dropped.)
    if "extern void __VERIFIER_error();" in src:
        vererr = "extern void __VERIFIER_error() __attribute__ ((__noreturn__));" + '\n'
        src = re.sub("extern void __VERIFIER_error\\(\\);", vererr, src)

    # Re-insert the original library #includes in front of the strip
    # marker typedef.
    if "_____STARTSTRIPPINGFROMHERE_____" in src:
        pos = src.find("typedef int _____STARTSTRIPPINGFROMHERE_____;", 0, len(src))
        libstr = ""
        for lib in reversed(libs):
            libstr += '#include <' + lib + '>' + '\n'
        src = src[:pos] + libstr + '\n' + src[pos:]

    src = strip(src)

    newfile = srcfile + "_cbmc.c"
    with open(newfile, "w") as fd:
        fd.write(src)
    return newfile
5,332,694
def check_finished(worker, exec_id):
    """Poll the remote worker for the status of a submitted job.

    :param worker: remote worker handle exposing ``status(exec_id)``
    :param exec_id: identifier of the submitted job
    :return: tuple ``(finished, status_dict)`` — ``finished`` is True only
        when the job completed successfully
    :raises Exception: if the job failed remotely or the id is unknown
    """
    result = worker.status(exec_id)
    # NOTE(review): dill.loads deserializes arbitrary data coming back
    # from the worker — only safe against a trusted worker endpoint.
    status = dill.loads(base64.b64decode(result.data))
    state = status["status"]
    if state == "FAILED":
        raise Exception("Remote job execution failed")
    if state == "INVALID ID":
        raise Exception("Invalid Id")
    # Any state other than COMPLETED means "still running".
    return state == "COMPLETED", status
5,332,695
def execute_runs():
    """
    Execute multiple runs in series using multiple cores.

    Reads the number of worker processes and runs from the module-level
    ``params`` dict ('CORES' / 'RUNS') and dispatches ``execute_run`` for
    each run via a multiprocessing pool.

    :return: Nothing.
    """
    # Initialise empty list of async results.
    results = []

    # Initialise pool of workers.
    pool = Pool(processes=params['CORES'])

    for run in range(params['RUNS']):
        # Execute a single evolutionary run.
        results.append(pool.apply_async(execute_run, (run,)))

    # Block until every run has finished (re-raises worker exceptions).
    for result in results:
        result.get()

    # Close pool once runs are finished, then join so the worker
    # processes are actually reaped (the original never joined).
    pool.close()
    pool.join()
5,332,696
def round_filters(filters, global_params):
    """Scale *filters* by the width multiplier and round to the divisor.

    Returns *filters* unchanged when no width multiplier is configured.
    The rounded value is clamped below by ``min_depth`` (or the divisor)
    and is never allowed to fall more than 10% under the scaled target.
    """
    width_mult = global_params.width_coefficient
    if not width_mult:
        return filters

    divisor = global_params.depth_divisor
    floor = global_params.min_depth or divisor
    scaled = filters * width_mult
    # Round to the nearest multiple of divisor, but never below `floor`.
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:
        # prevent rounding down by more than 10%
        rounded += divisor
    return int(rounded)
5,332,697
def print_results(is_cat):
    """Report on stdout whether the image contained a cat.

    Args:
        is_cat: truthy when a cat was detected in the image.

    Returns:
        None.
    """
    message = (
        "Image contains cat!"
        if is_cat
        else "Unfortunately, image does not contain cat"
    )
    print(message)
5,332,698
def handle_input(): """Handles the input""" # Global variables that might be changed global bypass_ticks global running for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.KEYDOWN: if event.key == pygame.K_LEFT: current_object.move_x(-1, blocks) elif event.key == pygame.K_RIGHT: current_object.move_x(1, blocks) elif event.key == pygame.K_DOWN: bypass_ticks = True elif event.key == pygame.K_UP: current_object.rotate(blocks)
5,332,699