Dataset preview. Columns: content (string, lengths 22 to 815k characters), id (int64, values 0 to 4.91M).
def options():
    """pylbm command line options"""
    parser = ArgumentParser()
    logging = parser.add_argument_group('log')
    logging.add_argument("--log", dest="loglevel", default="WARNING",
                         choices=['WARNING', 'INFO', 'DEBUG', 'ERROR'],
                         help="Set the log level")
    monitoring = parser.add_argument_group('monitoring')
    monitoring.add_argument("--monitoring", action="store_true",
                            help="Set the monitoring")
    mpi = parser.add_argument_group('mpi splitting')
    mpi.add_argument("-npx", dest="npx", default=1, type=int,
                     help="Set the number of processes in x direction")
    mpi.add_argument("-npy", dest="npy", default=1, type=int,
                     help="Set the number of processes in y direction")
    mpi.add_argument("-npz", dest="npz", default=1, type=int,
                     help="Set the number of processes in z direction")
    args, _ = parser.parse_known_args()
    return args
9,800
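A minimal usage sketch for the options() helper above, assuming the surrounding module does `from argparse import ArgumentParser` (the import is not part of the row):

# Hypothetical invocation: python script.py --log DEBUG -npx 2 --monitoring
args = options()
print(args.loglevel)    # 'DEBUG'
print(args.npx)         # 2
print(args.monitoring)  # True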
def omni_load(dates, level, downloadonly=False, varformat=None, get_support_data=False, prefix='', suffix=''): """Loads OMNI data into pytplot variables""" file_list = omni_filename(dates, level) # 1. Download files count = 0 downloaded_files = [] for remotef, localf in file_list: count += 1 resp, err, localfile = pyspedas.download_files(remotef, localf) if resp: print(str(count) + '. File was downloaded. Location: ' + localfile) downloaded_files.append(localfile) else: print(str(count) + '. Error: Could not download file: ' + remotef) print(err) print('Downloaded ' + str(len(downloaded_files)) + ' files.') # 2. Load files into tplot downloaded_vars = [] if not downloadonly: try: downloaded_vars = pytplot.cdf_to_tplot(downloaded_files, varformat, get_support_data, prefix, suffix, False, True) except TypeError as e: msg = "cdf_to_tplot could not load all data.\nError:\n" + str(e) print(msg) print('Loaded ' + str(len(downloaded_vars)) + ' variables.') # 3. Time clip if len(downloaded_vars) > 0: pyspedas.time_clip(downloaded_vars, dates[0], dates[1], '')
9,801
def compte_var(f, var):
    """Count the number of occurrences of the variable var in f."""
    n = f.nb_operandes()
    if n == 0:
        v = f.get_val()
        if v == var:
            return 1
        else:
            return 0
    elif n == 1:
        f2 = (f.decompose())[0]
        return compte_var(f2, var)
    else:
        [f2, f3] = f.decompose()
        return compte_var(f2, var) + compte_var(f3, var)
9,802
def inject_vars():  # the function name can be anything
    """Template context processor."""
    from watchlist.models import User
    user = User.query.first()  # user object
    if not user:
        user = User()
        user.name = 'BL00D'
    return locals()
9,803
def try_parse_section(text: str, section_name: str) -> str:
    """
    Parse a section. Return an empty string if section not found.

    Args:
        text (str): text
        section_name (str): section's name

    Returns:
        (str): section
    """
    try:
        return parse_section(text, section_name)
    except Exception:
        return ""
9,804
def client_code2(component1: ComponentInterface, component2: ComponentInterface) -> None: """ Thanks to the fact that the child-management operations are declared in the base Component class, the client code can work with any component, simple or complex, without depending on their concrete classes. """ if component1.is_composite(): component1.add(component2) print(f'Result: {component1.operation()}', end='')
9,805
def on_handshake(bot): """Hook function called when the bot starts, after handshake with the server. Here the bot already knows server enforced dimensions (defined in server side bots.cfg file). This is called right before starting to poll for tasks. It's a good time to do some final initialization or cleanup that may depend on server provided configuration. Arguments: - bot: bot.Bot instance. See ../api/bot.py. """ pass
9,806
def run_all(base_logdir): """Generate waves of the shapes defined above. For each wave, creates a run that contains summaries. Arguments: base_logdir: the directory into which to store all the runs' data """ waves = [ ("sine_wave", sine_wave), ("square_wave", square_wave), ("bisine_wave", bisine_wave), ] for wave_name, wave_constructor in waves: logdir = os.path.join(base_logdir, wave_name) writer = tf.summary.create_file_writer(logdir) with writer.as_default(): for step in range(FLAGS.steps): run(wave_name, wave_constructor, step)
9,807
def test_power():
    """Test computing and saving PSD."""
    fmin = 0.1
    fmax = 300
    compute_and_save_psd(raw_fname, fmin, fmax, method='welch')
    compute_and_save_psd(raw_fname, fmin, fmax, method='multitaper')
9,808
def get_location_based_lifers(web_page): """ a method that takes in a web page and returns back location frequency for lifers and lifer details. """ bs4_object = BeautifulSoup(web_page, html_parser) table_list = bs4_object.find_all('li', class_=myebird_species_li_class) lifer_data_list = [] for item in table_list: bird_name = item.find_all('div')[1].find('a').find_all('span')[0].contents[0].strip() location = item.find_all('div')[2].find_all('div')[1].find_all('a')[0].contents[0].strip() date = item.find_all('div')[2].find_all('div')[0].find('a').contents[0].strip() lifer_data_list.append([bird_name, location, date]) location_frequency = dict() for item in range(len(lifer_data_list)): if lifer_data_list[item][1] in location_frequency.keys(): location_frequency[lifer_data_list[item][1]] += 1 else: location_frequency[lifer_data_list[item][1]] = 1 sorted_location_frequency = sorted(location_frequency.items(), key=lambda x: x[1], reverse=True) return sorted_location_frequency, lifer_data_list
9,809
def parse_show_qos_queue_profile(raw_result): """ Parse the show command raw output. :param str raw_result: vtysh raw result string. :rtype: dict :return: The parsed result of the 'show qos queue-profile' command in a \ dictionary: for 'show qos queue-profile': :: { 'default': {'profile_name': 'default', 'profile_status': 'applied'}, 'factory-default': {'profile_name': 'factory-default', 'profile_status': 'complete'} } for 'show qos queue-profile <name>': :: { '0': {'queue_num': '0', 'local_priorities': '0', 'name': 'Scavenger_and_backup_data'}, '1': {'queue_num': '1', 'local_priorities': '1', 'name': ''}, ... } """ hyphen_line = raw_result.splitlines()[1] columns = [pos for pos, char in enumerate(hyphen_line) if char == ' '] result = {} if len(columns) + 1 == 2: # All profiles. # Skip the first two banner lines. for line in raw_result.splitlines()[2:]: profile_name = line[columns[0]:len(line)].strip() result[profile_name] = {} result[profile_name]['profile_status'] = \ line[0:columns[0]].strip() result[profile_name]['profile_name'] = \ line[columns[0]:len(line)].strip() elif len(columns) + 1 == 3: # Single profile. # Skip the first two banner lines. for line in raw_result.splitlines()[2:]: queue_num = line[0:columns[0]].strip() result[queue_num] = {} result[queue_num]['queue_num'] = \ line[0:columns[0]].strip() result[queue_num]['local_priorities'] = \ line[columns[0]:columns[1]].strip() result[queue_num]['name'] = \ line[columns[1]:len(line)].strip() else: # Error. raise ValueError("Unexpected number of columns.") return result
9,810
def glVertex3sv(v):
    """
    v - seq( GLshort, 3)
    """
    if 3 != len(v):
        raise TypeError(len(v), "3-array expected")
    _gllib.glVertex3sv(v)
9,811
def plot_png(num_x_points=50): """ renders the plot on the fly. """ fig = Figure() axis = fig.add_subplot(1, 1, 1) x_points = range(num_x_points) axis.plot(x_points, [random.randint(1, 30) for x in x_points]) output = io.BytesIO() FigureCanvasAgg(fig).print_png(output) return Response(output.getvalue(), mimetype="image/png")
9,812
def render_message(session, window, msg, x, y): """Render a message glyph. Clears the area beneath the message first and assumes the display will be paused afterwards. """ # create message box msg = GlyphCoordinate(session, msg, x, y) # clear existing glyphs which intersect for gly in ( session.query(GlyphCoordinate) .join(GlyphCoordinate.glyph) .filter(GlyphCoordinate.intersects(msg)) ): gly.blank(window) # render msg.render(window, {}) window.refresh() return msg
9,813
def part_one(filename='input.txt', target=2020): """Satisfies part one of day one by first sorting the input rows so we can avoid the worst case O(n**2). We incur O(n log n) to do the sort followed by a brute force search with short circuiting if the sum exceeds our target. This is possible since we know in sorted order, only larger values will follow. Note, we assume only one valid solution in the given file. If more than one, there is no guarantee which will be returned. Parameters ---------- filename : str, optional The file to parse as input will contain one integer per line, by default 'input.txt' target : int, optional The target sum we want to reach, by default 2020 Returns ------- int The product of the two integers that sum to the target value Raises ------ Exception Probably overkill, but I wanted to know if my code was failing to find a solution. Also, I could have looked for a more appropriate exception than the base one. """ items = sorted(map(int, Path(filename).read_text().split())) count = len(items) for i in range(count): for j in range(i+1, count): summand = items[i] + items[j] if summand > target: break elif summand == target: return items[i]*items[j] raise Exception('No solution!')
9,814
def get_urls(name, version=None, platform=None):
    """
    Return a mapping of standard URLs
    """
    dnlu = rubygems_download_url(name, version, platform)
    return dict(
        repository_homepage_url=rubygems_homepage_url(name, version),
        repository_download_url=dnlu,
        api_data_url=rubygems_api_url(name, version),
        download_url=dnlu,
    )
9,815
def add_spot_feature (model, feature, name, short_name, dimension, is_int): """ Add a new spot feature to the model object. This must be done for custom features added using spot.putFeature to get properly integrated into the model. The trackmate model object is modified in-place. """ feature_model = model.getFeatureModel() spot_features = feature_model.getSpotFeatures() spot_feature_names = feature_model.getSpotFeatureNames() spot_feature_short_names = feature_model.getSpotFeatureShortNames() spot_feature_dimensions = feature_model.getSpotFeatureDimensions() spot_feature_is_int = feature_model.getSpotFeatureIsInt() spot_features.add(feature) spot_feature_names[feature] = name spot_feature_short_names[feature] = short_name spot_feature_dimensions[feature] = dimension spot_feature_is_int[feature] = is_int return
9,816
def getEHfields(m1d, sigma, freq, zd, scaleUD=True, scaleValue=1): """Analytic solution for MT 1D layered earth. Returns E and H fields. :param discretize.base.BaseMesh, object m1d: Mesh object with the 1D spatial information. :param numpy.ndarray, vector sigma: Physical property of conductivity corresponding with the mesh. :param float, freq: Frequency to calculate data at. :param numpy.ndarray, vector zd: location to calculate EH fields at :param bool, scaleUD: scales the output to be scaleValue at the top, increases numerical stability. Assumes a halfspace with the same conductive as the deepest cell. """ # Note add an error check for the mesh and sigma are the same size. # Constants: Assume constant mu = mu_0 * np.ones((m1d.nC + 1)) eps = eps_0 * np.ones((m1d.nC + 1)) # Angular freq w = 2 * np.pi * freq # Add the halfspace value to the property sig = np.concatenate((np.array([sigma[0]]), sigma)) # Calculate the wave number k = np.sqrt(eps * mu * w ** 2 - 1j * mu * sig * w) # Initiate the propagation matrix, in the order down up. UDp = np.zeros((2, m1d.nC + 1), dtype=complex) UDp[ 1, 0 ] = scaleValue # Set the wave amplitude as 1 into the half-space at the bottom of the mesh # Loop over all the layers, starting at the bottom layer for lnr, h in enumerate(m1d.hx): # lnr-number of layer, h-thickness of the layer # Calculate yp1 = k[lnr] / (w * mu[lnr]) # Admittance of the layer below the current layer zp = (w * mu[lnr + 1]) / k[lnr + 1] # Impedance in the current layer # Build the propagation matrix # Convert fields to down/up going components in layer below current layer Pj1 = np.array([[1, 1], [yp1, -yp1]], dtype=complex) # Convert fields to down/up going components in current layer Pjinv = 1.0 / 2 * np.array([[1, zp], [1, -zp]], dtype=complex) # Propagate down and up components through the current layer elamh = np.array( [[np.exp(-1j * k[lnr + 1] * h), 0], [0, np.exp(1j * k[lnr + 1] * h)]] ) # The down and up component in current layer. UDp[:, lnr + 1] = elamh.dot(Pjinv.dot(Pj1)).dot(UDp[:, lnr]) if scaleUD: # Scale the values such that 1 at the top scaleVal = UDp[:, lnr + 1 :: -1] / UDp[1, lnr + 1] if np.any(~np.isfinite(scaleVal)): # If there is a nan (thickness very great), rebuild the move up cell scaleVal = np.zeros_like(UDp[:, lnr + 1 :: -1], dtype=complex) scaleVal[1, 0] = scaleValue UDp[:, lnr + 1 :: -1] = scaleVal # Calculate the fields Ed = np.empty((zd.size,), dtype=complex) Eu = np.empty((zd.size,), dtype=complex) Hd = np.empty((zd.size,), dtype=complex) Hu = np.empty((zd.size,), dtype=complex) # Loop over the layers and calculate the fields # In the halfspace below the mesh dup = m1d.vectorNx[0] dind = dup >= zd Ed[dind] = UDp[1, 0] * np.exp(-1j * k[0] * (dup - zd[dind])) Eu[dind] = UDp[0, 0] * np.exp(1j * k[0] * (dup - zd[dind])) Hd[dind] = (k[0] / (w * mu[0])) * UDp[1, 0] * np.exp(-1j * k[0] * (dup - zd[dind])) Hu[dind] = -(k[0] / (w * mu[0])) * UDp[0, 0] * np.exp(1j * k[0] * (dup - zd[dind])) for ki, mui, epsi, dlow, dup, Up, Dp in zip( k[1::], mu[1::], eps[1::], m1d.vectorNx[:-1], m1d.vectorNx[1::], UDp[0, 1::], UDp[1, 1::], ): dind = np.logical_and(dup >= zd, zd > dlow) Ed[dind] = Dp * np.exp(-1j * ki * (dup - zd[dind])) Eu[dind] = Up * np.exp(1j * ki * (dup - zd[dind])) Hd[dind] = (ki / (w * mui)) * Dp * np.exp(-1j * ki * (dup - zd[dind])) Hu[dind] = -(ki / (w * mui)) * Up * np.exp(1j * ki * (dup - zd[dind])) # Return return the fields return Ed, Eu, Hd, Hu
9,817
def nested_tags(dst_client, run_ids_mapping):
    """
    Set the new parentRunId for new imported child runs.
    """
    for _, (dst_run_id, src_parent_run_id) in run_ids_mapping.items():
        if src_parent_run_id:
            dst_parent_run_id, _ = run_ids_mapping[src_parent_run_id]
            dst_client.set_tag(dst_run_id, "mlflow.parentRunId", dst_parent_run_id)
9,818
def write_dataset(src_dir, dst_file, limit_rows=0): """Reads JSON files downloaded by the Crawler and writes a CSV file from their data. The CSV file will have the following columns: - repo_id: Integer - issue_number: Integer - issue_title: Text - issue_body_md: Text, in Markdown format, can be empty - issue_body_plain: Text, in plain text, can be empty - issue_created_at: Integer, in Unix time - issue_author_id: Integer - issue_author_association: Integer enum (see values below) - issue_label_ids: Comma-separated integers, can be empty - pull_number: Integer - pull_created_at: Integer, in Unix time - pull_merged_at: Integer, in Unix time - pull_comments: Integer - pull_review_comments: Integer - pull_commits: Integer - pull_additions: Integer - pull_deletions: Integer - pull_changed_files: Integer The value of issue_body_plain is converted from issue_body_md. The conversion is not always perfect. In some cases, issue_body_plain still contains some Markdown tags. The value of issue_author_association can be one of the following: - 0: Collaborator - 1: Contributor - 2: First-timer - 3: First-time contributor - 4: Mannequin - 5: Member - 6: None - 7: Owner Rows are sorted by repository owner username, repository name, pull request number, and then issue number. The source directory must contain owner/repo/issue-N.json and owner/repo/pull-N.json files. The destination directory of Crawler should normally be used as the source directory of Writer. The destination file will be overwritten if it already exists. Args: src_dir (str): Source directory. dst_file (str): Destination CSV file. limit_rows (int): Maximum number of rows to write. """ repo_full_names = [] repo_num_rows = [] total_num_rows = 0 def print_results(): for r, n in zip(repo_full_names, repo_num_rows): print('{}: {:,}'.format(r, n)) print('Total: {:,}'.format(total_num_rows)) with open(dst_file, 'w', newline='') as dataset_file: dataset = csv.writer(dataset_file) dataset.writerow(_dataset_header) owner_repo_pairs = _sorted_owner_repo_pairs(src_dir) num_repos = len(owner_repo_pairs) for i, (owner, repo) in enumerate(owner_repo_pairs): repo_full_name = '{}/{}'.format(owner, repo) repo_full_names.append(repo_full_name) repo_num_rows.append(0) print('{} ({:,}/{:,})'.format(repo_full_name, i + 1, num_repos)) for pull_number in tqdm(_sorted_pull_numbers(src_dir, owner, repo)): pull = _read_json(_pull_path_template.format(src_dir=src_dir, owner=owner, repo=repo, pull_number=pull_number)) pull['linked_issue_numbers'].sort() for issue_number in pull['linked_issue_numbers']: issue = _read_json(_issue_path_template.format(src_dir=src_dir, owner=owner, repo=repo, issue_number=issue_number)) dataset.writerow(_dataset_row(issue, pull)) repo_num_rows[i] += 1 total_num_rows += 1 if total_num_rows == limit_rows: print('Limit of {:,} rows reached'.format(limit_rows)) print_results() return print('Finished') print_results()
9,819
def repeated(f, n):
    """Returns a function that takes in an integer and computes
    the nth application of f on that integer.
    Implement using recursion!

    >>> add_three = repeated(lambda x: x + 1, 3)
    >>> add_three(5)
    8
    >>> square = lambda x: x ** 2
    >>> repeated(square, 2)(5)  # square(square(5))
    625
    >>> repeated(square, 4)(5)  # square(square(square(square(5))))
    152587890625
    >>> repeated(square, 0)(5)
    5
    >>> from construct_check import check
    >>> # ban iteration
    >>> check(HW_SOURCE_FILE, 'repeated',
    ...       ['For', 'While'])
    True
    """
    if n == 0:
        return identity
    else:
        return compose1(f, repeated(f, n - 1))
9,820
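The row above relies on identity and compose1, which are not included; a minimal sketch of what such helpers typically look like (an assumption, not the original definitions) is:

def identity(x):
    # Return the argument unchanged; used as the n == 0 base case of repeated.
    return x

def compose1(f, g):
    # Return a one-argument function computing f(g(x)).
    return lambda x: f(g(x))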
def method_functions(): """ Returns a dictionary containing the valid method keys and their corresponding dispersion measure functions. """ return _available
9,821
def get_file_names(maindir, sessid, expid, segid, date, mouseid, runtype="prod", mouse_dir=True, check=True): """ get_file_names(maindir, sessionid, expid, date, mouseid) Returns the full path names of all of the expected data files in the main directory for the specified session and experiment on the given date that can be used for the Credit Assignment analysis. Required args: - maindir (str): name of the main data directory - sessid (int) : session ID (9 digits) - expid (str) : experiment ID (9 digits) - segid (str) : segmentation ID (9 digits) - date (str) : date for the session in YYYYMMDD, e.g. "20160802" - mouseid (str): mouse 6-digit ID string used for session files Optional args: - runtype (str) : "prod" (production) or "pilot" data default: "prod" - mouse_dir (bool): if True, session information is in a "mouse_*" subdirectory default: True - check (bool) : if True, checks whether the files and directories in the output dictionaries exist (with a few exceptions) default: True Returns: - dirpaths (dict): dictionary of directory paths ["expdir"] (str) : full path name of the experiment directory ["procdir"] (str) : full path name of the processed directory ["demixdir"] (str): full path name of the demixed directory ["segdir"] (str) : full path name of the segmentation directory - filepaths (dict): dictionary of file paths ["behav_video_h5"] (str) : full path name of the behavioral hdf5 video file ["pupil_video_h5"] (str) : full path name of the pupil hdf5 video file ["roi_extract_json"] (str) : full path name of the ROI extraction json ["roi_objectlist_txt"] (str): full path to ROI object list txt ["stim_pkl"] (str) : full path name of the stimulus pickle file ["stim_sync_h5"] (str) : full path name of the stimulus synchronization hdf5 file ["time_sync_h5"] (str) : full path name of the time synchronization hdf5 file Existence not checked: ["align_pkl"] (str) : full path name of the stimulus alignment pickle file ["corrected_data_h5"] (str) : full path name of the motion corrected 2p data hdf5 file ["roi_trace_h5"] (str) : full path name of the ROI raw processed fluorescence trace hdf5 file (allen version) ["roi_trace_dff_h5"] (str) : full path name of the ROI dF/F trace hdf5 file (allen version) ["zstack_h5"] (str) : full path name of the zstack 2p hdf5 file """ sessdir, expdir, procdir, demixdir, segdir = get_sess_dirs( maindir, sessid, expid, segid, mouseid, runtype, mouse_dir, check) roi_trace_paths = get_roi_trace_paths( maindir, sessid, expid, segid, mouseid, runtype, mouse_dir, dendritic=False, check=False) # will check below, if required # set the file names sess_m_d = f"{sessid}_{mouseid}_{date}" dirpaths = {"expdir" : expdir, "procdir" : procdir, "segdir" : segdir, "demixdir": demixdir } filepaths = {"align_pkl" : os.path.join(sessdir, f"{sess_m_d}_df.pkl"), "behav_video_h5" : os.path.join(sessdir, f"{sess_m_d}_video-0.h5"), "correct_data_h5" : os.path.join(procdir, "concat_31Hz_0.h5"), "pupil_video_h5" : os.path.join(sessdir, f"{sess_m_d}_video-1.h5"), "roi_extract_json" : os.path.join(procdir, f"{expid}_input_extract_traces.json"), "roi_trace_h5" : roi_trace_paths["roi_trace_h5"], "roi_trace_dff_h5" : roi_trace_paths["roi_trace_dff_h5"], "roi_objectlist_txt": os.path.join(segdir, "objectlist.txt"), "stim_pkl" : os.path.join(sessdir, f"{sess_m_d}_stim.pkl"), "stim_sync_h5" : os.path.join(sessdir, f"{sess_m_d}_sync.h5"), "time_sync_h5" : os.path.join(expdir, f"{expid}_time_synchronization.h5"), "zstack_h5" : os.path.join(sessdir, f"{sessid}_zstack_column.h5"), } if check: 
# files not to check for (are created if needed or should be checked # when needed, due to size) no_check = ["align_pkl", "correct_data_h5", "zstack_h5", "roi_trace_h5", "roi_trace_dff_h5"] for key in filepaths.keys(): if key not in no_check: file_util.checkfile(filepaths[key]) return dirpaths, filepaths
9,822
def get_key_vault_output(name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKeyVaultResult]: """ Use this data source to access information about an existing Key Vault. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.keyvault.get_key_vault(name="mykeyvault", resource_group_name="some-resource-group") pulumi.export("vaultUri", example.vault_uri) ``` :param str name: Specifies the name of the Key Vault. :param str resource_group_name: The name of the Resource Group in which the Key Vault exists. """ ...
9,823
def generate_auth_token():
    """Generate a token using jwt.

    Returns:
        token.
    """
    key = PRIVATE_KEY
    data = {'appId': APPLICATION_ID}
    token = jwt.encode(data, key, algorithm='RS256')
    return token
9,824
def dict_expand(d: Dict[Any, Any]) -> List[Dict[Any, Any]]:
    """Converts a dictionary of lists to a list of dictionaries.

    The resulting list will be of the same length as the longest dictionary
    value. If any values are not lists then they will be repeated to the
    required length.

    Args:
        d: The dictionary of arrays to expand.

    Returns:
        The resulting list of dictionaries.
    """
    size = max([_len_arg(arg) for arg in d.values()])
    d = {k: _expand_arg(v, size) for k, v in d.items()}
    return [{k: v[i] for k, v in d.items()} for i in range(size)]
9,825
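dict_expand depends on two private helpers that are not part of the row above; a plausible sketch of them, inferred from the docstring (both are assumptions, not the original code), plus an example call:

def _len_arg(arg):
    # Length of a list-valued argument; non-list values count as length 1.
    return len(arg) if isinstance(arg, list) else 1

def _expand_arg(arg, size):
    # Repeat non-list values so every value becomes a list of the same length.
    return arg if isinstance(arg, list) else [arg] * size

# Example: dict_expand({'a': [1, 2, 3], 'b': 0})
# -> [{'a': 1, 'b': 0}, {'a': 2, 'b': 0}, {'a': 3, 'b': 0}]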
def get_asexual_lineage_num_discrete_state_changes(lineage, attribute_list): """Get the number of discrete state changes from an asexual lineage. State is described by the aggregation of all attributes give by attribute list. Args: lineage (networkx.DiGraph): an asexual lineage attribute_list (list): list of attributes (strings) to use when defining a state Returns: Returns the number of discrete states along the lineage. """ # Check that lineage is an asexual lineage. if not utils.is_asexual_lineage(lineage): raise Exception("the given lineage is not an asexual lineage") # Check that all nodes have all given attributes in the attribute list if not utils.all_taxa_have_attributes(lineage, attribute_list): raise Exception("given attributes are not universal among all taxa along the lineage") # get the first state (root node) lineage_id = utils.get_root_ids(lineage)[0] num_states = 1 cur_state = [lineage.nodes[lineage_id][attr] for attr in attribute_list] # count the number of state changes moving down the lineage while True: successor_ids = list(lineage.successors(lineage_id)) if len(successor_ids) == 0: break # We've hit the last thing! lineage_id = successor_ids[0] state = [lineage.nodes[lineage_id][attr] for attr in attribute_list] if cur_state != state: cur_state = state num_states += 1 return num_states
9,826
def check_downloaded(dataset: str, directory: str = None) -> bool:
    """
    Check whether dataset is downloaded

    Args:
        dataset (str): String of dataset's name, e.g. ml-100k, bx
        directory (str, optional): String of directory of downloaded data. Defaults to None.

    Returns:
        bool: Boolean flag to show if the dataset is downloaded, i.e. name of dataset
            is in the list of subdirectory in input directory.
    """
    return dataset in get_downloaded_data(directory=directory)
9,827
def do_main(title):
    """main skeleton"""
    filename = title + "_ref"
    lst = read_distr_list(filename + ".txt")
    with open(filename + ".csv", "wt") as fout:
        for (ex, name, args) in lst:
            d = to_scipy_dist(name, args)
            m = d.mean()
            v = d.var()
            ent = d.entropy()
            x25 = d.ppf(0.25)
            x50 = d.ppf(0.50)
            x75 = d.ppf(0.75)
            if title == "discrete":
                lp25 = d.logpmf(x25)
                lp50 = d.logpmf(x50)
                lp75 = d.logpmf(x75)
            else:
                lp25 = d.logpdf(x25)
                lp50 = d.logpdf(x50)
                lp75 = d.logpdf(x75)
            # workaround inconsistency of definitions
            if name == "Geometric":
                x25 -= 1
                x50 -= 1
                x75 -= 1
                m -= 1
            # Python 3 print function (the original used the Python 2 ``print >>fout`` form)
            if title == "discrete":
                print('"%s", %.16e, %.16e, %.16e, %d, %d, %d, %.16e, %.16e, %.16e' % (
                    ex, m, v, ent, x25, x50, x75, lp25, lp50, lp75), file=fout)
            else:
                print('"%s", %.16e, %.16e, %.16e, %.16e, %.16e, %.16e, %.16e, %.16e, %.16e' % (
                    ex, m, v, ent, x25, x50, x75, lp25, lp50, lp75), file=fout)
9,828
def enterprise_1_9_installer() -> Path: """ Return the path to an installer for DC/OS Enterprise 1.9. """ return Path('/tmp/dcos_generate_config_1_9.ee.sh')
9,829
def Laplacian(n):
    """
    Create Laplacian on 2-dimensional grid with n*n nodes
    """
    B = forward_diff_matrix(n)
    D = -B.T @ B
    Dx = sparse.kron(sparse.eye(n), D).tocsr()
    Dy = sparse.kron(D, sparse.eye(n)).tocsr()
    return Dx + Dy
9,830
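Laplacian above calls a forward_diff_matrix helper that is not part of the row; a common sketch of such a helper built with scipy.sparse (an assumption, not the project's actual definition) looks like:

import numpy as np
from scipy import sparse

def forward_diff_matrix(n):
    # (n-1) x n forward-difference operator: (B @ x)[i] = x[i+1] - x[i].
    data = np.ones(n - 1)
    return sparse.diags([-data, data], [0, 1], shape=(n - 1, n)).tocsr()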
def move_clip_down(base_node, clip, comparison_response): """ Moves a given clip to a lower node the the redblack search/sort tree. If possible it recursively moves the clip multiple steps down, but usually just goes one step. Raises NewNodeNeeded if it hits the bottom of the tree. """ # Okay, this is a little bit weird. Sorry for the awkwardness. # When the user responds "left" they're saying that the left clip is BETTER # In the redblack tree, left-nodes are worse, and right-nodes are better. # Since clip1 is the left clip, we want to move it left in the tree if the user says "right". move_left = (comparison_response == "right") if move_left and base_node.left: comparisons = Comparison.objects.filter(tree_node=base_node.left, left_clip=clip).exclude(response=None) if comparisons: move_clip_down(base_node.left, clip, comparisons[0].response) else: print(base_node.left, "gains", clip, "as new pending clip!") base_node.left.pending_clips.add(clip) elif (not move_left) and base_node.right: comparisons = Comparison.objects.filter(tree_node=base_node.right, left_clip=clip).exclude(response=None) if comparisons: move_clip_down(base_node.right, clip, comparisons[0].response) else: print(base_node.right, "gains", clip, "as new pending clip!") base_node.right.pending_clips.add(clip) else: raise NewNodeNeeded(move_left)
9,831
def LookupGitSVNRevision(directory, depth): """ Fetch the Git-SVN identifier for the local tree. Parses first |depth| commit messages. Errors are swallowed. """ if not IsGitSVN(directory): return None git_re = re.compile(r'^\s*git-svn-id:\s+(\S+)@(\d+)') proc = RunGitCommand(directory, ['log', '-' + str(depth)]) if proc: for line in proc.stdout: match = git_re.match(line) if match: id = match.group(2) if id: proc.stdout.close() # Cut pipe for fast exit. return id return None
9,832
def collect_jars( dep_targets, dependency_analyzer_is_off = True, unused_dependency_checker_is_off = True, plus_one_deps_is_off = True): """Compute the runtime and compile-time dependencies from the given targets""" # noqa if dependency_analyzer_is_off: return _collect_jars_when_dependency_analyzer_is_off( dep_targets, unused_dependency_checker_is_off, plus_one_deps_is_off, ) else: return _collect_jars_when_dependency_analyzer_is_on(dep_targets)
9,833
def describe_vpn_gateways(DryRun=None, VpnGatewayIds=None, Filters=None): """ Describes one or more of your virtual private gateways. For more information about virtual private gateways, see Adding an IPsec Hardware VPN to Your VPC in the Amazon Virtual Private Cloud User Guide . See also: AWS API Documentation :example: response = client.describe_vpn_gateways( DryRun=True|False, VpnGatewayIds=[ 'string', ], Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ] ) :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation . Otherwise, it is UnauthorizedOperation . :type VpnGatewayIds: list :param VpnGatewayIds: One or more virtual private gateway IDs. Default: Describes all your virtual private gateways. (string) -- :type Filters: list :param Filters: One or more filters. attachment.state - The current state of the attachment between the gateway and the VPC (attaching | attached | detaching | detached ). attachment.vpc-id - The ID of an attached VPC. availability-zone - The Availability Zone for the virtual private gateway (if applicable). state - The state of the virtual private gateway (pending | available | deleting | deleted ). tag :key =*value* - The key/value combination of a tag assigned to the resource. Specify the key of the tag in the filter name and the value of the tag in the filter value. For example, for the tag Purpose=X, specify tag:Purpose for the filter name and X for the filter value. tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter 'tag-key=Purpose' and the filter 'tag-value=X', you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag :key =*value* filter. tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter. type - The type of virtual private gateway. Currently the only supported type is ipsec.1 . vpn-gateway-id - The ID of the virtual private gateway. (dict) --A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria, such as tags, attributes, or IDs. Name (string) --The name of the filter. Filter names are case-sensitive. Values (list) --One or more filter values. Filter values are case-sensitive. (string) -- :rtype: dict :return: { 'VpnGateways': [ { 'VpnGatewayId': 'string', 'State': 'pending'|'available'|'deleting'|'deleted', 'Type': 'ipsec.1', 'AvailabilityZone': 'string', 'VpcAttachments': [ { 'VpcId': 'string', 'State': 'attaching'|'attached'|'detaching'|'detached' }, ], 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ] }, ] } """ pass
9,834
def _checkBounds(_datetimes, datetimes):
    """ """
    dt_min = np.min(datetimes)
    dt__min = np.min(_datetimes)
    dt_max = np.max(datetimes)
    dt__max = np.max(_datetimes)
    if dt_min < dt__min:
        raise Exception("{} is below reference datetimes {}".format(dt_min, dt__min))
    if dt_max > dt__max:
        raise Exception("{} is above reference datetimes {}".format(dt_max, dt__max))
9,835
def _cred1_adapter(user=None, password=None): """Just a sample adapter from one user/pw type to another""" return dict(user=user + "_1", password=password + "_2")
9,836
def prepare(path, dest): """ Extend file and prepend dictionary to it """ with open(path, 'rb') as f: data = f.read() prepare_file(data, dest)
9,837
def acc_metric(y_true, y_pred): """ Accuracy """ diff = K.abs(y_pred - y_true) * 5000 return K.mean(diff, axis=-1)
9,838
def prepare_env(): """This function implements main logic of preparation Virtual machines for Kubernetes installation: * Read configs from files. * Validates Nutanix environment. * Prepare base Virtual Machine. * Clone base Virtual Machine. * Turn on Virtual Machines. * Read Virtual Machines IP's * Generate **Kubespray** inventory file. """ logger.info('Reading environment variables') k8s_cluster_name = os.environ[K8S_CLUSTER_ENV] if not DOMAIN_NAME.match(k8s_cluster_name): raise ConfigurationError(ConfigurationError.INVALID_DOMAIN) nutanix_cluster_name = os.environ[NUTANIX_CLUSTER_ENV] ( k8s_common_config, k8s_master_config, k8s_worker_config) = get_kubernetes_config(os.path.abspath(K8S_CONFIG)) nutanix = Nutanix( os.path.abspath(NUTANIX_CONFIG), nutanix_cluster_name ) logger.info('Check if there are any %s cluster vms', k8s_cluster_name) nutanix.get_vms(k8s_cluster_name, expected_count=0) logger.info('There is no vm with %s in name. Proceeding', k8s_cluster_name) logger.info('Get network configuration') try: network_name = k8s_common_config['network_name'] except KeyError: raise ConfigurationError(ConfigurationError.MISSING_FIELD.format('network_name')) else: network = nutanix.get_network(network_name) logger.info('Get or create Centos cloud image') try: os_image_name = k8s_common_config['os_image_name'] storage_container_name = k8s_common_config['storage_container_name'] except KeyError: raise ConfigurationError(ConfigurationError.MISSING_FIELD.format('storage_container_name')) else: os_image = nutanix.get_or_create_os_image( os_image_name, storage_container_name, OS_IMAGE_URL ) logger.info('Get or create base vm') base_vm = nutanix.get_or_create_vm( BASE_VM_CPU, BASE_VM_RAM, BASE_VM_DISK, os.environ[BASE_VM_ENV], network['uuid'], os_image['vm_disk_id'], generate_cloud_config(os.environ[SSH_DIR_ENV]) ) # TODO: prepopulate docker images logger.info('Clone vms') nutanix.clone_vm( vm_uuid=base_vm['uuid'], configs=(k8s_master_config, k8s_worker_config), vm_domain=k8s_cluster_name ) expected_count = k8s_master_config['number_of_nodes'] + k8s_worker_config['number_of_nodes'] logger.info( 'Check if there all(%s) vms for %s cluster were created.', expected_count, k8s_cluster_name ) nutanix.get_vms(k8s_cluster_name, expected_count=expected_count) logger.info('Turn on vms') for vm_uuid in nutanix.get_vms_property(k8s_cluster_name, 'uuid').values(): nutanix.set_vm_power(vm_uuid, 'on') # Waiting for Virtual Machines to be fully running. logger.info('Get vms ips') vms_with_ips = nutanix.get_vms_property(k8s_cluster_name, 'ipAddresses') while not all(vms_with_ips.values()): logger.info( 'Not all ips assigned. Waiting %s seconds before another check', Nutanix.SLEEP_TIME ) time.sleep(Nutanix.SLEEP_TIME) vms_with_ips = nutanix.get_vms_property(k8s_cluster_name, 'ipAddresses') logger.info('Generate ansible inventory') inventory_lines = [ '{} ansible_ssh_host={}'.format( name, node_ips[0] ) for name, node_ips in vms_with_ips.iteritems() ] inventory_lines.append('\n[kube-master]') inventory_lines.extend( [name for name in vms_with_ips if name.startswith('master')] ) inventory_lines.append('\n[kube-node]') inventory_lines.extend( [name for name in vms_with_ips if name.startswith('worker')] ) inventory_lines.extend(INVENTORY_CONST) with open(INVENTORY_FILE, 'w') as inventory: inventory.write('\n'.join(inventory_lines)) with open(INVENTORY_FILE, 'r') as inventory: logger.debug('Created inventory file:\n%s', inventory.read()) logger.info('Inventory successfully generated. Moving to Kargo part.')
9,839
def whereSnippet(snippet): """ Show which snippets (note the plural) would be used, in order of precedence. """ logger.debug("Called whereSnippet(%s)..." % snippet)
9,840
def promote(source, dest): """Promote the docker image from SOURCE to DEST. Promote a docker image, for example promote the docker image for the local environment to the dev environment. Promoting a docker images involves re-tagging the image locally with the proper label and then pushing the image to Google Container Registry. SOURCE and DEST must be valid environments for twentyquestions. """ registry = settings.CONTAINER_REGISTRY_URL docker_repo = settings.CONTAINER_REGISTRY_USER image_name = settings.CONTAINER_REGISTRY_IMAGE_NAME logger.info(f'Promoting {source} to {dest}') subprocess.run([ 'docker', 'tag', f'{registry}/{docker_repo}/{image_name}:{source}', f'{registry}/{docker_repo}/{image_name}:{dest}' ]) logger.info(f'Pushing {dest} to GCR') subprocess.run([ 'docker', 'push', f'{registry}/{docker_repo}/{image_name}:{dest}' ])
9,841
async def confirm(message: discord.Message, fallback: str = None) -> bool: """ Helper function to send a checkmark reaction on a message. This would be used for responding to a user that an action completed successfully, without sending a whole other message. If a checkmark reaction cannot be added, the optional `fallback` message will be sent instead. :param discord.Message message: The message to add the reaction to. :param str fallback: The fallback message to be sent to the channel, if the reaction could not be added. :return: Whether confirming the message succeeded. """ try: await message.add_reaction("☑") except: pass else: return True if fallback is None: return False # now still executing only if the above failed try: await message.channel.send(fallback) except: return False # we weren't able to send any feedback to the user at all else: return True
9,842
def get_layer_version( lambda_client: BaseClient, layer_name: str, version: int, ) -> "definitions.LambdaLayer": """Retrieve the configuration for the specified lambda layer.""" return definitions.LambdaLayer( lambda_client.get_layer_version( LayerName=layer_name, VersionNumber=version, ) )
9,843
def get_detail_msg(detail_url):
    """
    2. Fetch the detailed data for a single job posting.
    :param detail_url: URL of the job detail page
    :return: job data
    """
    # print('Requesting detail URL: ' + detail_url)
    response = requests.get(detail_url, headers=HEADERS)
    html_element = etree.HTML(response.text)
    position = {}
    # [data] job title
    title = html_element.xpath('//tr[@class="h"]/td/text()')[0]
    position['title'] = title
    # [data] work location / job category
    top_infos = html_element.xpath('//tr[@class="c bottomline"]//text()')
    position['location'] = top_infos[top_infos.index('工作地点:') + 1]
    position['category'] = top_infos[top_infos.index('职位类别:') + 1]
    content_infos = html_element.xpath('//ul[@class="squareli"]')
    # [data] job duties
    work_do_info = content_infos[0]
    position['duty'] = work_do_info.xpath("./li/text()")
    # [data] job requirements
    work_ask_info = content_infos[1]
    position['ask'] = work_ask_info.xpath('./li/text()')
    return position
9,844
def wizard_process_received_form(form):
    """
    Processing of form received during the time measure
    Expected result example: {1: '00:43.42', 2: '00:41.35', 3: '00:39.14', 4: '00:27.54'}
    """
    lines = {key.split('_')[1]: value.split('_')[1]
             for key, value in form.items() if key.startswith("line")}
    # print(lines)
    times = {key.split('_')[1]: value
             for key, value in form.items() if key.startswith("time")}
    # print(times)
    return {int(value): times[key] for key, value in lines.items()}
9,845
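A hedged example of the kind of form payload the function above appears to expect; the exact key and value naming is an assumption inferred from the split('_') logic, not taken from the original project:

# Hypothetical submitted-form dict; key/value shapes are assumptions.
form = {
    "line_1": "lane_1", "time_1": "00:43.42",
    "line_2": "lane_2", "time_2": "00:41.35",
}
# wizard_process_received_form(form) -> {1: '00:43.42', 2: '00:41.35'}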
def tmp_file(request): """ Create a temporary file with a FASTQ suffix. :param request: SubRequest with ``param`` member which has \ a FASTQ suffix :type request: _pytest.fixtures.SubRequest :return: path to temporary file :rtype: str or unicode """ _, tmp_file = tempfile.mkstemp(prefix="tmp", suffix="." + request.param) yield tmp_file if os.path.exists(tmp_file): os.remove(tmp_file)
9,846
def getsource(obj,is_binary=False): """Wrapper around inspect.getsource. This can be modified by other projects to provide customized source extraction. Inputs: - obj: an object whose source code we will attempt to extract. Optional inputs: - is_binary: whether the object is known to come from a binary source. This implementation will skip returning any output for binary objects, but custom extractors may know how to meaningfully process them.""" if is_binary: return None else: return inspect.getsource(obj)
9,847
def test_mix_cells_from_gds_and_from_function2(): """Ensures not duplicated cell names. when cells loaded from GDS and have the same name as a function with @cell decorator """ c = gf.Component("test_mix_cells_from_gds_and_from_function") c << gf.c.mzi() c << gf.import_gds(gdspath) c.write_gds() c.show()
9,848
def take_screenshot(): """Saves a screenshot of the current window in the 'screenshots' directory.""" timestamp = f"{pd.Timestamp('today'):%Y-%m-%d %I-%M %p}" path = ''.join(( './screenshots/', 'screenshot ', timestamp, '.png') ) driver.save_screenshot(path)
9,849
def extract_sector_id(room):
    """Given a room identifier of the form:

    'aaa-bbb-cc-d-e-123[abcde]'

    Return the sector id: '123'
    """
    m = re.search(r'(?P<sector_id>\d+)', room)
    return m.group('sector_id') if m else None
9,850
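A quick usage check for extract_sector_id, based only on the example already given in its docstring:

import re  # the snippet assumes re is imported at module level

assert extract_sector_id('aaa-bbb-cc-d-e-123[abcde]') == '123'
assert extract_sector_id('no-digits-here') is None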
def test_Covmat(): """ Test the Covmat function by checking that its inverse function is Qmat """ n = 1 B = np.random.rand(n, n) + 1j * np.random.rand(n, n) B = B + B.T sc = find_scaling_adjacency_matrix(B, 1) idm = np.identity(2 * n) X = Xmat(n) Bsc = sc * B A = np.block([[Bsc, 0 * Bsc], [0 * Bsc, Bsc.conj()]]) Q = np.linalg.inv(idm - X @ A) cov = Covmat(Q) Qrec = Qmat(cov) assert np.allclose(Q, Qrec)
9,851
def splitext_all(_filename):
    """split all extensions (after the first .) from the filename
    should work similar to os.path.splitext (but that splits only the last extension)
    """
    _name, _extensions = _filename.split('.')[0], '.'.join(_filename.split('.')[1:])
    return (_name, "." + _extensions)
9,852
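For comparison with os.path.splitext, this is how the helper above behaves on a double extension (derived directly from the code):

import os.path

splitext_all("archive.tar.gz")      # -> ("archive", ".tar.gz")
os.path.splitext("archive.tar.gz")  # -> ("archive.tar", ".gz")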
def catch(func, *args, **kw):
    """Catch most exceptions in 'func' and prints them.

    Use to decorate top-level functions and commands only.
    """
    # d: print("calling %s with args %s, %s" % (func.__name__, args, kw))
    try:
        return func(*args, **kw)
    except Exception as e:
        print(e)
        # TODO consider:
        # if e.message:
        #     print(e.message)
        # else:
        #     print(e)
9,853
def tag_in_tags(entity, attribute, value): """ Return true if the provided entity has a tag of value in its tag list. """ return value in entity.tags
9,854
def generate_finding_title(title): """ Generate a consistent title for a finding in AWS Security Hub * Setup as a function for consistency """ return "Trend Micro: {}".format(title)
9,855
def makeHexagon(x,y,w,h): """Return hexagonal QPolygon. (x,y) is top left coner""" points=[] cos=[1.,0.5,-0.5,-1,-0.5,0.5] sin=[0,0.866025,0.866025,0,-0.866025,-0.866025] for i in range(len (cos)): points.append(QPoint(x+w*cos[i],y+h*sin[i])) return QPolygonF(points)
9,856
def save_conv_output(activations, name):
    """
    Saves layer output in activations dict with name key
    """
    def get_activation(m, i, o):
        activations[name] = F.relu(o).data.cpu().numpy()
    return get_activation
9,857
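save_conv_output returns a closure meant to be registered as a PyTorch forward hook; a minimal usage sketch (the model, layer name, and input below are assumptions for illustration) is:

import torch.nn.functional as F  # the snippet references F.relu

activations = {}
# Hypothetical model; register the returned closure on one of its layers.
hook_handle = model.conv1.register_forward_hook(
    save_conv_output(activations, "conv1"))
model(input_batch)   # forward pass fills activations["conv1"]
hook_handle.remove()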
async def get_profile_xp(user_id: int): """ Get a user's profile xp. :param user_id: Discord User ID """ return (await self.conn.fetchrow("SELECT profilexp FROM currency.levels WHERE userid = $1", user_id))[0]
9,858
def tryrmcache(dir_name, verbose=False): """ removes all __pycache__ starting from directory dir_name all the way to leaf directory Args: dir_name(string) : path from where to start removing pycache """ # directory_list = list() is_removed = False for root, dirs, _ in os.walk(dir_name, topdown=False): for name in dirs: # directory_list.append(os.path.join(root, name)) if name == "__pycache__": shutil.rmtree(os.path.join(root, name)) is_removed = True if verbose: if is_removed: click.echo("[x] __pycache__ successfully deleted") else: click.echo("[ ] __pycache__ doesn't exist", err=True) return is_removed
9,859
def extract_codeblocks(): """Extract CodeBlocks files from archive to test directory. """ remove_test_directory("codeblocks-*") remove_test_directory("[Cc]ode[Bb]locks") extract_test_tar("codeblocks*.gz", "codeblocks*.tar", ["*.cpp", "*.cxx", "*.h"]) rename_test_directory("codeblocks-*", "CodeBlocks")
9,860
def _add_msg_to_file(filename, msg): """Add the message to the specified file Args: filename (str): path to the file msg (str): message to be appended to the file """ with open(filename, 'a+') as f: f.write('{0}\n'.format(msg))
9,861
def validate_raw_data(data: Optional[UserPackage]) -> bool: """Returns False if invalid data""" # NOTE: add more validation as more fields are required if data is None or data.contribs is None: return False if ( data.contribs.total_stats.commits_count > 0 and len(data.contribs.total_stats.languages) == 0 ): return False return True
9,862
def pretty_print_scene_objects(scene): """Pretty prints scene objects. Args: scene: Scene graph containing list of objects """ for index, ii in enumerate(scene['objects']): print_args = (index, ii['shape'], ii['color'], ii['size'], ii['material']) print('\t%d : %s-%s-%s-%s' % print_args)
9,863
def extractTheSunIsColdTranslations(item): """ Parser for 'The Sun Is Cold Translations' """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None if '108 maidens' in item['tags']: return buildReleaseMessageWithType(item, '108 Maidens of Destiny', vol, chp, frag=frag, postfix=postfix) if 'Back to the Apocalypse' in item['tags']: return buildReleaseMessageWithType(item, 'Back to the Apocalypse', vol, chp, frag=frag, postfix=postfix) return False
9,864
async def main(): """Turn on the fan light.""" async with aiomodernforms.ModernFormsDevice("192.168.3.197") as fan: await fan.update() print(fan.status) await fan.light( on=LIGHT_POWER_ON, brightness=50, sleep=datetime.now() + timedelta(minutes=2), ) print(fan.status)
9,865
def to_device(x, device):
    """Cast a hierarchical object to pytorch device"""
    if isinstance(x, torch.Tensor):
        return x.to(device)
    elif isinstance(x, dict):
        for k in list(x.keys()):
            x[k] = to_device(x[k], device)
        return x
    elif isinstance(x, list) or isinstance(x, tuple):
        return type(x)(to_device(t, device) for t in x)
    else:
        raise ValueError('Wrong type !')
9,866
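A small usage example for to_device on a nested batch structure; the device string is chosen for illustration:

import torch

batch = {"ids": torch.zeros(4, 8), "extras": [torch.ones(4), torch.ones(4)]}
batch = to_device(batch, "cpu")  # or "cuda:0" when a GPU is available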
def test_dwpli(): """Test the Debiased WPLI algorithm.""" # Groundtruth expected = np.load("groundtruth/fc/dwpli.npy") expected = np.nan_to_num(expected) # Data data = np.load("sample_data/fc/eeg_32chans_10secs.npy") # Run csdparams = {"NFFT": 256, "noverlap": 256 / 2.0} dwpliv = dwpli(data, [1.0, 4.0], 128.0, **csdparams) dwpliv = np.nan_to_num(dwpliv) # Test np.testing.assert_allclose(dwpliv, expected, rtol=1e-10, atol=0.0)
9,867
def git_show_oneline(obj):
    """Returns: One-line description of a git object `obj`, which is typically a commit.
    https://git-scm.com/docs/git-show
    """
    return exec_headline(['git', 'show', '--oneline', '--quiet', obj])
9,868
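exec_headline is not shown in this row; a plausible sketch of such a helper (an assumption, not the project's actual implementation) that returns the first line of a command's output:

import subprocess

def exec_headline(cmd):
    # Run the command and return the first line of its stdout, stripped.
    out = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
    return out.splitlines()[0].strip() if out else ""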
def like_post(): """ Like a post """ try: # This will prevent old code from adding invalid post_ids post_id = int(request.args.get('post_id', '-1')) if post_id < 0: return "No Post Found to like!" vote = (db_session.query(Vote) .filter(and_(Vote.object_id == post_id, Vote.user_id == current_user.id)) .first()) if not vote: vote = Vote(user_id=current_user.id, object_id=post_id) db_session.add(vote) db_session.commit() except Exception as e: logging.warning(f'ERROR processing request {e}') return ""
9,869
def login_post():
    """Process the submitted login form"""
    # Username the user typed into the form
    username = bottle.request.forms.user
    # Compute the MD5 hash of the password that we will store
    password = password_md5(bottle.request.forms.psw)
    # Check whether the user logged in correctly
    c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    c.execute("SELECT 1 FROM uporabnik WHERE username=%s AND geslo=%s", [username, password])
    if c.fetchone() is None:
        # Username and password do not match
        return bottle.template("login.html",
                               napaka="Nepravilna prijava",  # warning shown in the login template
                               username=username)  # keep the same username
    else:
        # Everything is fine, set the cookie and redirect to the main page
        bottle.response.set_cookie('username', username, path='/', secret=secret)
        bottle.redirect("/")
9,870
def parse_symbol_file(filepath, fapi=None): """Read in stock symbol list from a text file. Args: filepath: Path to file containing stock symbols, one per line. fapi: If this is supplied, the symbols read will be conformed to a financial API; currently 'google' or 'yahoo'. Returns: List of stock symbols; list may be empty if file could not be parsed. """ try: with open(filepath, 'r') as file_handle: symbols = [line.strip() for line in list(file_handle) if '#' not in line] if fapi: symbols = conform_symbols(symbols, fapi) except IOError: symbols = [] return symbols
9,871
def trader_tactic_snapshot(symbol, strategy, end_dt=None, file_html=None, fq=True, max_count=1000):
    """Take a strategy snapshot of any symbol at any moment using JoinQuant data

    :param symbol: trading symbol
    :param strategy: timing trading strategy
    :param end_dt: end time, to the minute
    :param file_html: output HTML file
    :param fq: whether to use price adjustment
    :param max_count: maximum number of K-line bars
    :return: trader
    """
    tactic = strategy(symbol)
    base_freq, freqs = tactic['base_freq'], tactic['freqs']
    bg, data = get_init_bg(symbol, end_dt, base_freq=base_freq, freqs=freqs, max_count=max_count, fq=fq)
    trader = create_advanced_trader(bg, data, strategy)
    if file_html:
        trader.take_snapshot(file_html)
        print(f'saved into {file_html}')
    else:
        trader.open_in_browser()
    return trader
9,872
def preprocess(batch):
    """
    Add zero-padding to a batch.
    """
    tags = [example.tag for example in batch]

    # add zero-padding to make all sequences equally long
    seqs = [example.words for example in batch]
    max_length = max(map(len, seqs))
    seqs = [seq + [PAD] * (max_length - len(seq)) for seq in seqs]

    return seqs, tags
9,873
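Illustrative input and output for preprocess, assuming PAD = 0 and a simple record type with words and tag fields (both are assumptions; the original constants are not in the row):

from collections import namedtuple

PAD = 0  # assumed padding symbol
Example = namedtuple("Example", ["words", "tag"])

batch = [Example([5, 2, 7], "pos"), Example([3], "neg")]
seqs, tags = preprocess(batch)
# seqs == [[5, 2, 7], [3, 0, 0]]; tags == ["pos", "neg"]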
def policy_update(updated_policies, removed_policies=None, policies=None, **kwargs): """Policy update task This method is responsible for updating the application configuration and notifying the applications that the change has occurred. This is to be used for the dcae.interfaces.policy.policy_update operation. :updated_policies: contains the list of changed policy-configs when configs_only=True (default) Use configs_only=False to bring the full policy objects in :updated_policies:. """ service_component_name = ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME] ctx.logger.info("policy_update for {0}-- updated_policies: {1}, removed_policies: {2}, policies: {3}" .format(service_component_name, updated_policies, removed_policies, policies)) update_inputs = copy.deepcopy(ctx.instance.runtime_properties) update_inputs["updated_policies"] = updated_policies update_inputs["removed_policies"] = removed_policies update_inputs["policies"] = policies resp = _notify_container(**update_inputs) ctx.logger.info("policy_update complete for {0}--notification results: {1}".format(service_component_name,json.dumps(resp)))
9,874
def append_include_as(include_match):
    """Convert ``#include x`` to ``#include x as y``, where appropriate; also,
    convert incorrect "as" statements. See INCLUDE_AS dict for mapping from
    resource to its "as" target.

    Parameters
    ----------
    include_match : re._pattern_type
        Match produced by INCLUDE_RE.match(string)

    Returns
    -------
    repl : string
        Replacement text for whatever comes after the "#include "
    """
    include_text = include_match.groups()[0]
    include_as_match = PISAConfigParser.INCLUDE_AS_RE.match(include_text)

    as_section = None
    if include_as_match:
        gd = include_as_match.groupdict()
        resource = gd['file']
        as_section = gd['as']
    else:
        resource = include_text

    if resource in INCLUDE_AS.keys():
        as_section = INCLUDE_AS[resource]

    if as_section is None:
        repl = '#include ' + resource
    else:
        repl = '#include %s as %s' % (resource, as_section)

    return repl
9,875
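append_include_as expects a match produced by an INCLUDE_RE pattern that is not part of this row; one plausible way it could be wired up (the pattern, example path, and call are assumptions for illustration only):

import re

# Assumed shape of the pattern: one group capturing everything after "#include ".
INCLUDE_RE = re.compile(r'\s*#include\s+(\S.*)')

line = '#include settings/binning/example.cfg'
m = INCLUDE_RE.match(line)
if m:
    line = append_include_as(m)  # the returned repl already carries the '#include ' prefix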
def get_user_profiles(page=1, limit=10): """Retrieves a list of user profiles. :param page: Page number :type page: int :param limit: Maximum number of results to show :type limit: int :returns: JSON string of list of user profiles; status code :rtype: (str, int) """ # initialize query query = Query.make( UserProfile, UserProfile.id.asc(), { 'id.asc': UserProfile.id.asc(), 'id.desc': UserProfile.id.desc(), 'user_id.asc': UserProfile.user_id.asc(), 'user_id.desc': UserProfile.user_id.desc(), 'joined_at.asc': UserProfile.joined_at.asc(), 'joined_at.desc': UserProfile.joined_at.desc(), }, request.args, Query.STATUS_FILTER_ADMIN) # retrieve and return results results = list(query.limit(limit).offset((page - 1) * limit)) if len(results) > 0: # prep initial output output = { 'user_profiles': UserProfileAdminSchema(many=True).dump(results), 'page': page, 'limit': limit, 'total': query.count() } # add pagination URIs and return output.update( Pager.get_uris('admin_user_profiles.get_user_profiles', page, limit, output['total'], request.args)) return jsonify(output), 200 return '', 204
9,876
def _repository(): """Helper dependency injection""" db = sqlite3.connect('covid_database.db', isolation_level=None) return CovidTestDataRepository(db)
9,877
def f(x): """Cubic function.""" return x**3
9,878
def test_polynomial(): """Interpolate f(x) = 1 + 2x + 3x**2.""" f = np.polynomial.polynomial.Polynomial((1, 2, 3)) x = np.arange(0, 1000, 0.01) y = np.array([f(i) for i in x]) interpolater = cubicspline.Interpolater(x, y) for x_ in np.asarray([0, 1, 0.0998, 456, 666.666, 998.501, 999.98, 99.98999]): assert abs(interpolater(x_) - f(x_)) < 1e-7 t = interpolater.test(0, 10, 100) for x_, y_ in t: assert abs(y_ - f(x_)) < 1e-7
9,879
def req_cache(*args, **kwargs): """ Cache decorate for `ruia.Request` class :param args: :param kwargs: :return: """ fetch_func = args[0] @wraps(fetch_func) async def wrapper(self, delay=True): cache_resp = None url_str = f"{self.url}:{self.method}" req_url_str = f"{url_str}:{self.request_config}" # Check whether the current path exists cache_path = gen_cache_dir() pro_dir = os.path.join(cache_path, self.spider_name) if not os.path.exists(pro_dir): create_cache_dir(pro_dir) req_file = f"{md5_encryption(string=req_url_str)}_req.ruia" resp_file = f"{md5_encryption(string=url_str)}_resp.ruia" req_url_path = os.path.join(pro_dir, req_file) resp_url_path = os.path.join(pro_dir, resp_file) pickle_ins = PickleSerializer() if os.path.exists(req_url_path) and os.path.exists(resp_url_path): # Get data locally try: async with aiofiles.open(req_url_path, mode="rb") as f: s_data = await f.read() data = pickle_ins.loads(s_data) cache_resp = data["cache_resp"] except Exception as e: logger.error( f"<Cache load failed: url: {self.url}, method: {self.method}, err: {e}>" ) else: # Delete already path os.remove(req_url_path) if os.path.exists(req_url_path) else None os.remove(resp_url_path) if os.path.exists(resp_url_path) else None # Make a request resp: Response = await fetch_func(self, delay) try: from ruia_cache.response import CacheResponse cache_resp = CacheResponse( url=resp.url, method=resp.method, encoding=resp.encoding, metadata=resp.metadata, cookies=resp.cookies, headers=dict(resp.headers), history=resp.history, status=resp.status, aws_json=None, aws_text=None, aws_read=None, ) cache_resp.spider_name = self.spider_name data = { "cache_resp": cache_resp, # "expire_time": time.time() + ttl, # "ttl": ttl, } s_data = pickle_ins.dumps(data) # Persist target data async with aiofiles.open(req_url_path, mode="wb+") as f: await f.write(s_data) logger.info( f"<Cache serialization successfully: " f"cache_path: {req_url_path} [url: {resp.url}, method: {resp.method}]>" ) cache_resp._source_resp = resp except Exception as e: logger.error( f"<Cache serialization failed: url: {resp.url}, method: {resp.method}, err: {e}>" ) return cache_resp return wrapper
9,880
def display_df(df: Union[pd.DataFrame, pandas.io.formats.style.Styler]) -> pd.DataFrame: """Plot a dataframe with `max_rows` set to None aka infinity, optionally print the DataFrame's head with the given number.""" options = { "display.max_rows": None, "display.max_colwidth": None, "display.precision": 2, } with pd.option_context(*[i for option in list(options.items()) for i in option]): style = df if isinstance(df, pd.io.formats.style.Styler) else df.style display(style)
9,881
def test_fields_value(test_endpoint): """Test that ValueError is raised if invalid fields values are passed.""" with pytest.raises(ValueError): test_endpoint.filters = False
9,882
def correct_anomalies(peaks, alpha=0.05, save_name=""): """ Outlier peak detection (Grubb's test) and removal. Parameters ---------- peaks : array vector of peak locations alpha : real significance level for Grubb's test save_name : str filename to save peaks as to, empty does not save Results ------- corrected_peaks2 : array vector of corrected peak locations max_indices : array indices of original peaks marked as too slow min_indices : array indices of original peaks marked as too fast """ from outliers import smirnov_grubbs as grubbs peak_diffs = abs(np.diff(peaks)) max_indices = grubbs.max_test_indices(peak_diffs, alpha=alpha) min_indices = grubbs.min_test_indices(peak_diffs, alpha=alpha) grubb_idxs = max_indices + min_indices # Compute representative difference based on its distribution mean_rr = np.mean( peak_diffs[[ii for ii in range(len(peak_diffs)) if ii not in grubb_idxs]] ) mean_rr = int(np.round(mean_rr)) corrected_peaks = peaks.copy() for ix in max_indices: n = int(np.round((peaks[ix + 1] - peaks[ix]) / mean_rr)) if n == 1: continue new_peaks = np.linspace(peaks[ix], peaks[ix + 1], n, dtype=int, endpoint=False)[1:] corrected_peaks = np.append(corrected_peaks, new_peaks) corrected_peaks = np.sort(corrected_peaks) corrected_peak_diffs = abs(np.diff(corrected_peaks)) min_indices = grubbs.min_test_indices(corrected_peak_diffs, alpha=alpha) # deleting peak such that resultant RR interval is furthest from mean RR # (i.e. gives longer RR interval) too_fast = np.array(min_indices) # index of peaks to delete (and then reinsert) peaks_to_replace = np.zeros_like(too_fast) new_peaks2 = np.zeros_like(too_fast, dtype=float) for index, i in enumerate(too_fast): # print(index, i) if i == (corrected_peak_diffs.size - 1): # if last RR interval (edge case) peaks_to_replace[index] = i # replace first peak # compute new diff_peak new_diff = (corrected_peaks[i + 1] - corrected_peaks[i - 1])/2 new_peaks2[index] = corrected_peaks[i - 1] + new_diff else: # replace first peak new_diff1 = corrected_peaks[i + 1] - corrected_peaks[i - 1] # replace second peak new_diff2 = corrected_peaks[i + 2] - corrected_peaks[i] if new_diff1 > new_diff2: # replacing first peak results in new RR interval # furthest from mean RR interval peaks_to_replace[index] = i # compute new diff_peak new_diff = (corrected_peaks[i + 1] - corrected_peaks[i - 1])/2 new_peaks2[index] = corrected_peaks[i - 1] + new_diff else: # replacing second peak results in new RR interval # furthest from mean RR interval peaks_to_replace[index] = i + 1 # compute new diff_peak new_diff = (corrected_peaks[i + 2] - corrected_peaks[i])/2 new_peaks2[index] = corrected_peaks[i] + new_diff corrected_peaks2 = corrected_peaks.copy() np.put(corrected_peaks2, peaks_to_replace.astype(int), new_peaks2) # save peaks if save_name != "": np.savetxt(save_name, corrected_peaks2, delimiter=",") return corrected_peaks2, max_indices, min_indices
9,883
def svn_wc_walk_entries(*args): """ svn_wc_walk_entries(char path, svn_wc_adm_access_t adm_access, svn_wc_entry_callbacks_t walk_callbacks, void walk_baton, svn_boolean_t show_hidden, apr_pool_t pool) -> svn_error_t """ return apply(_wc.svn_wc_walk_entries, args)
9,884
def extract_torrents(provider, client): """ Main torrent extraction generator for non-API based providers Args: provider (str): Provider ID client (Client): Client class instance Yields: tuple: A torrent result """ definition = definitions[provider] definition = get_alias(definition, get_setting("%s_alias" % provider)) log.debug("Extracting torrents from %s using definitions: %s" % (provider, repr(definition))) if not client.content: if get_setting("use_debug_parser", bool): log.debug("[%s] Parser debug | Page content is empty" % provider) raise StopIteration dom = Html().feed(client.content) key_search = get_search_query(definition, "key") row_search = get_search_query(definition, "row") name_search = get_search_query(definition, "name") torrent_search = get_search_query(definition, "torrent") info_hash_search = get_search_query(definition, "infohash") size_search = get_search_query(definition, "size") seeds_search = get_search_query(definition, "seeds") peers_search = get_search_query(definition, "peers") referer_search = get_search_query(definition, "referer") log.debug("[%s] Parser: %s" % (provider, repr(definition['parser']))) q = Queue() threads = [] needs_subpage = 'subpage' in definition and definition['subpage'] if needs_subpage: def extract_subpage(q, name, torrent, size, seeds, peers, info_hash, referer): try: log.debug("[%s] Getting subpage at %s" % (provider, repr(torrent))) except Exception as e: import traceback log.error("[%s] Subpage logging failed with: %s" % (provider, repr(e))) map(log.debug, traceback.format_exc().split("\n")) # New client instance, otherwise it's race conditions all over the place subclient = Client() subclient.passkey = client.passkey headers = {} if "subpage_mode" in definition: if definition["subpage_mode"] == "xhr": headers['X-Requested-With'] = 'XMLHttpRequest' headers['Content-Language'] = '' if referer: headers['Referer'] = referer uri = torrent.split('|') # Split cookies for private trackers subclient.open(uri[0].encode('utf-8'), headers=headers) if 'bittorrent' in subclient.headers.get('content-type', ''): log.debug('[%s] bittorrent content-type for %s' % (provider, repr(torrent))) if len(uri) > 1: # Stick back cookies if needed torrent = '%s|%s' % (torrent, uri[1]) else: try: torrent = extract_from_page(provider, subclient.content) if torrent and not torrent.startswith('magnet') and len(uri) > 1: # Stick back cookies if needed torrent = '%s|%s' % (torrent, uri[1]) except Exception as e: import traceback log.error("[%s] Subpage extraction for %s failed with: %s" % (provider, repr(uri[0]), repr(e))) map(log.debug, traceback.format_exc().split("\n")) ret = (name, info_hash, torrent, size, seeds, peers) q.put_nowait(ret) if not dom: if get_setting("use_debug_parser", bool): log.debug("[%s] Parser debug | Could not parse DOM from page content" % provider) raise StopIteration if get_setting("use_debug_parser", bool): log.debug("[%s] Parser debug | Page content: %s" % (provider, client.content.replace('\r', '').replace('\n', ''))) key = eval(key_search) if key_search else "" if key_search and get_setting("use_debug_parser", bool): key_str = key.__str__() log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'key', key_search, key_str.replace('\r', '').replace('\n', ''))) items = eval(row_search) if get_setting("use_debug_parser", bool): log.debug("[%s] Parser debug | Matched %d items for '%s' query '%s'" % (provider, len(items), 'row', row_search)) for item in items: if get_setting("use_debug_parser", bool): 
            item_str = item.__str__()
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'row', row_search, item_str.replace('\r', '').replace('\n', '')))

        if not item:
            continue

        name = eval(name_search) if name_search else ""
        torrent = eval(torrent_search) if torrent_search else ""
        size = eval(size_search) if size_search else ""
        seeds = eval(seeds_search) if seeds_search else ""
        peers = eval(peers_search) if peers_search else ""
        info_hash = eval(info_hash_search) if info_hash_search else ""
        referer = eval(referer_search) if referer_search else ""
        if 'magnet:?' in torrent:
            torrent = torrent[torrent.find('magnet:?'):]

        if get_setting("use_debug_parser", bool):
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'name', name_search, name))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'torrent', torrent_search, torrent))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'size', size_search, size))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'seeds', seeds_search, seeds))
            log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'peers', peers_search, peers))
            if info_hash_search:
                log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', info_hash_search, info_hash))
            if referer_search:
                log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'referer', referer_search, referer))

        # Pass client cookies with torrent if private
        if not torrent.startswith('magnet'):
            user_agent = USER_AGENT

            if client.passkey:
                torrent = torrent.replace('PASSKEY', client.passkey)
            elif client.token:
                headers = {'Authorization': client.token, 'User-Agent': user_agent}
                log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
                torrent = append_headers(torrent, headers)
                log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
            else:
                log.debug("[%s] Cookies: %s" % (provider, repr(client.cookies())))
                parsed_url = urlparse(definition['root_url'])
                cookie_domain = '{uri.netloc}'.format(uri=parsed_url)
                cookie_domain = re.sub(r'www\d*\.', '', cookie_domain)
                cookies = []
                for cookie in client._cookies:
                    if cookie_domain in cookie.domain:
                        cookies.append(cookie)
                headers = {}
                if cookies:
                    headers = {'Cookie': ";".join(["%s=%s" % (c.name, c.value) for c in cookies]), 'User-Agent': user_agent}
                    if client.request_headers:
                        headers.update(client.request_headers)
                    if client.url:
                        headers['Referer'] = client.url
                        headers['Origin'] = client.url
                else:
                    headers = {'User-Agent': user_agent}

                torrent = append_headers(torrent, headers)

        if name and torrent and needs_subpage and not torrent.startswith('magnet'):
            if not torrent.startswith('http'):
                torrent = definition['root_url'] + torrent.encode('utf-8')
            t = Thread(target=extract_subpage, args=(q, name, torrent, size, seeds, peers, info_hash, referer))
            threads.append(t)
        else:
            yield (name, info_hash, torrent, size, seeds, peers)

    if needs_subpage:
        log.debug("[%s] Starting subpage threads..." % provider)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        log.debug("[%s] Threads returned: %s" % (provider, repr(threads)))

        for i in range(q.qsize()):
            ret = q.get_nowait()
            log.debug("[%s] Queue %d got: %s" % (provider, i, repr(ret)))
            yield ret
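
# Hypothetical usage sketch for extract_torrents (not part of the original snippet):
# 'provider_id' is a placeholder for a key in `definitions`, the URL is a placeholder,
# and the Client is assumed to have already fetched a search-results page into
# `client.content` via the module's own Client class.
client = Client()
client.open("https://example.org/search?q=ubuntu")
for name, info_hash, torrent, size, seeds, peers in extract_torrents("provider_id", client):
    log.debug("Found result: %s (%s seeds)" % (name, seeds))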
9,885
def dlp_to_datacatalog_builder(
    taskgroup: TaskGroup,
    datastore: str,
    project_id: str,
    table_id: str,
    dataset_id: str,
    table_dlp_config: DlpTableConfig,
    next_task: BaseOperator,
    dag,
) -> BaseOperator:
    """
    Builds the tasks inside the given Task Group for scanning a table with DLP
    and creating BigQuery policy tags based on the results:
    1) Delete the previous DLP results table
    2) Scan the table with DLP and write the results to BigQuery
    3) Read the results of the DLP scan from BigQuery
    4) Update policy tags in BigQuery
    Returns the first task of the chain
    """
    assert table_dlp_config.source_config is not None

    # setup table vars
    dlp_results_dataset_id = table_dlp_config.source_config.results_dataset_id
    table_ref = TableReference(DatasetReference(project_id, dataset_id), table_id)
    dlp_results_table_ref = TableReference(
        DatasetReference(project_id, dlp_results_dataset_id), f"{table_id}_dlp_results"
    )
    dlp_results_table = f"{dlp_results_table_ref.project}.{dlp_results_table_ref.dataset_id}.{dlp_results_table_ref.table_id}"

    # setup DLP scan vars
    dlp_template_name = table_dlp_config.get_template_name()
    rows_limit_percent = table_dlp_config.get_rows_limit_percent()
    inspect_job = build_inspect_job_config(
        dlp_template_name, table_ref, rows_limit_percent, dlp_results_table_ref
    )

    # 1. Delete the old results table
    delete_dlp_results = BigQueryDeleteTableOperator(
        task_id=f"delete_old_dlp_results_{datastore}",
        deletion_dataset_table=dlp_results_table,
        ignore_if_missing=True,
        task_group=taskgroup,
        dag=dag,
    )

    # 2. Scan the table
    scan_task = CloudDLPCreateDLPJobOperator(
        task_id=f"scan_table_{datastore}",
        project_id=project_id,
        inspect_job=inspect_job,
        wait_until_finished=True,
        task_group=taskgroup,
        dag=dag,
    )

    # 3. Read the results
    read_results_task = DlpBQInspectionResultsOperator(
        task_id=f"read_dlp_results_{datastore}",
        project_id=dlp_results_table_ref.project,
        dataset_id=dlp_results_table_ref.dataset_id,
        table_id=dlp_results_table_ref.table_id,
        do_xcom_push=True,
        min_match_count=table_dlp_config.get_min_match_count(),
        task_group=taskgroup,
        dag=dag,
    )

    # 4. Update policy tags
    update_tags_task = PythonOperator(
        task_id=f"update_bq_policy_tags_{datastore}",
        python_callable=update_bq_policy_tags,  # applies policy tags to the table's columns
        task_group=taskgroup,
        dag=dag,
        templates_dict={
            "dlp_results": f"{{{{ti.xcom_pull(task_ids='{read_results_task.task_id}')}}}}",
        },
        op_kwargs={
            "project_id": project_id,
            "dataset_id": table_ref.dataset_id,
            "table_id": table_ref.table_id,
            "policy_tag_config": table_dlp_config.source_config.policy_tag_config,
            "task_ids": read_results_task.task_id,
        },
        provide_context=True,
    )

    delete_dlp_results >> scan_task >> read_results_task >> update_tags_task >> next_task

    return delete_dlp_results
9,886
def upload_assessors(xnat, projects, resdir, num_threads=1):
    """
    Upload all assessors to XNAT

    :param xnat: pyxnat.Interface object
    :param projects: list of projects to upload to XNAT
    :param resdir: results directory containing the assessors to upload
    :param num_threads: number of processes in the upload pool
    :return: list of upload warnings (empty in the current implementation)
    """
    # Get the assessor labels from the directory:
    assessors_list = get_assessor_list(projects, resdir)
    number_of_processes = len(assessors_list)
    warnings = list()

    LOGGER.info('Starting upload pool: {} threads'.format(num_threads))
    sys.stdout.flush()

    pool = Pool(processes=num_threads)
    for index, assessor_label in enumerate(assessors_list):
        LOGGER.info(index)
        sys.stdout.flush()
        pool.apply_async(
            upload_thread,
            [xnat, index, assessor_label, number_of_processes, resdir])

    LOGGER.info('waiting for upload pool to finish...')
    sys.stdout.flush()
    pool.close()
    pool.join()
    LOGGER.info('upload pool finished')
    sys.stdout.flush()

    return warnings
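
# Hypothetical usage sketch for upload_assessors (not part of the original snippet):
# the XNAT host, credentials, project names, and results directory are placeholders,
# and the surrounding module's helpers (get_assessor_list, upload_thread, LOGGER) are assumed.
import pyxnat

xnat = pyxnat.Interface(server="https://xnat.example.org", user="admin", password="secret")
warnings = upload_assessors(xnat, projects=["PROJ1", "PROJ2"],
                            resdir="/data/results", num_threads=4)
xnat.disconnect()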
9,887
def save_txt(content, filename, append=False, empty=False): """ Save the content onto a file. :param content: (string) the string content to save. :param filename: (string) the absolute file path. :param append: (bool) if True, append to an existing file. :param empty: (bool) if True, the file is emptied. :return: (void) """ create_dir_tree(filename) if empty: empty_file(filename) mode = "a+" if append else "w+" with open(filename, mode) as f: f.write(str(content))
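
# Hypothetical usage sketch for save_txt (not part of the original snippet):
# it assumes the companion helpers create_dir_tree/empty_file from the same module.
save_txt("first run\n", "/tmp/demo/log.txt")                 # create (or overwrite) the file
save_txt("second run\n", "/tmp/demo/log.txt", append=True)   # append to the existing file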
9,888
def dedent(ind, text):
    """
    Dedent text, then re-indent it to the specified indentation level.

    :param ind: common indentation level for the resulting text (number of spaces to prepend to every line)
    :param text: text that should be transformed.
    :return: ``text`` with all common indentation removed, and then the specified amount of indentation added.
    """
    text2 = textwrap.dedent(text)
    if ind == 0:
        return text2
    indent_str = " " * ind
    return "\n".join(indent_str + line for line in text2.split("\n"))
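
# Hypothetical usage sketch for dedent (not part of the original snippet).
import textwrap

source = """
        def f():
            return 1
"""
print(dedent(4, source))  # common indentation stripped, then 4 spaces added to every line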
9,889
def since(timestamp=None, directory=os.getcwd()): # noqa WPS404, B008 """since.""" if not timestamp: return WRONG_ARGUMENT try: timestamp = int(timestamp) except Exception: return WRONG_ARGUMENT if not os.path.exists(directory): return 'dir not found' dir_content = ls(directory) if not dir_content: return 'dir is empty' return [ item_object for item_object in dir_content if os.stat('{0}/{1}'.format( directory, item_object, )).st_ctime > timestamp ]
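
# Hypothetical usage sketch for since (not part of the original snippet):
# it assumes the companion ls() helper from the same module is in scope.
import time

recent = since(int(time.time()) - 3600, directory="/tmp")
print(recent)  # entries in /tmp created within the last hour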
9,890
def get_vendor(request): """ Returns the ``JSON`` serialized data of the requested vendor on ``GET`` request. .. http:get:: /get_vendor/ Gets the JSON serialized data of the requested vendor. **Example request**: .. sourcecode:: http GET /get_vendor/ HTTP/1.1 Host: localhost:8000 Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9 :param vendor_id: Vendor primary key. **Example response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json; charset=utf-8 [ { "name": "Lug Vendor", "identifier": "TBPN-02692", "gstin": "89AAC4683897343", "address": { "name": "Kuame Burns", "address": "Nonummy Avenue", "city": "Chung Cheong", "phone": "679 166-3127", "state": "Guanacaste", "country": "tellusidnunc.net", "post": "8949" } } ] :resheader Content-Type: application/json :statuscode 200: List of vendors received successfully. :statuscode 400: Bad request version :statuscode 500: Vendor matching query does not exist. """ if request.method == 'GET': vendor_id = request.GET.get('vendor_id') vendor = VendorSerializer(Vendor.objects.get(id=vendor_id)) return JsonResponse(vendor.data)
9,891
def ensure_bin_str(s): """assert type of s is basestring and convert s to byte string""" assert isinstance(s, basestring), 's should be string' if isinstance(s, unicode): s = s.encode('utf-8') return s
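
# Hypothetical usage sketch for ensure_bin_str (not part of the original snippet);
# note this helper is Python 2 only, since it relies on basestring/unicode.
print(repr(ensure_bin_str(u'h\xe9llo')))        # -> 'h\xc3\xa9llo' (UTF-8 byte string)
print(repr(ensure_bin_str('already a byte string')))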
9,892
def test_plot_water_levels(): """ Task 2E by Tian Ern """ # did not test matplotlib figure due to several challenges assert True
9,893
def test_item_setitem(supported_value): """Properties can be set.""" instance = holocron.Item() instance["x"] = supported_value instance["y"] = 42 assert instance["x"] == supported_value assert instance["y"] == 42 assert instance == holocron.Item(x=supported_value, y=42)
9,894
def _word_accuracy(pred_data, ref_data):
    """compute word-level accuracy"""
    pred_size = len(pred_data)
    ref_size = len(ref_data)
    if pred_size <= 0 or ref_size <= 0:
        raise ValueError("size of predict or reference data is less than or equal to 0")
    if pred_size != ref_size:
        raise ValueError("size of predict and reference data don't match")

    total_accuracy = 0.0
    total_count = 0
    for i in range(pred_size):
        pred_word = pred_data[i].strip().split(" ")
        ref_word = ref_data[i].strip().split(" ")
        pred_len = len(pred_word)
        ref_len = len(ref_word)
        match_count = 0
        for j in range(min(pred_len, ref_len)):
            predict_word = pred_word[j]
            reference_word = ref_word[j]
            if predict_word == reference_word:
                match_count += 1
        total_accuracy += 100.0 * match_count / max(pred_len, ref_len)
        total_count += 1

    word_accuracy = total_accuracy / total_count
    return word_accuracy
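
# Hypothetical usage sketch for _word_accuracy (not part of the original snippet):
# each element is one sentence; accuracy is averaged over sentence pairs.
predictions = ["the cat sat on the mat", "hello world"]
references = ["the cat sat on a mat", "hello there world"]
print(_word_accuracy(predictions, references))  # averaged word accuracy, about 58.3 here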
9,895
def cli(ffmpeg, source): """FFMPEG capture frame as image.""" stream = Test(ffmpeg_bin=ffmpeg) stream.run_test( input_source=source, )
9,896
def check_interface(interface: str) -> str: """ Check that the interface we've been asked to run on actually exists """ log = logging.getLogger(inspect.stack()[0][3]) discovered_interfaces = [] for iface in os.listdir("/sys/class/net"): iface_path = os.path.join("/sys/class/net", iface) if os.path.isdir(iface_path): if "phy80211" in os.listdir(iface_path): discovered_interfaces.append(iface) if interface not in discovered_interfaces: log.warning( "%s interface not found in phy80211 interfaces: %s", interface, discovered_interfaces, ) raise ValueError(f"{interface} is not a valid interface") else: log.debug( "%s is in discovered interfaces: [%s]", interface, discovered_interfaces ) return interface
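
# Hypothetical usage sketch for check_interface (not part of the original snippet):
# this only works on Linux and only succeeds for a wireless (phy80211) interface such as wlan0.
try:
    iface = check_interface("wlan0")
    print("using interface:", iface)
except ValueError as error:
    print("invalid interface:", error)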
9,897
def api_retrieve_part(pt_id): """ Allows the client to call "retrieve" method on the server side to retrieve the part from the ledger. Args: pt_id (str): The uuid of the part Returns: type: str String representing JSON object which allows the client to know that the call was either a success or a failure. """ response = requests.get( "http://127.0.0.1:852/tp/part/{}".format(pt_id) ) output = response.content.decode("utf-8").strip() return output
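
# Hypothetical usage sketch for api_retrieve_part (not part of the original snippet):
# it assumes the part API server is running locally on port 852 as in the function;
# the uuid below is a placeholder.
part_uuid = "9f1c2e34-0000-0000-0000-000000000000"
print(api_retrieve_part(part_uuid))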
9,898
def test_missing_manifest_link(): """Test that missing linked manifests are properly flagged.""" err = ErrorBundle() package = MockXPI({ "chrome.manifest": "tests/resources/submain/linkman/base1.manifest"}) submain.populate_chrome_manifest(err, package) chrome = err.get_resource("chrome.manifest") assert chrome assert not err.failed() assert err.notices # From the base file: assert list(chrome.get_triples(subject="foo")) # From the linked manifest: assert not list(chrome.get_triples(subject="zap"))
9,899