content
stringlengths
22
815k
id
int64
0
4.91M
def check_cardinality(attribute_name: str, analysis: run_metadata_pb2.Analysis) -> Union[None, str]:
    """Return a warning string when an attribute's cardinality is too high.

    Scans the scalar metrics attached to *analysis* for a CARDINALITY
    entry and compares the last such value found against the module-level
    CARDINALITY_THRESHOLD.

    Args:
        attribute_name: Name of the attribute the analysis belongs to.
        analysis: Analysis proto holding the cardinality result.

    Returns:
        The formatted HIGH_CARDINALITY message when the threshold is
        exceeded, otherwise None.
    """
    observed_cardinality = 0
    for metric in analysis.smetrics:
        if metric.name == run_metadata_pb2.ScalarMetric.CARDINALITY:
            observed_cardinality = metric.value
    if observed_cardinality > CARDINALITY_THRESHOLD:
        return template.HIGH_CARDINALITY.format(
            name=attribute_name, value=observed_cardinality
        )
    return None
5,335,000
def create_df_from(dataset):
    """
    Selects a method, based on the given dataset name, and creates the
    corresponding dataframe. When adding a new method, take care to have as
    index the ASN and the column names to be of the format
    "dataset_name_"+"column_name" (e.g., the column "X" from the dataset
    "setA", should be "setA_X")

    :param dataset: (type = string) name of the dataset to be loaded
    :return: A dataframe with indexes the ASNs and columns the features
        loaded from the given dataset
    """
    # Dispatch table instead of an if/elif chain; add new datasets here.
    loaders = {
        'AS_rank': create_df_from_AS_rank,
        'personal': create_df_from_personal,
        'PeeringDB': create_df_from_PeeringDB,
        'AS_hegemony': create_df_from_AS_hegemony,
        'Atlas_probes': create_df_from_Atlas_probes,
    }
    if dataset not in loaders:
        raise Exception('Not defined dataset')
    return loaders[dataset]()
5,335,001
def install_mysql():
    """ ripped from http://www.muhuk.com/2010/05/how-to-install-mysql-with-fabric/ """
    # Probe for an existing installation; warn-only so a missing package
    # does not abort the fabric run.
    with settings(hide('warnings', 'stderr'), warn_only=True):
        probe = sudo('dpkg-query --show mysql-server')
    if probe.failed is False:
        warn('MySQL is already installed')
        return
    root_pw = env.database_admin_pass
    # Pre-seed debconf so the apt install below is non-interactive.
    for selection_key in ('root_password', 'root_password_again'):
        sudo('echo "mysql-server-5.5 mysql-server/%s password '
             '%s" | debconf-set-selections' % (selection_key, root_pw))
    sudo('apt-get install -y mysql-server')
5,335,002
def test_magnet_cuboid_Bfield():
    """test cuboid field"""
    # Nine cases: zero magnetization, negative/zero dimensions, observer on
    # an edge/corner (inf / zero special values) and regular positions.
    magnetizations = np.array([
        (0, 0, 0), (1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 2, 3),
        (2, 2, 2), (2, 2, 2), (1, 1, 1), (1, 1, 1),
    ])
    dimensions = np.array([
        (1, 2, 3), (-1, -2, 2), (1, 2, 2), (0, 2, 2), (1, 2, 3),
        (2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2),
    ])
    observers = np.array([
        (1, 2, 3), (1, -1, 0), (1, -1, 0), (1, -1, 0), (1, 2, 3),
        (1, 1 + 1e-14, 0), (1, 1, 1), (1, -1, 2), (1 + 1e-14, -1, 2),
    ])
    B = magnet_cuboid_field("B", observers, magnetizations, dimensions)
    expected = [
        [0.0, 0.0, 0.0],
        [-0.14174376, -0.16976459, -0.20427478],
        [-0.14174376, -0.16976459, -0.20427478],
        [0.0, 0.0, 0.0],
        [0.02596336, 0.04530334, 0.05840059],
        [np.inf, np.inf, -0.29516724],
        [0.0, 0.0, 0.0],
        [-0.0009913, -0.08747071, 0.04890262],
        [-0.0009913, -0.08747071, 0.04890262],
    ]
    np.testing.assert_allclose(B, expected, rtol=1e-5)
5,335,003
def plasma_fractal(mapsize=256, wibbledecay=3):
    """
    Generate a heightmap using diamond-square algorithm.

    :param mapsize: side length of the square map; must be a power of two.
    :param wibbledecay: factor by which the random perturbation ("wibble")
        shrinks at each refinement level; larger values give smoother maps.
    :return: square 2d float array of side ``mapsize`` normalized to [0, 1].
    """
    assert (mapsize & (mapsize - 1) == 0)
    # FIX: np.float_ was removed in NumPy 2.0; np.float64 is the exact
    # equivalent and works on all NumPy versions.
    maparray = np.empty((mapsize, mapsize), dtype=np.float64)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100

    def wibbledmean(array):
        # Mean of 4 accumulated corner values plus a random perturbation.
        return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)

    def fillsquares():
        """For each square of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)

    def filldiamonds():
        """For each diamond of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize // 2:mapsize:stepsize,
                          stepsize // 2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize,
                 0:mapsize:stepsize] = wibbledmean(ttsum)

    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay

    # Normalize to [0, 1].
    maparray -= maparray.min()
    return maparray / maparray.max()
5,335,004
def reduce_puzzle(values):
    """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary once further applications of the strategies
        stop changing anything, or False if the puzzle is unsolvable.
    """
    def count_unsolved(vals):
        # Boxes that still have more than one candidate digit.
        return len([b for b in boxes if len(vals[b]) > 1])

    unsolved_before = count_unsolved(values)
    done = False
    while not done:
        unsolved_after = unsolved_before
        # Apply each strategy in turn; a fully-solved grid ends the loop.
        for strategy in (eliminate, only_choice, naked_twins):
            values = strategy(values)
            unsolved_after = count_unsolved(values)
            if unsolved_after == 0:
                done = True
        # Stop when a full pass makes no progress.
        if unsolved_after == unsolved_before:
            done = True
        # A box with zero candidates means the puzzle is unsolvable.
        if any(len(v) == 0 for v in values.values()):
            return False
        unsolved_before = unsolved_after
    return values
5,335,005
def read_ids():
    """
    Read the persisted (node_id, pool_id) pair from the memory file.

    :return: node_id, pool_id (or False if no file)
    :raises ValueError: if the file contents are not a 2-tuple
        (previously an ``assert``, which would be silently stripped
        under ``python -O``)
    """
    if not const.MEMORY_FILE.exists():
        return False
    # NOTE(review): pickle.load assumes the memory file is trusted local
    # state written by write_ids; never point it at untrusted data.
    with open(const.MEMORY_FILE, 'rb') as f:
        data = pickle.load(f)
    if not (isinstance(data, tuple) and len(data) == 2):
        raise ValueError('memory file must contain a (node_id, pool_id) tuple')
    node_id, pool_id = data
    return node_id, pool_id
5,335,006
def _match_caller_callee_argument_dimension_(program, callee_function_name):
    """
    Returns a copy of *program* with the
    :class:`loopy.kernel.function_interface.CallableKernel` addressed by
    *callee_function_name* aligned with the argument dimensions required
    by its (single) caller kernel.

    .. note::

        The callee kernel addressed by *callee_function_name* should be
        called at only one location throughout the program, as multiple
        invocations would demand complex renaming logic which is not
        implemented yet.
    """
    assert isinstance(program, TranslationUnit)
    assert isinstance(callee_function_name, str)
    assert callee_function_name not in program.entrypoints
    assert callee_function_name in program.callables_table

    calls_callee = _FunctionCalledChecker(callee_function_name).map_kernel

    # Exactly one kernel in the program may invoke the callee.
    invoking_kernels = [clbl.subkernel
                        for clbl in program.callables_table.values()
                        if isinstance(clbl, CallableKernel)
                        and calls_callee(clbl.subkernel)]
    caller_knl, = invoking_kernels

    from pymbolic.primitives import Call
    call_sites = [insn for insn in caller_knl.instructions
                  if (isinstance(insn, CallInstruction)
                      and isinstance(insn.expression, Call)
                      and insn.expression.function.name == callee_function_name)]
    assert len(call_sites) == 1

    new_callee_kernel = _match_caller_callee_argument_dimension_for_single_kernel(
        caller_knl, program[callee_function_name])
    return program.with_kernel(new_callee_kernel)
5,335,007
def fix_arp_2():
    """Fixes net.ipv4.conf.all.arp_ignore.

    Sets the kernel parameter to 1 so the host only answers ARP requests
    for addresses configured on the interface that received the request.
    """
    vprint("Fixing ARP setting")
    exe("sysctl -w net.ipv4.conf.all.arp_ignore=1")
5,335,008
def new_automation_jobs(issues):
    """
    :param issues: issues object pulled from Redmine API
    :return: returns a new subset of issues that are Status: NEW and match
        a term in AUTOMATOR_KEYWORDS)
    """
    jobs = {}
    for issue in issues:
        # Skip anything that is not a brand-new issue.
        if issue.status.name != 'New':
            continue
        # Normalize the subject (the job type, e.g. Diversitree):
        # lowercase with all spaces removed.
        normalized_subject = issue.subject.lower().replace(' ', '')
        if normalized_subject == 'iridaretrieve':
            jobs[issue] = normalized_subject
    return jobs
5,335,009
def resolve_test_data_path(test_data_file):
    """
    helper function to ensure filepath is valid
    for different testing context (setuptools, directly, etc.)

    :param test_data_file: Relative path to an input file.
    :returns: Full path to the input file, or None if it cannot be found.
    """
    # Try the path as given first, then relative to the package test dir.
    candidates = (
        test_data_file,
        os.path.join('woudc_data_registry', 'tests', test_data_file),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
5,335,010
def pit_two_sats_sqlserver(context):
    """
    Define the structures and metadata to perform PIT load

    Configures the behave *context* for the SQL Server PIT scenarios with
    two satellites, in both LOAD_DATE and LOAD_DATETIME ("_TS") variants:
    hashing rules for staging, derived columns, hub/satellite/PIT model
    definitions, raw stage column lists and dbt seed column types.
    """
    context.vault_structure_type = "pit"
    # How primary keys and hashdiffs are derived in each staging model.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_LOGIN": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
                         }
        },
        "STG_CUSTOMER_LOGIN_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
                         }
        }
    }
    # EFFECTIVE_FROM mirrors the load timestamp column of each stage.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_DETAILS_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"},
        "STG_CUSTOMER_LOGIN": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_LOGIN_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"}
    }
    # Hubs, satellites and the four PIT variants under test.
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS", ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "HUB_CUSTOMER_TS": {
            "source_model": ["STG_CUSTOMER_DETAILS_TS", ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "source_model": "STG_CUSTOMER_DETAILS_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_LOGIN": {
            "source_model": "STG_CUSTOMER_LOGIN",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_LOGIN_TS": {
            "source_model": "STG_CUSTOMER_LOGIN_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                "SAT_CUSTOMER_DETAILS": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATE"}
                },
                "SAT_CUSTOMER_LOGIN": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATE"}
                }
            },
            "stage_tables": {
                "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                "STG_CUSTOMER_LOGIN": "LOAD_DATE"
            },
            "src_ldts": "LOAD_DATE"
        },
        "PIT_CUSTOMER_TS": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                "SAT_CUSTOMER_DETAILS_TS": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATETIME"}
                },
                "SAT_CUSTOMER_LOGIN_TS": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATETIME"}
                }
            },
            "stage_tables": {
                "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
            },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_LG": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                "SAT_CUSTOMER_DETAILS_TS": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATETIME"}
                },
                "SAT_CUSTOMER_LOGIN_TS": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATETIME"}
                }
            },
            "stage_tables": {
                "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
            },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_HG": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                "SAT_CUSTOMER_DETAILS": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATE"}
                },
                "SAT_CUSTOMER_LOGIN": {
                    "pk": {"PK": "CUSTOMER_PK"},
                    "ldts": {"LDTS": "LOAD_DATE"}
                }
            },
            "stage_tables": {
                "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                "STG_CUSTOMER_LOGIN": "LOAD_DATE",
            },
            "src_ldts": "LOAD_DATE"
        }
    }
    # Column lists of the raw seed files feeding the stages.
    context.stage_columns = {
        "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS",
                              "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS",
                                 "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"],
        "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED",
                            "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_LOGIN_TS": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED",
                               "LOAD_DATETIME", "SOURCE"]
    }
    # dbt seed column-type overrides (SQL Server types).
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_LOGIN": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "DEVICE_USED": "VARCHAR(50)",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_LOGIN_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "DEVICE_USED": "VARCHAR(50)",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR(50)",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "HUB_CUSTOMER_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR(50)",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_LOGIN": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DEVICE_USED": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_LOGIN_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DEVICE_USED": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_TS": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_LG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_HG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME"
            }
        }
    }
5,335,011
def test_query_non_ascii(client):
    """Test if query strings with non-ascii characters work.

    There was a bug that forced query strings to be all ascii. The bug
    only occured with Python 2. It was fixed in change 8d5132d.
    """
    # Unicode name encoded as UTF-8 bytes so the literal is identical
    # under both Python 2 and Python 3.
    full_name = b'Rudolph Beck-D\xc3\xbclmen'.decode('utf8')
    query = Query(
        client,
        "User",
        conditions={"fullName": "= '%s'" % full_name},
    )
    print(str(query))
    matches = client.search(query)
    assert len(matches) == 1
5,335,012
def make_data(revs, word_idx_map, max_l=50, filter_h=3,
              val_test_splits=(2, 3), validation_num=500000):
    """
    Transforms sentences into a 2-d matrix.

    :param revs: list of review dicts with keys "m", "r" and label "y"
    :param word_idx_map: word -> index mapping
    :param max_l: maximum sentence length
    :param filter_h: filter height (kept for interface compatibility; unused)
    :param val_test_splits: split identifiers (kept for interface
        compatibility; unused). FIX: was a mutable default ``[2, 3]`` —
        now an equivalent immutable tuple.
    :param validation_num: the first this-many rows go to the validation
        set, the remainder to the training set
    :return: [train, val, test] as int numpy arrays (test is always empty)
    """
    version = begin_time()
    train, val, test = [], [], []
    for rev in revs:
        # Message indices + response indices + session mask + label.
        sent = get_idx_from_sent_msg(rev["m"], word_idx_map, max_l, True)
        sent += get_idx_from_sent(rev["r"], word_idx_map, max_l, True)
        sent += get_session_mask(rev["m"])
        sent.append(int(rev["y"]))
        # Fill the validation set first, then everything else is training.
        if len(val) >= validation_num:
            train.append(sent)
        else:
            val.append(sent)
    train = np.array(train, dtype="int")
    val = np.array(val, dtype="int")
    test = np.array(test, dtype="int")
    print('training data', len(train), 'val data', len(val),
          'spend time:', spend_time(version))
    return [train, val, test]
5,335,013
def init(param_test):
    """
    Initialize class: param_test
    """
    # Default CLI parameters and the expected numerical result.
    param_test.default_args_values = {'di': 6.85, 'da': 7.65, 'db': 7.02}
    param_test.default_result = 6.612133606
    fallback_args = ['-di 6.85 -da 7.65 -db 7.02']
    # Only fall back to the defaults when no args were supplied.
    if not param_test.args:
        param_test.args = fallback_args
    return param_test
5,335,014
def add_width_to_df(df):
    """Adds an extra column "width" to df which is the angular
    width of the CME in degrees.
    """
    # Pull both position-angle columns from HELCATS, then take the
    # absolute difference as the angular width.
    for pa_col in ('PA-N [deg]', 'PA-S [deg]'):
        df = add_helcats_to_df(df, pa_col)
    return add_col_to_df(df, 'PA-N [deg]', 'PA-S [deg]', 'subtract', 'width',
                         abs_col=True)
5,335,015
def main():
    """Run P4 commands to collect metrics."""
    parser = argparse.ArgumentParser(
        description="""Examine p4 changes and jobs to report metrics.""")
    parser.add_argument('--after', type=int,
                        help="select changes after (and including) the given change")
    options = parser.parse_args()

    p4 = P4.P4()
    p4.connect()
    current_user = p4.fetch_user()
    # Guard clause: bail out when the Perforce user cannot be determined.
    if not current_user:
        sys.stderr.write("Cannot retrieve current Perforce user\n")
        return
    process_changes(p4, current_user["User"], options.after)
5,335,016
def save(
    basename: str,
    model: Model,
    format_input: FormatInput,
    format_output: FormatOutput,
) -> None:
    """Persist the model together with its input/output formatters.

    Args:
        basename (str): dirname + label, e.g., xxxx/best
        model (Model): target model
        format_input (FormatInput): target format_input
        format_output (FormatOutput): target format_output
    """
    # Each component knows how to serialize itself under the same basename.
    for saveable in (model, format_input, format_output):
        saveable.save(basename)
5,335,017
def test_simple_import(barred_tac_list_importer, logger, db_conn):
    """Verify that the valid barred list data can be successfully imported into the db."""
    # The fixture list contains 6 valid rows; all must import without error.
    expect_success(barred_tac_list_importer, 6, db_conn, logger)
5,335,018
def checkSeconds(seconds, timestamp):
    """
    Return a string depending on the value of seconds
    If the block is mined since one hour ago, return timestamp

    FIX: the original never used *timestamp* — blocks older than an hour
    fell into the "Since N sec" branch, contradicting the docstring.
    """
    if seconds >= 3600:
        # Block is at least an hour old: show the full timestamp instead.
        return timestamp
    if seconds > 60:
        minute = int(seconds / 60)
        if minute == 1:
            return '{} minute ago'.format(minute)
        return '{} minutes ago'.format(minute)
    return 'Since {} sec'.format(seconds)
5,335,019
def test_value_in_dict_none(allow_false_empty):
    """
    Test that :py:func:`riboviz.utils.value_in_dict` returns ``False``
    if a key has value ``None`` regardless of the value of
    ``allow_false_empty``.

    :param allow_false_empty: Value for ``allow_false_empty`` \
    parameter
    :type allow_false_empty: bool
    """
    sample = {"A": 1, "B": None, "C": 3}
    outcome = utils.value_in_dict("B", sample, allow_false_empty)
    assert not outcome
5,335,020
def strict_application(
    library: pd.DataFrame, config: Config, errors: list
) -> _DfGenerator:
    """
    Apply the recorded template to each row of reactants in the library to
    make new, false reactions.

    :param library: the reaction library
    :param config: the configuration
    :param errors: a list to fill with strings of produced errors
    :yield: a new DataFrame with a false reaction for each row if a match
        could be found, otherwise None
    """
    for _, library_row in library.iterrows():
        try:
            false_reaction = _apply_forward_reaction(library_row, config)
        except _ReactionException as err:
            # Record the failure and move on to the next row.
            errors.append(str(err))
        else:
            yield false_reaction
5,335,021
def get_netrange_end(asn_cidr):
    """
    :param str asn_cidr: ASN CIDR
    :return: ipv4 address of last IP in netrange
    :rtype: str

    FIX: ``except ValueError, error`` and the ``print`` statement are
    Python 2-only syntax (SyntaxError under Python 3); converted to
    ``except ... as`` and ``print()``.
    """
    try:
        network = ipcalc.Network(asn_cidr)
        # First host address + network size - 2 = last usable address.
        last_in_netrange = \
            ip2long(str(network.host_first())) + network.size() - 2
    except ValueError as error:
        print('Issue calculating size of %s network' % asn_cidr)
        raise error
    return socket.inet_ntoa(struct.pack('!L', last_in_netrange))
5,335,022
def load_from_file(filepath, column_offset=0, prefix='', safe_urls=False,
                   delimiter=r'\s+'):
    """
    Load target entities and their labels if exist from a file.

    :param filepath: Path to the target entities
    :param column_offset: offset to the entities column (optional).
    :param prefix: URI prefix (Ex: https://yago-expr.org) if the data lacks
        one. (needed when using rdflib and/or virtouso) (optional)
    :param safe_urls: Encode URIs if they are not safe for rdflib, eg.
        contains '(' or special chars (optional)
    :param delimiter: splitting delimiter in the file (optional).
        FIX: now a raw string — ``'\\s+'`` relied on Python leaving the
        unknown ``\\s`` escape intact, which raises a
        DeprecationWarning/SyntaxWarning on modern Pythons.
    :return: EntityLabelsInterface object to access the entities and their
        labels and also to use them as triples.
    :rtype: EntitiesLabelsFile
    """
    return EntitiesLabelsFile(filepath, column_offset=column_offset,
                              prefix=prefix, safe_urls=safe_urls,
                              delimiter=delimiter)
5,335,023
def validate_types(validator, schema, services, rsc_type, rscs):
    """Validate the basic types in the schema.

    Go through the schema; find the strings, integers, and enums, change
    them to invalid values and verify that the validator catches the
    errors.

    FIX: dropped the unused ``idx`` from ``enumerate`` and flattened the
    outer guard with ``continue``.
    """
    properties = schema['definitions'][rsc_type]['properties']
    for key, value in properties.items():
        val_type = value.get('type')
        if val_type is None:
            continue
        for rsc in services[rscs]:
            # check strings
            if val_type == 'string':
                validate_string(validator, schema, services, rsc, key, value)
            # check integers
            if val_type == 'integer':
                validate_integer(validator, schema, services, rsc, key, value)
            # check enums
            enum = value.get('enum')
            if enum is not None:
                tmp = rsc.get(key)
                if tmp is not None:
                    rsc[key] = 'This string will match no enums'
                    result = validate(validator, services)
                    assert result == 'Validator Error'
                    rsc[key] = tmp
5,335,024
def create_app(*, config_object: Config) -> connexion.App:
    """Create app instance.

    Builds the connexion wrapper, applies *config_object* to the
    underlying Flask app, and registers the OpenAPI spec.
    """
    application = connexion.App(
        __name__,
        debug=config_object.DEBUG,
        specification_dir="spec/",
    )
    application.app.config.from_object(config_object)
    application.add_api("api.yaml")
    return application
5,335,025
def load_plugins(descr: str, package: str, plugin_class: Any,
                 specs: TList[TDict[str, Any]] = None) -> \
        TDict[Union[str, int], Any]:
    """
    Load and initialize plugins from the given directory

    :param descr: plugin description
    :param package: plugin package name relative to afterglow_core, e.g.
        "resources.data_provider_plugins"
    :param plugin_class: base plugin class
    :param specs: list of plugin specifications: [{"name": "plugin_name",
        "param": value, ...}, ...]; parameters are used to construct the
        plugin class; this can be the value of the corresponding option in
        app config, e.g. DATA_PROVIDERS; if omitted or None, load all
        available plugins without passing any parameters on initialization
        (suitable e.g. for the jobs)

    :return: dictionary containing plugin class instances indexed by their
        unique IDs (both as integers and strings)
    """
    # An explicitly empty spec list means this plugin type is disabled.
    if not specs and specs is not None:
        # No plugins of this type are required
        return {}

    directory = os.path.normpath(os.path.join(
        os.path.dirname(__file__), package.replace('.', os.path.sep)))
    app.logger.debug('Looking for %s plugins in %s', descr, directory)

    # Search for modules within the specified directory
    # noinspection PyBroadException
    try:
        # py2exe/freeze support: when running from a zip archive, list
        # candidate modules out of the archive instead of the filesystem.
        if not isinstance(__loader__, zipimport.zipimporter):
            raise Exception()
        archive = zipfile.ZipFile(__loader__.archive)
        try:
            dirlist = [name for name in archive.namelist()
                       if name.startswith(directory.replace('\\', '/'))]
        finally:
            archive.close()
    except Exception:
        # Normal installation
        # noinspection PyBroadException
        try:
            dirlist = os.listdir(directory)
        except Exception:
            dirlist = []
    dirlist = [os.path.split(name)[1] for name in dirlist]

    plugin_classes = {}
    # Unique module basenames with a Python suffix, excluding __init__.
    for name in {os.path.splitext(f)[0] for f in dirlist
                 if os.path.splitext(f)[1] in PY_SUFFIXES
                 and os.path.splitext(f)[0] != '__init__'}:
        # noinspection PyBroadException
        try:
            app.logger.debug('Checking module "%s"', name)
            # A potential plugin module is found; load it
            m = __import__(
                'afterglow_core.' + package + '.' + name,
                globals(), locals(), ['__dict__'])

            try:
                # Check only names listed in __all__
                items = (m.__dict__[_name] for _name in m.__dict__['__all__'])
            except KeyError:
                # If no __all__ is present in the module, check all globals
                items = m.__dict__.values()

            # Scan all items defined in the module, looking for classes
            # derived from "plugin_class"
            for item in items:
                try:
                    # A valid plugin class is a strict subclass of
                    # plugin_class, defines a string-valued polymorphic
                    # identity attribute, and is defined in this module
                    # (not merely imported into it).
                    if issubclass(item, plugin_class) and \
                            item is not plugin_class and \
                            getattr(item, '__polymorphic_on__', None) and \
                            hasattr(item, item.__polymorphic_on__) and \
                            isinstance(
                                getattr(item, item.__polymorphic_on__),
                                str) and \
                            item.__module__ == m.__name__:
                        plugin_classes[
                            getattr(item, item.__polymorphic_on__)] = item
                        app.logger.debug(
                            'Found %s plugin "%s"', descr,
                            getattr(item, item.__polymorphic_on__))
                except TypeError:
                    # issubclass() raises TypeError for non-class items.
                    pass
        except Exception:
            # Ignore modules that could not be imported
            app.logger.debug(
                'Could not import module "%s"', name, exc_info=True)

    plugins = {}
    if specs is None:
        # Initialize all available plugins without any options
        for name, klass in plugin_classes.items():
            # Initialize plugin instance; provide the polymorphic field equal
            # to plugin name to instantiate the appropriate subclass instead
            # of the base plugin class
            try:
                instance = klass(
                    _set_defaults=True,
                    **{klass.__polymorphic_on__:
                       getattr(klass, klass.__polymorphic_on__)})
            except Exception:
                app.logger.exception(
                    'Error loading %s plugin "%s"', descr, name)
                raise
            add_plugin(plugins, descr, instance)
    else:
        # Instantiate only the given plugins using the specified display names
        # and options
        for id, spec in enumerate(specs):
            try:
                name = spec.pop('name')
            except (TypeError, KeyError):
                raise RuntimeError(
                    'Missing name in {} plugin spec ({})'.format(descr, spec))
            try:
                klass = plugin_classes[name]
            except KeyError:
                raise RuntimeError(
                    'Unknown {} plugin "{}"'.format(descr, name))

            # Initialize plugin instance using the provided parameters
            try:
                instance = klass(**spec)
            except Exception:
                app.logger.exception(
                    'Error loading %s plugin "%s" with options %s',
                    descr, name, spec)
                raise
            add_plugin(plugins, descr, instance, id)

    return plugins
5,335,026
def _get_repos_info(db: Session, user_id: int):
    """Returns data for all starred repositories for a user.

    The return is in a good format for the frontend.

    Args:
        db (Session): sqlAlchemy connection object
        user_id (int): User id

    Returns:
        list[Repository(dict)]: one dict per starred repo with keys
        "id", "github_repo_id", "name", "description", "html_url" and
        "tags" (list[dict]).
    """
    starred = _get_repos_in_db(db=db, user_id=user_id, only_starred_repos=True)
    return [
        {
            "id": repo.id,
            "github_repo_id": repo.github_repo_id,
            "name": repo.name,
            "description": repo.description,
            "html_url": repo.html_url,
            "tags": _get_all_tags_in_repo(repo_id=repo.id, db=db),
        }
        for repo in starred
    ]
5,335,027
def handler(signum, _):
    """Process signal handler.

    SIGUSR1 triggers a verbose live reload of the controller; any other
    handled signal shuts the controller down and exits the process.
    """
    if(signum == signal.SIGUSR1):
        if controller:
            controller.reload(verbose=True)
        return
    # NOTE(review): "interupt" typo left as-is — it is runtime output.
    print(f' interupt has been caught ({signum}), shutting down...')
    if controller:
        controller.shutdown()
    exit(0)
5,335,028
def ultimate_oscillator(close_data, low_data):
    """
    Ultimate Oscillator.

    Formula:
    UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1)
    """
    # Weighted sum of the three buying-pressure averages; weights 4+2+1 = 7.
    weighted = (4 * average_7(close_data, low_data)
                + 2 * average_14(close_data, low_data)
                + average_28(close_data, low_data))
    return 100 * (weighted / 7)
5,335,029
async def test_ws_setup_depose_mfa(opp, opp_ws_client):
    """Test set up mfa module for current user.

    Exercises the websocket setup flow end-to-end (missing module id,
    full pin setup) and then the depose flow (invalid module id, then a
    successful disable).
    """
    # Auth manager with one example provider/user and one example MFA module.
    opp.auth = await auth_manager_from_config(
        opp,
        provider_configs=[
            {
                "type": "insecure_example",
                "users": [
                    {
                        "username": "test-user",
                        "password": "test-pass",
                        "name": "Test Name",
                    }
                ],
            }
        ],
        module_configs=[
            {
                "type": "insecure_example",
                "id": "example_module",
                "data": [{"user_id": "mock-user", "pin": "123456"}],
            }
        ],
    )
    ensure_auth_manager_loaded(opp.auth)
    await async_setup_component(opp, "auth", {"http": {}})

    # Create a logged-in user with a valid access token for the WS client.
    user = MockUser(id="mock-user").add_to_opp(opp)
    cred = await opp.auth.auth_providers[0].async_get_or_create_credentials(
        {"username": "test-user"}
    )
    await opp.auth.async_link_user(user, cred)
    refresh_token = await opp.auth.async_create_refresh_token(user, CLIENT_ID)
    access_token = opp.auth.async_create_access_token(refresh_token)

    client = await opp_ws_client(opp, access_token)

    # Setup without an mfa_module_id must fail with "no_module".
    await client.send_json({"id": 10, "type": mfa_setup_flow.WS_TYPE_SETUP_MFA})

    result = await client.receive_json()
    assert result["id"] == 10
    assert result["success"] is False
    assert result["error"]["code"] == "no_module"

    # Starting setup for the example module returns the init form.
    await client.send_json(
        {
            "id": 11,
            "type": mfa_setup_flow.WS_TYPE_SETUP_MFA,
            "mfa_module_id": "example_module",
        }
    )

    result = await client.receive_json()
    assert result["id"] == 11
    assert result["success"]

    flow = result["result"]
    assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow["handler"] == "example_module"
    assert flow["step_id"] == "init"
    assert flow["data_schema"][0] == {"type": "string", "name": "pin"}

    # Submitting the pin completes the flow with a create-entry result.
    await client.send_json(
        {
            "id": 12,
            "type": mfa_setup_flow.WS_TYPE_SETUP_MFA,
            "flow_id": flow["flow_id"],
            "user_input": {"pin": "654321"},
        }
    )

    result = await client.receive_json()
    assert result["id"] == 12
    assert result["success"]

    flow = result["result"]
    assert flow["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert flow["handler"] == "example_module"
    assert flow["data"]["result"] is None

    # Deposing an unknown module id must fail with "disable_failed".
    await client.send_json(
        {
            "id": 13,
            "type": mfa_setup_flow.WS_TYPE_DEPOSE_MFA,
            "mfa_module_id": "invalid_id",
        }
    )

    result = await client.receive_json()
    assert result["id"] == 13
    assert result["success"] is False
    assert result["error"]["code"] == "disable_failed"

    # Deposing the module we just set up succeeds.
    await client.send_json(
        {
            "id": 14,
            "type": mfa_setup_flow.WS_TYPE_DEPOSE_MFA,
            "mfa_module_id": "example_module",
        }
    )

    result = await client.receive_json()
    assert result["id"] == 14
    assert result["success"]
    assert result["result"] == "done"
5,335,030
def generate_csrf(request: StarletteRequest, secret_key: str, field_name: str):
    """Return a time-signed CSRF token for the given request.

    The raw token is stored in the session under ``field_name``; the signed
    form is cached on ``request.state`` so repeated calls within a single
    request return the same value.

    Args:
        request (:class:`starlette.requests.Request`): The request instance.
        secret_key (str): The signing key.
        field_name (str): Where the token is stored in the session.

    Returns:
        str: The time-signed token
    """
    if hasattr(request.state, field_name):
        # Already generated during this request -- reuse the cached token.
        return getattr(request.state, field_name)

    # Starlette config wraps secrets in ``Secret``; the serializer needs str.
    key = str(secret_key) if isinstance(secret_key, Secret) else secret_key
    serializer = URLSafeTimedSerializer(key, salt='wtf-csrf-token')

    session = request.session
    if field_name not in session:
        session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
    try:
        signed = serializer.dumps(session[field_name])
    except TypeError:
        # Session held a non-serializable value; regenerate the raw token.
        session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
        signed = serializer.dumps(session[field_name])

    setattr(request.state, field_name, signed)
    return signed
5,335,031
def get_completions():
    """ Returns the global completion list. """
    # Returns the module-level ``completionList`` directly (not a copy), so
    # callers share mutations with whatever code maintains that list.
    return completionList
5,335,032
def list_pars(names_only=True, kind=None):
    """
    Print all parameters in all models.

    If *names_only* then only print the parameter name, not the models it
    occurs in.
    """
    partable = find_pars(kind)
    if names_only:
        # Just the parameter names, formatted into columns.
        print(columnize(list(sorted(partable.keys()))))
        return
    # Otherwise show each parameter together with the models using it.
    for par_name, model_names in sorted(partable.items()):
        print("%s: %s" % (par_name, ", ".join(model_names)))
5,335,033
def breadth_first_search():
    """Solve the missionaries-and-cannibals puzzle with breadth-first search.

    Starts from the state (3 missionaries, 3 cannibals, boat on the left
    bank) and explores successor states level by level.

    Returns:
        The goal ``State`` if one is reachable, otherwise ``None``.
    """
    initial_state = State(3, 3, "left", 0, 0)
    if initial_state.is_goal():
        return initial_state
    frontier = []
    explored = set()
    frontier.append(initial_state)
    while frontier:
        state = frontier.pop(0)  # FIFO pop gives breadth-first order
        if state.is_goal():
            return state
        explored.add(state)
        for child in successors(state):
            # Bug fix: the original used ``or`` here, which is true whenever
            # the child is missing from EITHER collection, so already-explored
            # states were re-enqueued. Both conditions must hold.
            if (child not in explored) and (child not in frontier):
                frontier.append(child)
    return None
5,335,034
def rankSimilarity(df, top = True, rank = 3):
    """
    Returns the most similar documents or least similar documents

    args:
        df (pandas.DataFrame): row, col = documents, value = boolean similarity
            (assumed symmetric, with self-similarity on the diagonal)
        top (boolean): True: most, False: least (default = True)
        rank (int): number of top or bottom (default = 3)

    returns:
        pandas.DataFrame: row = rank, columns = indices, names, value
    """
    df2 = df.copy(deep = True)
    # Bug fix: ``DataFrame.as_matrix`` was removed in pandas 1.0;
    # ``to_numpy`` is the supported replacement.
    df_np = df2.to_numpy()
    if top:
        # Mask the diagonal so a document is never ranked against itself.
        np.fill_diagonal(df_np, -1)
    results_dic = {"indices": [], "names": [], "value": []}
    for n in range(rank):
        if top:
            # First occurrence of the maximum, as a (row, col) pair.
            indices = np.unravel_index(df_np.argmax(), df_np.shape)
        else:
            # First occurrence of the minimum, as a (row, col) pair.
            indices = np.unravel_index(df_np.argmin(), df_np.shape)
        results_dic["indices"].append(indices)
        results_dic["names"].append((df.index[indices[0]], df.index[indices[1]]))
        results_dic["value"].append(df.iloc[indices])
        if top:
            # Knock out this (symmetric) pair so the next argmax finds a new one.
            df_np[indices[0], indices[1]] = -1
            df_np[indices[1], indices[0]] = -1  # because symmetric
        else:
            df_np[indices[0], indices[1]] = 1  # set to 1 to find the next min
            df_np[indices[1], indices[0]] = 1  # because symmetric
    df_result = pandas.DataFrame(results_dic, index = range(1, rank + 1))
    df_result.index.name = "rank"
    return df_result
5,335,035
def create_database():
    """
    If this script is run directly, create all the tables necessary to run
    the application.
    """
    try:
        Episodes.create_table()
    # Bug fix: a bare ``except:`` also swallows SystemExit and
    # KeyboardInterrupt; catch only genuine errors.
    except Exception:
        logi("Error happened.")
    # NOTE(review): this prints even when table creation failed above --
    # confirm that is intended.
    print("All tables created")
5,335,036
def vvd(val, valok, dval, func, test, status):
    """Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
    # ``func``, ``test`` and ``status`` are accepted only to mirror the C
    # routine's signature; the check compares ``val`` against ``valok``
    # (given the same unit as ``val``) within an absolute tolerance ``dval``.
    assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
5,335,037
def change_controller(move_group, second_try=False):
    """
    Changes between motor controllers

    move_group -> Name of required move group.
    second_try -> Internal flag set on the single automatic retry after a
        failed switch; callers should not pass it.

    Returns True if the wanted controller ends up running, False otherwise.
    """
    global list_controllers_service
    global switch_controllers_service

    # Maps each move group onto the ros_control controller that drives it.
    controller_map = {
        'gripper': 'cartesian_motor_controller',
        'whole_arm': 'cartesian_motor_controller',
        'realsense': 'cartesian_motor_controller_realsense',
        'sucker': 'cartesian_motor_controller_sucker',
        'wrist_only': 'cartesian_motor_controller_wrist'
    }

    rospy.loginfo('SWITCHING CONTROLLERS')

    if move_group not in controller_map:
        rospy.logerr('%s is not a valid move group for switching controllers' % move_group)
        return False

    wanted_controller = controller_map[move_group]

    c_list = list_controllers_service.call()
    running_controllers = []
    for c in c_list.controller:
        # The joint state publisher keeps running regardless of the switch.
        if c.name == 'joint_state_controller':
            continue
        if c.name == wanted_controller and c.state == 'running':
            rospy.loginfo('Controller %s is already running' % wanted_controller)
            return True
        if c.state == 'running':
            running_controllers.append(c.name)

    controllerSwitch = cmsv.SwitchControllerRequest()
    controllerSwitch.strictness = 1
    controllerSwitch.start_controllers = [wanted_controller]
    controllerSwitch.stop_controllers = running_controllers

    # Return True if controller was successfully switched
    res = switch_controllers_service(controllerSwitch).ok

    if res:
        rospy.loginfo('Successfully switched controllers for move group %s' % move_group)
        return res
    elif not second_try:  # idiom fix: was ``second_try == False``
        # One automatic retry after a short pause before giving up.
        rospy.logerr('Failed to switch controllers for move group %s' % move_group)
        rospy.sleep(1.0)
        return change_controller(move_group, True)
    else:
        return False
5,335,038
def base64_encode(text):
    """<string> -- Encode <string> with base64."""
    raw_bytes = text.encode()
    encoded_bytes = base64.b64encode(raw_bytes)
    return encoded_bytes.decode()
5,335,039
def test_netconf_edit_config_bad_operation(ssh, nornir, sros_config_payload): """Test NETCONF edit-config, unsupported default operation.""" # Create Fake RPC Object class. Set 'ok' attr to True. response_rpc = FakeRpcObject() response_rpc.set_ok(set=True) # Create a Mock Object. Assign 'edit-config' method and response # as the Fake RPC Object. response = MagicMock() response.edit_config.return_value = response_rpc ssh.return_value = response nr = nornir.filter(name="netconf2") result = nr.run(netconf_edit_config, target="candidate", config=sros_config_payload, default_operation="MARGE") # print_result(result) assert result["netconf2"].failed
5,335,040
def _signed_bin(n):
    """Transform n into an optimized signed binary representation"""
    # Produces digits in {-1, 0, +1}, most significant first, chosen to
    # maximise the number of zero digits (useful e.g. for fast
    # exponentiation / scalar multiplication with fewer operations).
    r = []
    while n > 1:
        if n & 1:
            # ``_gbd`` presumably measures the run of low-order zero bits of
            # its argument -- TODO confirm its contract where it is defined.
            cp = _gbd(n + 1)
            cn = _gbd(n - 1)
            if cp > cn:         # -1 leaves more zeroes -> subtract -1 (= +1)
                r.append(-1)
                n += 1
            else:               # +1 leaves more zeroes -> subtract +1 (= -1)
                r.append(+1)
                n -= 1
        else:
            r.append(0)         # be glad about one more zero
        n >>= 1
    r.append(n)
    # Digits were collected least-significant first; reverse before return.
    return r[::-1]
5,335,041
def hurricanes():
    """Manages hurricanes indices"""
    # Intentionally empty: this is only an entry point / group; actual
    # subcommands are presumably registered elsewhere.
    pass
5,335,042
def load_tests(loader, tests, pattern):
    """Provide a TestSuite to the discovery process."""
    # Build declarative (gabbi-style) tests from the files in the directory
    # named by the module-level ``name``, next to this module. Host, port
    # and prefix come from the module-level ``data`` mapping.
    test_dir = os.path.join(os.path.dirname(__file__), name)
    return driver.build_tests(test_dir, loader, host=data['host'],
                              port=data['port'], prefix=data['prefix'],
                              fixture_module=fixtures)
5,335,043
def powderfit(powder, scans=None, peaks=None, ki=None, dmono=3.355,
              spacegroup=1):
    """Fit powder peaks of a powder sample to calibrate instrument wavelength.

    First argument is either a string that names a known material (currently
    only ``'YIG'`` is available) or a cubic lattice parameter.

    Then you need to give either scan numbers (*scans*) or peak positions
    (*peaks*) and a neutron wavevector (*ki*).

    Examples:

    >>> powderfit('YIG', scans=[1382, 1383, 1384, ...])
    >>> powderfit(12.377932, peaks=[45.396, 61.344, 66.096, ...], ki=1.4)

    As a further argument, *dmono* is the lattice constant of the
    monochromator (only used to calculate monochromator 2-theta offsets), it
    defaults to PG (3.355 A).
    """
    maxhkl = 10   # max H/K/L to consider when looking for d-values
    maxdd = 0.2   # max distance in d-value when looking for peak indices
    ksteps = 50   # steps with different ki
    dki = 0.002   # relative ki stepsize

    if powder == 'YIG':
        a = 12.377932
        spacegroup = 230
        session.log.info('YIG: using cubic lattice constant of %.6f A', a)
        session.log.info('')
    else:
        if not isinstance(powder, float):
            raise UsageError('first argument must be either "YIG" or a '
                             'lattice constant')
        a = powder

    sg = get_spacegroup(spacegroup)

    # calculate (possible) d-values
    # loop through some hkl-sets, also consider higher harmonics...
    dhkls = {}
    for h in range(maxhkl):
        for k in range(maxhkl):
            for l in range(maxhkl):
                if h + k + l > 0:
                    # assume all reflections are possible
                    if not can_reflect(sg, h, k, l):
                        continue
                    G = sqrt(h*h + k*k + l*l)
                    # Map each candidate d-value (and its harmonics) to a
                    # printable hkl label.
                    dhkls[a/G] = '(%d %d %d)' % (h, k, l)
                    dhkls[a/(2*G)] = '(%d %d %d)/2' % (h, k, l)
                    dhkls[a/(3*G)] = '(%d %d %d)/3' % (h, k, l)
                    dhkls[a/(4*G)] = '(%d %d %d)/4' % (h, k, l)
                    dhkls[a/(5*G)] = '(%d %d %d)/5' % (h, k, l)

    # generate list from dict
    dvals = sorted(dhkls)

    # fit and helper functions
    def dk2tt(d, k):
        # Two-theta (degrees) from d-value and wavevector via Bragg's law.
        return 2.0 * degrees(arcsin(pi/(d * k)))

    def model(x, k, stt0):
        return stt0 + dk2tt(x, k)

    # ``data`` maps ki -> list of [assigned_d, position, error, label] peaks.
    data = {}
    if not peaks:
        if not scans:
            raise UsageError('please give either scans or peaks argument')
        for dataset in session.experiment.data.getLastScans():
            num = dataset.counter
            if num not in scans:
                continue
            res = _extract_powder_data(num, dataset)
            session.log.debug('powder_data from %d: %s', num, res)
            if res:
                ki, peaks = res
                data.setdefault(ki, []).extend([None, p, dp, '#%d ' % num]
                                               for (p, dp) in peaks)
        if not data:
            session.log.warning('no data found, check the scan numbers!')
            return
    else:
        if scans:
            raise UsageError('please give either scans or peaks argument')
        if not ki:
            raise UsageError('please give ki argument together with peaks')
        data[float(ki)] = [[None, p, 0.1, ''] for p in peaks]

    beststt0s = []
    bestmtt0s = []
    bestrms = 1.0
    bestlines = []
    orig_data = data

    # Scan j over symmetric relative offsets of ki (0, ±1, ±2, ... steps)
    # and keep the assignment/fit with the lowest RMS.
    for j in [0] + [i * s for i in range(1, ksteps) for s in (-1, 1)]:
        out = []
        p = out.append
        data = deepcopy(orig_data)
        # now iterate through data (for all ki and for all peaks) and try to
        # assign a d-value assuming the ki not to be completely off!
        for ki1 in sorted(data):
            new_ki = ki1 + j*dki*ki1
            # iterate over ki specific list, start at last element
            for el in reversed(data[ki1]):
                tdval = pi/new_ki/sin(abs(radians(el[1]/2.)))  # dvalue from scan
                distances = [(abs(d-tdval), i) for (i, d) in enumerate(dvals)]
                mindist = min(distances)
                if mindist[0] > maxdd:
                    # No candidate hkl close enough: drop this peak.
                    p('%speak at %7.3f -> no hkl found' % (el[3], el[1]))
                    data[ki1].remove(el)
                else:
                    el[0] = dvals[mindist[1]]
                    if el[1] < 0:
                        el[0] *= -1
                    p('%speak at %7.3f could be %s at d = %-7.4f'
                      % (el[3], el[1], dhkls[abs(el[0])], el[0]))
        p('')

        restxt = []
        restxt.append('___final_results___')
        restxt.append('ki_exp #peaks | ki_fit dki_fit mtt_0 lambda | '
                      'stt_0 dstt_0 | chisqr')
        stt0s = []
        mtt0s = []
        rms = 0
        for ki1 in sorted(data):
            new_ki = ki1 + j*dki*ki1
            peaks = data[ki1]
            failed = True
            # Need at least 3 assigned peaks to fit the 2-parameter model.
            if len(peaks) > 2:
                fit = Fit('ki', model, ['ki', 'stt0'], [new_ki, 0])
                res = fit.run([el[0] for el in peaks],
                              [el[1] for el in peaks],
                              [el[2] for el in peaks])
                failed = res._failed
            if failed:
                restxt.append('%4.3f %-6d | No fit!' % (ki1, len(peaks)))
                # Penalise failed fits heavily so they never become "best".
                rms += 1e6
                continue
            mtt0 = dk2tt(dmono, res.ki) - dk2tt(dmono, ki1)
            restxt.append('%5.3f %-6d | %-7.4f %-7.4f %-7.4f %-7.4f | '
                          '%-7.4f %-7.4f | %.2f' %
                          (ki1, len(peaks), res.ki, res.dki, mtt0,
                           2*pi/res.ki, res.stt0, res.dstt0, res.chi2))
            stt0s.append(res.stt0)
            mtt0s.append(mtt0)
            peaks_fit = [model(el[0], res.ki, res.stt0) for el in peaks]
            p('___fitted_peaks_for_ki=%.3f___' % ki1)
            p('peak dval measured fitpos delta')
            for i, el in enumerate(peaks):
                p('%-10s %7.3f %7.3f %7.3f %7.3f%s' % (
                    dhkls[abs(el[0])], el[0], el[1], peaks_fit[i],
                    peaks_fit[i] - el[1],
                    '' if abs(peaks_fit[i] - el[1]) < 0.10 else " **"))
            p('')
            rms += sum((pobs - pfit)**2 for (pobs, pfit)
                       in zip([el[1] for el in peaks], peaks_fit)) / len(peaks)
        out.extend(restxt)

        session.log.debug('')
        session.log.debug('-' * 80)
        session.log.debug('result from run with j=%d (RMS = %g):', j, rms)
        for line in out:
            session.log.debug(line)

        if rms < bestrms:
            beststt0s = stt0s
            bestmtt0s = mtt0s
            bestrms = rms
            bestlines = out
            session.log.debug('')
            session.log.debug('*** new best result: RMS = %g', rms)

    if not beststt0s:
        session.log.warning('No successful fit results!')
        if ki is not None:
            session.log.warning('Is the initial guess for ki too far off?')
        return
    for line in bestlines:
        session.log.info(line)
    meanstt0 = sum(beststt0s)/len(beststt0s)
    meanmtt0 = sum(bestmtt0s)/len(bestmtt0s)
    session.log.info('Check errors (dki, dstt0)! RMS = %.3g', bestrms)
    session.log.info('')
    session.log.info('Adjust using:')
    # TODO: fix suggestions using adjust()
    session.log.info('mtt.offset += %.4f', meanmtt0)
    session.log.info('mth.offset += %.4f', meanmtt0 / 2)
    session.log.info('stt.offset += %.4f', meanstt0)
    return CommandLineFitResult((meanmtt0, meanstt0))
5,335,044
def replace_me(value, as_comment=False):
    """
    ** ATTENTION ** CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.

    Replaces the current source code line with the given `value`, while keeping
    the indentation level.

    If `as_comment` is True, then `value` is inserted as a Python comment and
    pretty-printed.

    Because inserting multi-line values changes the following line numbers,
    don't mix multiple calls to `replace_me` with multi-line values.
    """
    # Frame of the *caller* -- the line to rewrite is where the call happened.
    caller = getframeinfo(stack()[1][0])
    if caller.filename == '<stdin>':
        raise ValueError("Can't use `replace_me` module in interactive interpreter.")
    with open(caller.filename, 'r+', encoding='utf-8') as f:
        lines = f.read().split('\n')
        # Capture the call line's leading whitespace to keep indentation.
        spaces, = re.match(r'^(\s*)', lines[caller.lineno-1]).groups()
        if as_comment:
            if not isinstance(value, str):
                value = pformat(value, indent=4)
            value_lines = value.rstrip().split('\n')
            value_lines = (spaces + '# ' + l for l in value_lines)
        else:
            value_lines = (spaces + l for l in str(value).split('\n'))
        lines[caller.lineno-1] = '\n'.join(value_lines)
        # Rewrite the whole file in place with the substituted line(s).
        f.seek(0)
        f.truncate()
        f.write('\n'.join(lines))
5,335,045
def get_neighbor_v6_by_ids(obj_ids):
    """Return NeighborV6 list by ids.

    Args:
        obj_ids: List of Ids of NeighborV6's.
    """
    found_ids = []
    for obj_id in obj_ids:
        try:
            # Resolve each id individually so a missing one maps to a
            # precise "does not exist" API error.
            found_ids.append(get_neighbor_v6_by_id(obj_id).id)
        except exceptions.NeighborV6DoesNotExistException as e:
            raise api_rest_exceptions.ObjectDoesNotExistException(str(e))
        except Exception as e:
            raise api_rest_exceptions.NetworkAPIException(str(e))

    return NeighborV6.objects.filter(id__in=found_ids)
5,335,046
def decode_b64_to_image(b64_str: str) -> tuple:
    """Decode a base64 string into an OpenCV image.

    Intended for three-channel colour images (``cv2.imdecode`` flag 1,
    i.e. ``IMREAD_COLOR``).

    Args:
        b64_str: Base64 payload, optionally prefixed with a data-URI header;
            everything up to and including the first ``,`` is stripped.

    Returns:
        tuple: ``(ok, image)`` -- ``(True, ndarray)`` on success,
        ``(False, None)`` when OpenCV fails to decode the buffer.
    """
    # Strip a data-URI prefix such as "data:image/png;base64," if present.
    # (The original had a redundant ``else: b64_str = b64_str`` branch.)
    if "," in b64_str:
        b64_str = b64_str.partition(",")[-1]
    try:
        raw = base64.b64decode(b64_str)
        # NOTE(review): uint8 is the conventional dtype for imdecode; int8
        # is kept for byte-identical behaviour -- the underlying bytes are
        # the same. Malformed base64 still raises binascii.Error (unchanged).
        return True, cv2.imdecode(np.frombuffer(raw, dtype=np.int8), 1)
    except cv2.error:
        return False, None
5,335,047
def check_propag_dists(dims, dx, t, v_min, v_max, f_min, f_max):
    """
    Calculate propagation distance across the model covered by the fastest
    waves, and log a human-readable summary.

    Parameters
    ----------
    dims : tuple
        Model size in grid nodes, as (nx1, nx2, nx3).
    dx : float
        Size of the spatial grid cell [m].
    t : float
        Propagation time [s].
    f_min : float
        Min. frequency present in the source function.
    f_max : float
        Max. frequency present in the source function.
    v_min : float
        Min. vel. of the true model.
    v_max : float
        Max. vel. of the true model.

    Returns
    -------
    None
        The distances are only logged through ``check_propag_dists._log``,
        a logger attribute expected to be attached to this function
        elsewhere.

    Notes
    -----
    d1-d3 are based on the fastest velocity ``v_max``; the node-count
    distance d4 uses ``v_min`` -- presumably intentional as a conservative
    bound, but worth confirming.
    """
    nx1, nx2, nx3 = dims
    assert f_min > 0 and f_max > 0
    shortest_wavelength = v_min / f_max
    longest_wavelength = v_max / f_min
    d1 = v_max * t                         # dist, m
    d2 = v_max * t / shortest_wavelength   # dist_per_shortest_wavelength
    d3 = v_max * t / longest_wavelength    # dist_per_longest_wavelength
    d4 = v_min * t / dx                    # dist_in_nodes
    d5 = d4 / nx1                          # dist_as_fraction_nx1
    d6 = d4 / nx2                          # dist_as_fraction_nx2
    d7 = d4 / nx3                          # dist_as_fraction_nx3
    text = '\n\n'
    text += 'Assuming t = ' + str(t) + ' s, the fastest wave will cover '
    text += "{0:.1f}".format(d1) + ' m, which corresponds to: \n'
    text += "{0:6.1f}".format(d2) + ' shortest wavelengths \n'
    text += "{0:6.1f}".format(d3) + ' longest wavelengths \n'
    text += "{0:6.1f}".format(d4) + ' nodes \n'
    text += "{0:6.1f}".format(d5) + ' model-sizes in X direction \n'
    text += "{0:6.1f}".format(d6) + ' model-sizes in Y direction \n'
    text += "{0:6.1f}".format(d7) + ' model-sizes in Z direction \n'
    #.ljust(7, ' ')
    check_propag_dists._log.info(text)
5,335,048
def _get_index_videos(course, pagination_conf=None):
    """
    Returns the information about each video upload required for the video list
    """
    course_id = str(course.id)
    # Video attributes the index page needs, in display order.
    attrs = [
        'edx_video_id', 'client_video_id', 'created', 'duration',
        'status', 'courses', 'transcripts', 'transcription_status',
        'error_description'
    ]

    def _get_values(video):
        """
        Get data for predefined video attributes.
        """
        values = {}
        for attr in attrs:
            if attr == 'courses':
                # Each entry of video['courses'] is a single-item mapping of
                # course id -> image URL; pick this course's entry and unpack
                # the lone (course_id, image_url) pair from it.
                course = [c for c in video['courses'] if course_id in c]
                (__, values['course_video_image_url']), = list(course[0].items())
            else:
                values[attr] = video[attr]
        return values

    videos, pagination_context = _get_videos(course, pagination_conf)

    return [_get_values(video) for video in videos], pagination_context
5,335,049
def launch_emulator_win(emulator_id='Pixel_4_API_26', debug=False):
    """
    This for **Windows OS**

    The Function will launch the emulator for testing if needed.
    If id is not explicitly provided then the default *Pixel-4 emulator with
    API 26* will launch.

    :param emulator_id: pass id of the emulator that you want to start.
    :param debug: pass true for enabling debugging outputs.
    :return: None.
    """
    if debug:
        print('[D] Launching Emulator with id:{}'.format(emulator_id))
    # NOTE(review): emulator_id is interpolated into a shell command via
    # os.popen -- fine for trusted ids, unsafe for arbitrary input.
    emu = os.popen('Flutter emulators --launch {}'.format(emulator_id))
    # Reading the pipe blocks until the launch command produces its output.
    print(emu.read())
5,335,050
def get_user_by_api_key(api_key, active_only=False):
    """
    Get a User object by api_key, whose attributes match those in the database.

    :param api_key: API key to query by
    :param active_only: Set this flag to True to only query for active users
    :return: User object for that user ID
    :raises UserDoesNotExistException: If no user exists with the given user_id
    """
    # Build the filter criteria once instead of duplicating the query.
    criteria = {'api_key': api_key}
    if active_only:
        criteria['is_active'] = True
    user = models.User.query.filter_by(**criteria).first()
    if user is None:
        raise UserDoesNotExistException('No user with api_key {api_key} exists'.format(api_key=api_key))
    return user
5,335,051
def get_pixel_values_of_line(img, x0, y0, xf, yf):
    """Return the pixel values along a straight line between two pixels.

    Parameters
    ----------
    img : np.array
        Image as a 2d np.array.
    x0, y0 : int
        Row and column of the starting pixel.
    xf, yf : int
        Row and column of the ending pixel.

    Returns
    -------
    np.array
        1d array holding the values of the chosen line of pixels.
    """
    # ``draw.line`` yields the row/column indices of a rasterised line.
    rows, cols = np.array(draw.line(x0, y0, xf, yf))
    return img[rows, cols]
5,335,052
def _filter_out_variables_not_in_dataframe(X, variables): """Filter out variables that are not present in the dataframe. Function removes variables that the user defines in the argument `variables` but that are not present in the input dataframe. Useful when ussing several feature selection procedures in a row. The dataframe input to the first selection algorithm likely contains more variables than the input dataframe to subsequent selection algorithms, and it is not possible a priori, to say which variable will be dropped. Parameters ---------- X: pandas DataFrame variables: string, int or list of (strings or int). Returns ------- filtered_variables: List of variables present in `variables` and in the input dataframe. """ # When variables is not defined, keep it like this and return None. if variables is None: return None # If an integer or a string is provided, convert to a list. if not isinstance(variables, list): variables = [variables] # Filter out elements of variables that are not in the dataframe. filtered_variables = [var for var in variables if var in X.columns] # Raise an error if no column is left to work with. if len(filtered_variables) == 0: raise ValueError( "After filtering no variable remaining. At least 1 is required." ) return filtered_variables
5,335,053
def file_format(input_files):
    """
    Classify input files as fasta/fastq/invalid by their first character.

    Files (optionally gzip-compressed) starting with ``>`` are fasta and
    ``@`` are fastq; anything else is invalid.

    Args:
        input_files: iterable of file paths.

    Returns:
        tuple: (fasta_files, fastq_files, invalid_files) lists of paths, in
        input order.
    """
    fasta_files = []
    fastq_files = []
    invalid_files = []
    # Open all input files and get the first character
    for infile in input_files:
        # Try gzip first; reading a non-gzip file through gzip raises
        # OSError, in which case we fall back to a plain binary open.
        # (Bug fix: ``with`` guarantees the handles are closed -- the old
        # code leaked the gzip handle on fallback and the fallback handle
        # was closed via the wrong reference path.)
        try:
            with gzip.open(infile, "rb") as f:
                fst_char = f.read(1)
        except OSError:
            with open(infile, "rb") as f:
                fst_char = f.read(1)
        # Return file format based in first char
        if fst_char == b'@':
            fastq_files.append(infile)
        elif fst_char == b'>':
            fasta_files.append(infile)
        else:
            invalid_files.append(infile)
    return (fasta_files, fastq_files, invalid_files)
5,335,054
def autodiscover():
    """
    Goes and imports the permissions submodule of every app in INSTALLED_APPS
    to make sure the permission set classes are registered correctly.
    """
    global LOADING
    # Re-entrancy guard: the imports below may trigger autodiscover again.
    if LOADING:
        return
    LOADING = True

    # Bug fix: the ``imp`` module is deprecated since 3.4 and removed in
    # 3.12; ``importlib.util.find_spec`` performs the same existence check.
    import importlib.util

    from django.conf import settings
    for app in settings.INSTALLED_APPS:
        try:
            __import__(app)
            # Only packages have __path__; skip plain modules.
            app_path = sys.modules[app].__path__
        except AttributeError:
            continue
        try:
            spec = importlib.util.find_spec("%s.permissions" % app)
        except ModuleNotFoundError:
            continue
        if spec is None:
            # App has no permissions submodule -- nothing to register.
            continue
        __import__("%s.permissions" % app)
    LOADING = False
5,335,055
def sub_vector(v1: Vector3D, v2: Vector3D) -> Vector3D:
    """Subtract vector v2 from vector v1 component-wise.

    Keyword arguments:
    v1 -- Vector 1 (minuend)
    v2 -- Vector 2 (subtrahend)
    """
    # Component-wise difference over the three spatial axes.
    return [v1[axis] - v2[axis] for axis in range(3)]
5,335,056
def create_validity_dict(validity_period):
    """Convert a validity period string into a dict for issue_certificate().

    Args:
        validity_period (str): How long the signed certificate should be
            valid for, as a number followed by a unit suffix:
            'd' (days), 'm' (months) or 'y' (years), e.g. "30d"

    Returns:
        dict: A dict {"Value": number, "Type": "string" } representation of
        the validity period

    Raises:
        ValueError: If the suffix is not one of 'd', 'm' or 'y', or the
            numeric part is not an integer.
    """
    validity_suffix = validity_period[-1:]
    unit_by_suffix = {"d": "DAYS", "m": "MONTHS", "y": "YEARS"}
    if validity_suffix not in unit_by_suffix:
        # Bug fix: an unknown suffix previously crashed later with an
        # UnboundLocalError on validity_unit; fail fast with a clear message.
        raise ValueError(
            "validity_period must end with 'd', 'm' or 'y', got: %r"
            % (validity_period,)
        )
    return {"Value": int(validity_period[:-1]),
            "Type": unit_by_suffix[validity_suffix]}
5,335,057
def analyse_latency(cid):
    """
    Parse the resolve_time and download_time info from cid_latency.txt

    :param cid: cid of the object
    :return: time to resolve the source of the content and time to download
        the content (strings taken verbatim from the file; 0 when absent)
    """
    resolve_time = 0
    download_time = 0
    # The stats file contains lines such as (note the "Duraution" typo,
    # which comes from the tool that writes the file):
    #   Resolve Duraution: 0.049049
    #   Download Duraution: 0.006891
    with open(f'{cid}_latency.txt', 'r') as stats_file:
        for line in stats_file.readlines():
            if "Resolve Duraution:" in line:
                resolve_time = line.split(": ")[1].split("\n")[0]
            if "Download Duraution:" in line:
                download_time = line.split(": ")[1].split("\n")[0]
    return resolve_time, download_time
5,335,058
def run_module():
    """
    Main Ansible module function
    """
    # Module argument info
    module_args = dict(
        facts=dict(type='bool', required=False, default=FACTS_DEFAULT),
        facts_verbose=dict(type='bool', required=False,
                           default=FACTS_VERBOSE_DEFAULT),
        facts_key=dict(type='str', required=False, default=FACTS_KEY_DEFAULT),
    )

    # Seed result value
    result = dict(changed=False, failed=False, msg='')

    # Lean on boilerplate code in AnsibleModule class
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False
    )

    # Run logic
    # NOTE: This module does not support check mode right now so no special
    # check handling
    err, result = run_normal(module.params, result)

    # Exit
    module.exit_json(**result)
5,335,059
def process_ref(paper_id):
    """Attempt to extract an arXiv id from a user-supplied string.

    Handles full arxiv.org URLs (abs/pdf/ps), a trailing ".pdf", explicit
    version suffixes ("v2"), and a leading "arXiv:" label.
    """
    # Fix: patterns are now raw strings -- the originals relied on "\."
    # surviving in normal string literals, which raises invalid-escape
    # warnings (and will eventually error) on modern Python.
    # if user entered a whole url, extract only the arxiv id part
    paper_id = re.sub(r"https?://arxiv\.org/(abs|pdf|ps)/", "", paper_id)
    paper_id = re.sub(r"\.pdf$", "", paper_id)
    # strip version
    paper_id = re.sub(r"v[0-9]+$", "", paper_id)
    # remove leading arxiv, i.e., such that paper_id=' arXiv: 2001.1234' is still valid
    paper_id = re.sub(r"^\s*arxiv[:\- ]", "", paper_id, flags=re.IGNORECASE)
    return paper_id
5,335,060
def augment_test_func(test_func):
    """Augment test function to parse log files.

    `tools.create_tests` creates functions that run an LBANN experiment.
    This function creates augmented functions that parse the log files after
    LBANN finishes running, e.g. to check metrics or runtimes.

    Note: The naive approach is to define the augmented test functions in a
    loop. However, Python closures are late binding. In other words, the
    function would be overwritten every time we define it. We get around this
    overwriting problem by defining the augmented function in the local scope
    of another function.

    Args:
        test_func (function): Test function created by `tools.create_tests`.

    Returns:
        function: Test that can interact with PyTest.
    """
    test_name = test_func.__name__

    # Define test function
    def func(cluster, dirname):

        # Run LBANN experiment
        experiment_output = test_func(cluster, dirname)

        # Parse LBANN log file
        train_accuracy = None
        mini_batch_times = []
        gpu_usages = []
        with open(experiment_output['stdout_log_file']) as f:
            for line in f:
                match = re.search('training epoch [0-9]+ objective function : ([0-9.]+)', line)
                if match:
                    train_accuracy = float(match.group(1))
                match = re.search('training epoch [0-9]+ mini-batch time statistics : ([0-9.]+)s mean', line)
                if match:
                    mini_batch_times.append(float(match.group(1)))
                match = re.search('GPU memory usage statistics : ([0-9.]+) GiB mean', line)
                if match:
                    gpu_usages.append(float(match.group(1)))

        # Check if training accuracy is within expected range
        assert (expected_accuracy_range[0]
                < train_accuracy
                < expected_accuracy_range[1]), \
            'train accuracy is outside expected range'

        # Only tested on Ray. Skip if mini-batch test on another cluster.
        # Change this when mini-batch values are available for other clusters
        # (bug fix: the line above was previously not commented out, which
        # made this module fail to parse).

        # Check if mini-batch time is within expected range
        # Note: Skip first epoch since its runtime is usually an outlier
        mini_batch_times = mini_batch_times[1:]
        mini_batch_time = sum(mini_batch_times) / len(mini_batch_times)
        assert (0.75 * expected_mini_batch_times[cluster]
                < mini_batch_time
                < 1.25 * expected_mini_batch_times[cluster]), \
            'average mini-batch time is outside expected range'

        # Check for GPU usage and memory leaks
        # Note: Skip first epoch
        gpu_usages = gpu_usages[1:]
        gpu_usage = sum(gpu_usages) / len(gpu_usages)
        assert (0.75 * expected_gpu_usage[cluster]
                < gpu_usage
                < 1.25 * expected_gpu_usage[cluster]), \
            'average gpu usage is outside expected range'

    # Return test function from factory function
    func.__name__ = test_name
    return func
5,335,061
def test_invoice_get_invoices(session):
    """Assert that get_invoices works."""
    # Persist a payment account, a payment and one invoice linked to both.
    payment_account = factory_payment_account()
    payment = factory_payment()
    payment_account.save()
    payment.save()
    i = factory_invoice(payment_id=payment.id, account_id=payment_account.id)
    i.save()

    # skip_auth_check: unit test runs without an authenticated user context.
    invoices = Invoice_service.get_invoices(payment.id, skip_auth_check=True)
    assert invoices is not None
    assert len(invoices.get('items')) == 1
    # The listing is expected to omit the line-item details.
    assert not invoices.get('items')[0].get('line_items')
5,335,062
def get_cuda_arch_flags(cflags): """ For an arch, say "6.1", the added compile flag will be ``-gencode=arch=compute_61,code=sm_61``. For an added "+PTX", an additional ``-gencode=arch=compute_xx,code=compute_xx`` is added. """ # TODO(Aurelius84): return []
5,335,063
def nstep_td(env, pi, alpha=1, gamma=1, n=1, N_episodes=1000,
             ep_max_length=1000):
    """Evaluates state-value function with n-step TD

    Based on Sutton/Barto, Reinforcement Learning, 2nd ed. p. 144

    Args:
        env: Environment
        pi: Policy
        alpha: Step size
        gamma: Discount factor
        n: Number of steps
        N_episodes: Run this many episodes
        ep_max_length: Force termination of episode after this number of steps

    Returns:
        v: State-value function
    """
    # Unseen states default to a value of 0.
    v = defaultdict(lambda: 0)
    for i_episode in range(N_episodes):
        print("\r> N-step TD: Episode {}/{}".format(
            i_episode+1, N_episodes), end="")
        state = env.reset()
        # rewards[t+1] is the reward received after leaving states[t].
        rewards = [0]
        states = []
        t = 0
        T = np.inf
        done = False
        while t < T and t < ep_max_length:
            if not done:
                action = select_action_policy(pi, state)
                state_new, reward, done, info = env.step(action)
                rewards.append(reward)
                states.append(state)
                state = state_new
                if done:
                    # Fix the episode horizon once the terminal state is hit.
                    T = t+n+1
            if t-n >= 0:
                # Accumulate the n-step return G for the state visited at
                # time t-n, bootstrapping with v unless the horizon ended.
                G = 0
                for i in range(min(n,T-t)):
                    G += gamma**i * rewards[t-n+1+i]
                if t < T-n:
                    G += gamma**n * v[states[t]]
                v[states[t-n]] += alpha*(G - v[states[t-n]])
            t += 1
    print()
    return v
5,335,064
def open_file(path):
    """Open *path* for reading with an explicit UTF-8 encoding.

    The explicit encoding avoids depending on the platform default, which
    is what makes this "more robust" than a bare ``open``.
    """
    return open(path, mode='r', encoding='utf-8')
5,335,065
def main():
    """Create the README.md file from template and code documentation."""
    shell_commands, shell_commands_toc = _create_commands_docs()
    configuration, configuration_toc = _create_configuration_docs()

    print(f"Rendering documentation for v{VERSION}")

    rendered = templating.render(
        "../docs/README.template.md",
        configuration=configuration,
        configuration_toc=configuration_toc,
        shell_commands=shell_commands,
        shell_commands_toc=shell_commands_toc,
        version=VERSION,
    )
    # Normalise line endings, then write next to this script.
    readme_path = pathlib.Path(__file__).parent.joinpath("README.md")
    readme_path.write_text(rendered.replace("\r", ""))
5,335,066
def test_subscribe(env):
    """Check async. interrupt if a process terminates."""
    def child(env):
        yield env.timeout(3)
        return 'ohai'

    def parent(env):
        child_proc = env.process(child(env))
        subscribe_at(child_proc)
        try:
            # Wait on an event that never succeeds; the subscription is
            # expected to interrupt us when the child terminates.
            yield env.event()
        except Interrupt as interrupt:
            # Interrupt cause carries the finished process and its value.
            assert interrupt.cause[0] is child_proc
            assert interrupt.cause[1] == 'ohai'
            assert env.now == 3

    env.process(parent(env))
    env.run()
5,335,067
def _run(data_iterator, iterations):
    """Loop over the data iterator just to measure its speed."""
    # (Docstring translated from the original Japanese.)
    # ``batch_size`` is a module-level value defined elsewhere in this file.
    with tk.utils.tqdm(total=batch_size * iterations, unit="f") as pbar:
        while True:
            for X_batch, y_batch in data_iterator:
                assert len(X_batch) == batch_size
                assert len(y_batch) == batch_size
                pbar.update(len(X_batch))
            data_iterator.on_epoch_end()
            # Stop once the requested number of frames has been consumed.
            if pbar.n >= pbar.total:
                break
5,335,068
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.

    A throwaway C++ source file is compiled with the flag; a CompileError
    means the flag is unsupported.
    """
    import tempfile
    fd, fname = tempfile.mkstemp('.cpp', 'main', text=True)
    try:
        with os.fdopen(fd, 'w') as f:
            f.write('int main (int argc, char **argv) { return 0; }')
        try:
            compiler.compile([fname], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
        return True
    finally:
        # Bug fix: the temporary source file used to be leaked.
        os.remove(fname)
5,335,069
def _parse_data(f, dtype, shape): """Parses the data.""" dtype_big = np.dtype(dtype).newbyteorder(">") count = np.prod(np.array(shape)) # See: https://github.com/numpy/numpy/issues/13470 use_buffer = type(f) == gzip.GzipFile if use_buffer: data = np.frombuffer(f.read(), dtype_big, count) else: data = np.fromfile(f, dtype_big, count) return data.astype(dtype).reshape(shape)
5,335,070
def test_target(target  # type: Any
                ):
    """
    A simple decorator to declare that a case function is associated with a
    particular target.

    >>> @test_target(int)
    >>> def case_to_test_int():
    >>>     ...

    This is actually an alias for `@case_tags(target)`, that some users may
    find a bit more readable.

    :param target: for example a function, a class... or a string representing
        a function, a class...
    :return:
    """
    # Pure delegation; tagging semantics live entirely in ``case_tags``.
    return case_tags(target)
5,335,071
def plot_spatial(adata, color, img_key="hires", show_img=True, **kwargs): """Plot spatial abundance of cell types (regulatory programmes) with colour gradient and interpolation (from Visium anndata). This method supports only 7 cell types with these colours (in order, which can be changed using reorder_cmap). 'yellow' 'orange' 'blue' 'green' 'purple' 'grey' 'white' :param adata: adata object with spatial coordinates in adata.obsm['spatial'] :param color: list of adata.obs column names to be plotted :param kwargs: arguments to plot_spatial_general :return: matplotlib figure """ if show_img is True: kwargs["show_img"] = True kwargs["img"] = list(adata.uns["spatial"].values())[0]["images"][img_key] # location coordinates if "spatial" in adata.uns.keys(): kwargs["coords"] = ( adata.obsm["spatial"] * list(adata.uns["spatial"].values())[0]["scalefactors"][f"tissue_{img_key}_scalef"] ) else: kwargs["coords"] = adata.obsm["spatial"] fig = plot_spatial_general(value_df=adata.obs[color], **kwargs) # cell abundance values return fig
5,335,072
def stats_by_group(df): """Calculate statistics from a groupby'ed dataframe with TPs,FPs and FNs.""" EPSILON = 1e-10 result = df[['tp', 'fp', 'fn']].sum().reset_index().assign( precision=lambda x: (x['tp'] + EPSILON) / (x['tp'] + x['fp'] + EPSILON), recall=lambda x: (x['tp'] + EPSILON) / (x['tp'] + x['fn'] + EPSILON)).assign( f1=lambda x: 2 * x['precision'] * x['recall'] / (x['precision'] + x['recall'] + EPSILON), count=lambda x: x['tp'] + x['fn']) result['proportion'] = result['count'] / np.sum(result['count']) result['proportion_text'] = (result['proportion'] * 100).round(2).astype(str) + "%" return result
5,335,073
def uncolorize(text): """ Attempts to remove color and reset flags from text via regex pattern @text: #str text to uncolorize -> #str uncolorized @text .. from redis_structures.debug import uncolorize uncolorize('\x1b[0;34mHello world\x1b[1;m') # -> 'Hello world' .. """ return _find_colors.sub("", text)
5,335,074
def in_this_prow(prow): """ Returns a bool describing whether this processor inhabits `prow`. Args: prow: The prow. Returns: The bool. """ return prow == my_prow()
5,335,075
def _keypair_from_file(key_pair_file: str) -> Keypair: """Returns a Solana KeyPair from a file""" with open(key_pair_file) as kpf: keypair = kpf.read() keypair = keypair.replace("[", "").replace("]", "") keypair = list(keypair.split(",")) keypair = [int(i) for i in keypair] return Keypair(keypair[:32])
5,335,076
def val_to_bitarray(val, doing): """Convert a value into a bitarray""" if val is sb.NotSpecified: val = b"" if type(val) is bitarray: return val if type(val) is str: val = binascii.unhexlify(val.encode()) if type(val) is not bytes: raise BadConversion("Couldn't get bitarray from a value", value=val, doing=doing) b = bitarray(endian="little") b.frombytes(val) return b
5,335,077
def unpack_uint64_from(buf, offset=0): """Unpack a 64-bit unsigned integer from *buf* at *offset*.""" return _uint64struct.unpack_from(buf, offset)[0]
5,335,078
def del_none(dictionary): """ Recursively delete from the dictionary all entries which values are None. Args: dictionary (dict): input dictionary Returns: dict: output dictionary Note: This function changes the input parameter in place. """ for key, value in list(dictionary.items()): if value is None: del dictionary[key] elif isinstance(value, dict): del_none(value) return dictionary
5,335,079
def get_model(model_dir, suffix=""): """return model file, model spec object, and list of extra data items this function will get the model file, metadata, and extra data the returned model file is always local, when using remote urls (such as v3io://, s3://, store://, ..) it will be copied locally. returned extra data dict (of key, DataItem objects) allow reading additional model files/objects e.g. use DataItem.get() or .download(target) .as_df() to read example:: model_file, model_artifact, extra_data = get_model(models_path, suffix='.pkl') model = load(open(model_file, "rb")) categories = extra_data['categories'].as_df() :param model_dir: model dir or artifact path (store://..) or DataItem :param suffix: model filename suffix (when using a dir) :returns: model filename, model artifact object, extra data dict """ model_file = "" model_spec = None extra_dataitems = {} suffix = suffix or ".pkl" # a DataItem-like object was passed in; resolve it to its url string if hasattr(model_dir, "artifact_url"): model_dir = model_dir.artifact_url if is_store_uri(model_dir): # store://... url: fetch the artifact and resolve paths from it model_spec, target = store_manager.get_store_artifact(model_dir) if not model_spec or model_spec.kind != "model": raise ValueError(f"store artifact ({model_dir}) is not model kind") model_file = _get_file_path(target, model_spec.model_file) extra_dataitems = _get_extra(target, model_spec.extra_data) elif model_dir.lower().endswith(".yaml"): # explicit path to a model-spec yaml file model_spec = _load_model_spec(model_dir) model_file = _get_file_path(model_dir, model_spec.model_file) extra_dataitems = _get_extra(model_dir, model_spec.extra_data) elif model_dir.endswith(suffix): # direct path to the model file itself; no spec or extras model_file = model_dir else: # a directory: look for a model spec file inside it dirobj = store_manager.object(url=model_dir) model_dir_list = dirobj.listdir() if model_spec_filename in model_dir_list: # NOTE(review): _get_file_path takes `isdir` while _get_extra # takes `is_dir` -- two different helpers, but confirm the # spelling of each keyword is intentional. model_spec = _load_model_spec(path.join(model_dir, model_spec_filename)) model_file = _get_file_path(model_dir, model_spec.model_file, isdir=True) extra_dataitems = _get_extra(model_dir, model_spec.extra_data, is_dir=True) else: # no spec file: expose every dir entry as extra data and pick # the first file matching the suffix as the model file extra_dataitems = _get_extra( model_dir, {v: v for v in model_dir_list}, is_dir=True ) for file in model_dir_list: if file.endswith(suffix): model_file = path.join(model_dir, file) break if not model_file: raise ValueError(f"cant resolve model file for {model_dir} suffix{suffix}") obj = store_manager.object(url=model_file) if obj.kind == "file": # already a local file; return its path as-is return model_file, model_spec, extra_dataitems # remote object: download to a temp file and return that local path temp_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name obj.download(temp_path) return temp_path, model_spec, extra_dataitems
5,335,080
def test_attached_solr_export_records(et_code, rset_code, basic_exporter_class, record_sets, new_exporter, assert_all_exported_records_are_indexed): """ For AttachedRecordExporter classes that load data into Solr, the `export_records` method should load the expected records into the expected Solr indexes. This is just a simple check to make sure all child exporters processed the appropriate recordsets; the children are tested more extensively elsewhere. """ records = record_sets[rset_code] expclass = basic_exporter_class(et_code) exporter = new_exporter(expclass, 'full_export', 'waiting') exporter.export_records(records) for child in exporter.children.values(): child.commit_indexes() assert_all_exported_records_are_indexed(exporter, records)
5,335,081
def get_table_header(driver): """Return Table columns in list form """ header = driver.find_elements(By.TAG_NAME, value= 'th') header_list = [item.text for index, item in enumerate(header) if index < 10] return header_list
5,335,082
def ref_count() -> Callable[[ConnectableObservable], Observable]: """Returns an observable sequence that stays connected to the source as long as there is at least one subscription to the observable sequence. """ from rx.core.operators.connectable.refcount import _ref_count return _ref_count()
5,335,083
async def test_edit(): """Test editing a paste.""" p = create_paste("test_edit") with pytest.raises(ValueError): await p.edit(content="NI") content = p.content token = await p.save(site=TEST_SITE) assert p.id is not None assert p._token == token await p.edit(get_content("test_edit_post")) assert p.content != content p._token = None with pytest.raises(ValueError): await p.edit(content="NI") await p.delete(token)
5,335,084
def teach(fn, collection): """Concurrently digest each item in the collection with the provided function. >>> tmap(save_to_disk, documents) """ threads = [threading.Thread(target=fn, args=(item,)) for item in collection] each("start", threads) each("join", threads)
5,335,085
def no_dry_run(f): """A decorator which "disables" a function during a dry run. A user can specify a `dry_run` option in the `devel` section of `haas.cfg`. If the option is present (regardless of its value), any function or method decorated with `no_dry_run` will be "disabled." The call will be logged (with level `logging.INFO`), but will not actually execute. The function will instead return 'None'. Callers of decorated functions must accept a None value gracefully. The intended use case of `no_dry_run` is to disable functions which cannot be run because, for example, the HaaS is executing on a developer's workstation, which has no configured switch, libvirt, etc. If the `dry_run` option is not specified, this decorator has no effect. """ @wraps(f) def wrapper(*args, **kwargs): if have_dry_run(): # Record what would have run, then skip execution entirely. logger = logging.getLogger(__name__) logger.info('dry run, not executing: %s.%s(*%r,**%r)' % (f.__module__, f.__name__, args, kwargs)) return None else: return f(*args, **kwargs) return wrapper
5,335,086
def radius_provider_modify(handle, name, **kwargs): """ modifies a radius provider Args: handle (UcsHandle) name (string): radius provider name **kwargs: key-value pair of managed object(MO) property and value, Use 'print(ucscoreutils.get_meta_info(<classid>).config_props)' to get all configurable properties of class Returns: AaaRadiusProvider: managed object Raises: UcsOperationError: if AaaRadiusProvider is not present Example: radius_provider_modify(handle, name="test_radius_prov", timeout="5") """ mo = radius_provider_get(handle, name, caller="radius_provider_modify") mo.set_prop_multiple(**kwargs) handle.set_mo(mo) handle.commit() return mo
5,335,087
def main(): """ Main function. """ logging.basicConfig(level=logging.INFO) args = parse_args() logging.info(f'Commandline:\n{" ".join(sys.argv)}') cfg = Config.fromfile(args.config) update_config = f' --update_config {args.update_config}' if args.update_config else '' if is_clustering_needed(cfg): update_config = cluster(cfg, args.config, update_config) logging.info('Training started ...') training_info = train(args.config, args.gpu_num, update_config) logging.info('... training completed.') work_dir = get_work_dir(cfg, args.update_config) logging.info('Evaluation started ...') evaluate(os.path.join(work_dir, "config.py"), os.path.join(work_dir, "latest.pth"), args.out, '', args.show_dir) logging.info('... evaluation completed.') with open(args.out, 'a+') as dst_file: yaml.dump(training_info, dst_file)
5,335,088
async def test_failed_to_log_in(mock_login, mock_logout, hass): """Testing exception at login results in False.""" from pexpect import exceptions conf_dict = { DOMAIN: { CONF_PLATFORM: "unifi_direct", CONF_HOST: "fake_host", CONF_USERNAME: "fake_user", CONF_PASSWORD: "fake_pass", CONF_PORT: 22, CONF_TRACK_NEW: True, CONF_CONSIDER_HOME: timedelta(seconds=180), } } mock_login.side_effect = exceptions.EOF("Test") scanner = get_scanner(hass, conf_dict) assert not scanner
5,335,089
def _read_table(table_node): """Return a TableData object for the 'table' element. Iterates the children of *table_node*: a single 'th' child supplies the header, 'tr' children supply the rows; other tags are ignored. Raises: ValueError: if more than one 'th' is found, or if a 'th' appears after any 'tr'. """ header = [] rows = [] for node in table_node: if node.tag == "th": if header: raise ValueError("cannot handle multiple headers") elif rows: raise ValueError("encountered header after rows") else: # NOTE(review): `header` starts as a list but is replaced by # the stripped text *string* here -- confirm that create_table # accepts a string header (or whether this should be split # into column names). header = node.text.strip() elif node.tag == "tr": # NOTE(review): each row is the raw stripped text of the 'tr' # element, not a list of cells -- verify against create_table's # expectations. rows.append(node.text.strip()) return create_table(header, rows)
5,335,090
def test_cascade_action_record_delete(app, db, location, record, generic_file, force, num_of_recordbuckets): """Test cascade action on record delete, with force false.""" record_id = record.id bucket_id = record.files.bucket.id # check before assert len(RecordsBuckets.query.all()) == 1 assert len(Bucket.query.all()) == 1 assert len(Bucket.query.filter_by(id=bucket_id).all()) == 1 assert ObjectVersion.get(bucket=bucket_id, key=generic_file) record.delete(force=force) # check after db.session.expunge(record.model) with pytest.raises(NoResultFound): record = Record.get_record(record_id) assert len(RecordsBuckets.query.all()) == num_of_recordbuckets assert len(Bucket.query.all()) == 1 assert len(Bucket.query.filter_by(id=bucket_id).all()) == 1 assert ObjectVersion.get(bucket=bucket_id, key=generic_file)
5,335,091
def postorder(root: Node): """ Post-order traversal visits left subtree, right subtree, root node. >>> postorder(make_tree()) [4, 5, 2, 3, 1] """ return postorder(root.left) + postorder(root.right) + [root.data] if root else []
5,335,092
def merge_triangulations(groups): """ Each entry of the groups list is a list of two (or one) triangulations. This function takes each pair of triangulations and combines them. Parameters ---------- groups : list List of pairs of triangulations Returns ------- list List of merged triangulations """ triangulations = [] for group in groups: if len(group)==2: # Find the first edges to connect the seperate triangulations ldi, rdi = lowest_common_tangent(group[0], group[1]) # Combine the two hulls into a single set of edges base, d_triang = combine_triangulations(ldi, rdi, group[0], group[1]) # Given the starting base edge, fill in the edges between the hulls d_triang = zip_hulls(base, d_triang) triangulations.append(d_triang) else: triangulations.append(group[0]) return [triangulations[i:i+2] for i in range(0, len(triangulations), 2)]
5,335,093
def make_vrt_list(feat_list, band=None): """ Take a list of STAC features and band name(s) and build GDAL-friendly VRT xml documents, one per scene, returned as utf-8 strings. Parameters ---------- feat_list : list List of STAC feature dicts (one per scene). band : list, str Can be a list or string of name of band(s) required. Returns ------- list List of VRT xml documents (utf-8 strings), one per feature. """ # imports from lxml import etree as et from rasterio.crs import CRS from rasterio.transform import Affine # normalise band(s) into a list if band is None: bands = [] elif not isinstance(band, list): bands = [band] else: bands = band # validate features input if not isinstance(feat_list, list): raise TypeError('Features must be a list of xml objects.') if not feat_list: raise ValueError('No features provided.') # vrt xml string per scene vrt_list = [] for feat in feat_list: f_props = feat.get('properties') # scene-level date f_dt = f_props.get('datetime') # scene-level x, y sizes f_x_size = f_props.get('proj:shape')[1] f_y_size = f_props.get('proj:shape')[0] # scene-level epsg code as wkt f_srs = CRS.from_epsg(f_props.get('proj:epsg')).wkt # scene-level transform in gdal parameter order aff = Affine(*f_props.get('proj:transform')[0:6]) f_transform = ', '.join(str(p) for p in Affine.to_gdal(aff)) # build a top-level vrt dataset xml object xml_ds = satfetcher.make_vrt_dataset_xml(x_size=f_x_size, y_size=f_y_size, axis_map='1,2', # hardcoded srs=f_srs, trans=f_transform) # iterate bands and append a raster vrt for each available asset band_idx = 1 for band in bands: if band in feat.get('assets'): asset = feat.get('assets').get(band) # NOTE(review): dtype is forced to Int16 for every band; an # earlier UInt8 special-case for 'oa_fmask' was disabled # upstream (possibly a rasterio int8 bug) -- confirm. a_dtype = 'Int16' # asset raster x, y sizes a_x_size = asset.get('proj:shape')[1] a_y_size = asset.get('proj:shape')[0] # raster url; swap s3 scheme for the public https endpoint a_url = asset.get('href') a_url = a_url.replace('s3://dea-public-data', 'https://data.dea.ga.gov.au') # fmask uses 0 for nodata, analysis bands use -999 a_nodata = 0 if band == 'oa_fmask' else -999 # build raster xml and attach it to the dataset xml xml_rast = satfetcher.make_vrt_raster_xml(x_size=a_x_size, y_size=a_y_size, dtype=a_dtype, band_num=band_idx, nodata=a_nodata, dt=f_dt, rel_to_vrt=0, # hardcoded url=a_url, src_band=1) # hardcoded xml_ds.append(xml_rast) band_idx += 1 # decode to utf-8 string and append to vrt list vrt_list.append(et.tostring(xml_ds).decode('utf-8')) return vrt_list
5,335,094
def make_dataset(list_file, outdir, categories_path, featurizer_path, sample_rate, window_size, shift, auto_scale=True, noise_path=None, max_noise_ratio=0.1, noise_selection=0.1, use_cache=False): """ Create a dataset given the input list file, a featurizer, the desired .wav sample rate, classifier window_size and window shift amount. The dataset is saved to the same file name with .npz extension. This will do nothing if dataset is already created, unless use_cache=False. """ dataset_name = os.path.basename(list_file) dataset_path = os.path.splitext(dataset_name)[0] + ".npz" dataset_path = os.path.join(outdir, dataset_path) if use_cache and os.path.isfile(dataset_path): return transform = featurizer.AudioTransform(featurizer_path, 0) entry_map = parse_list_file(list_file) if not categories_path: categories_path = "categories.txt" if not os.path.isfile(categories_path): raise Exception("{} file not found".format(categories_path)) categories = [x.strip() for x in open(categories_path, 'r').readlines()] mixer = None if noise_path: noise_files = [os.path.join(noise_path, f) for f in os.listdir(noise_path) if os.path.splitext(f)[1] == ".wav"] mixer = noise_mixer.AudioNoiseMixer(noise_files, max_noise_ratio, noise_selection) dataset = _get_dataset(entry_map, categories, transform, sample_rate, window_size, shift, auto_scale, mixer) if len(dataset.features) == 0: print("No features found in list file") print("Saving: {}".format(dataset_path)) dataset.save(dataset_path)
5,335,095
def compute_relative_pose(cam_pose, ref_pose): """Compute relative pose between two cameras Args: cam_pose (np.ndarray): Extrinsic matrix of camera of interest C_i (3,4). Transforms points in world frame to camera frame, i.e. x_i = C_i @ x_w (taking into account homogeneous dimensions) ref_pose (np.ndarray): Extrinsic matrix of reference camera C_r (3,4) Returns: relative_pose (np.ndarray): Relative pose of size (3,4). Should transform points in C_r to C_i, i.e. x_i = M @ x_r Prohibited functions: Do NOT use np.linalg.inv() or similar functions """ relative_pose = np.zeros((3, 4), dtype=np.float64) """ YOUR CODE STARTS HERE """ Ri, Rr = cam_pose[:, :-1], ref_pose[:, :-1] ti, tr = cam_pose[:, -1:], ref_pose[:, -1:] relative_pose[:, :-1] = Ri @ Rr.T relative_pose[:, -1:] = ti - Ri @ Rr.T @ tr """ YOUR CODE ENDS HERE """ return relative_pose
5,335,096
def add_execute_parser(execute): """ ``execute`` command parser configuration """ execute.add_argument( '-d', '--deployment-id', required=True, help='A unique ID for the deployment') execute.add_argument( '-w', '--workflow', dest='workflow_id', help='The workflow to execute') execute.add_argument( '-p', '--parameters', dest='parameters', action='append', help='R|Parameters for the workflow execution\n' '(Can be provided as wildcard based paths (*.yaml, etc..) to YAML files,\n' 'a JSON string or as "key1=value1;key2=value2").\n' 'This argument can be used multiple times.') execute.add_argument( '--task-retries', dest='task_retries', type=int, help='How many times should a task be retried in case of failure') execute.add_argument( '--task-retry-interval', dest='task_retry_interval', default=1, type=int, help='How many seconds to wait before each task is retried')
5,335,097
def compat_chr(item): """ This is necessary to maintain compatibility across Python 2.7 and 3.6. In 3.6, 'chr' handles any unicode character, whereas in 2.7, `chr` only handles ASCII characters. Thankfully, the Python 2.7 method `unichr` provides the same functionality as 3.6 `chr`. :param item: a length 1 string who's `chr` method needs to be invoked :return: the unichr code point of the single character string, item """ if sys.version >= '3.0': return chr(item) else: return unichr(item)
5,335,098
def cli_cosmosdb_cassandra_table_update(client, resource_group_name, account_name, keyspace_name, table_name, default_ttl=None, schema=None, analytical_storage_ttl=None): """Update an Azure Cosmos DB Cassandra table""" logger.debug('reading Cassandra table') cassandra_table = client.get_cassandra_table(resource_group_name, account_name, keyspace_name, table_name) cassandra_table_resource = CassandraTableResource(id=table_name) cassandra_table_resource.default_ttl = cassandra_table.resource.default_ttl cassandra_table_resource.schema = cassandra_table.resource.schema cassandra_table_resource.analytical_storage_ttl = cassandra_table.resource.analytical_storage_ttl if _populate_cassandra_table_definition(cassandra_table_resource, default_ttl, schema, analytical_storage_ttl): logger.debug('replacing Cassandra table') cassandra_table_create_update_resource = CassandraTableCreateUpdateParameters( resource=cassandra_table_resource, options={}) return client.create_update_cassandra_table(resource_group_name, account_name, keyspace_name, table_name, cassandra_table_create_update_resource)
5,335,099