content: string (lengths 22–815k)
id: int64 (0–4.91M)
def is_valid_dim(x: Any) -> bool:
    """Determine if the argument will be a valid dim when included in torch.Size."""
    return isinstance(x, int) and x > 0
5,340,900
def compute_alpha(n, S_d, d_min): """ Approximate the alpha of a power law distribution. Parameters ---------- n: int or np.array of int Number of entries that are larger than or equal to d_min S_d: float or np.array of float Sum of log degrees in the distribution that are larger than or equal to d_min d_min: int The minimum degree of nodes to consider Returns ------- alpha: float The estimated alpha of the power law distribution """ return n / (S_d - n * np.log(d_min - 0.5)) + 1
5,340,901
def _head_object( s3_conn: S3Client, bucket: str, key: str ) -> Optional[HeadObjectOutputTypeDef]: """Retrieve information about an object in S3 if it exists. Args: s3_conn: S3 connection to use for operations. bucket: name of the bucket containing the key. key: name of the key to lookup. Returns: S3 object information, or None if the object does not exist. See the AWS documentation for explanation of the contents. Raises: botocore.exceptions.ClientError: any error from boto3 other than key not found is passed through. """ try: return s3_conn.head_object(Bucket=bucket, Key=key) except botocore.exceptions.ClientError as err: if err.response["Error"]["Code"] == "404": return None raise
5,340,902
def Residual(feat_maps_in, feat_maps_out, prev_layer):
    """
    A customizable residual unit with convolutional and shortcut blocks

    Args:
        feat_maps_in: number of channels/filters coming in, from input or previous layer
        feat_maps_out: how many output channels/filters this block will produce
        prev_layer: the previous layer
    """
    skip = skip_block(feat_maps_in, feat_maps_out, prev_layer)
    conv = conv_block(feat_maps_out, prev_layer)

    merged = add([skip, conv])  # the residual connection
    return LeakyReLU()(merged)
5,340,903
def test_p4():
    """
    Test some simple base cases using D with varying bases
    """
    for i in range(2, 10):
        d = D([str(_) for _ in range(i)], [1/i]*i)
        d.set_base(i)
        yield assert_almost_equal, P(d), i
5,340,904
def clean_detail_line_data(detail_row: List[str], date: str) -> List[str]:
    """
    :param detail_row: uncleaned detail row
    :param date: job data to be added to data
    :return: a cleaned list of details fields
    """
    if not detail_row:
        print('detail_row:', detail_row)
        return detail_row
    # The age field is an integer number of days between the date when the video
    # was uploaded and Feb. 15, 2007 (YouTube's establishment)
    age_field_location = 2
    age_date_format = '%Y-%m-%d'
    age = int(detail_row[age_field_location].strip()) if detail_row[age_field_location].strip() else 0
    new_date = datetime.strptime('2007-02-15', age_date_format) + timedelta(days=age)
    detail_row[age_field_location] = datetime.strftime(new_date, age_date_format)
    return [date, ] + detail_row
5,340,905
def lammps_prod(job):
    """Run npt ensemble production."""
    in_script_name = "in.prod"
    modify_submit_lammps(in_script_name, job.sp)
    msg = f"sbatch submit.slurm {in_script_name} {job.sp.replica} {job.sp.temperature} {job.sp.pressure} {job.sp.cutoff}"
    return msg
5,340,906
def gather_tensors(tensors, indices):
    """Performs a tf.gather operation on a set of Tensors.

    Args:
        tensors: A potentially nested tuple or list of Tensors.
        indices: The indices to use for the gather operation.

    Returns:
        gathered_tensors: A potentially nested tuple or list of Tensors with the
            same structure as the 'tensors' input argument. Contains the result
            of applying tf.gather(x, indices) on each element x in 'tensors'.
    """
    return map_nested(lambda x: tf.gather(x, indices), tensors)
5,340,907
def test_nldi(gage, loc, comid):
    """Test nldi functions used in processes.

    Args:
        gage (string): USGS Gage id string.
        loc (list): List of lon and lat
        comid (str): Expected comid returned from NLDI query.
    """
    locarray = array(loc)
    gageloc = NLDI().getfeature_byid("nwissite", gage).to_crs("epsg:3857")
    cid = gageloc.comid.values.astype(str)
    strmseg_loc = NLDI().getfeature_byid("comid", cid[0]).to_crs("epsg:3857")
    print(strmseg_loc.comid[0])
    assert strmseg_loc.comid[0] == comid
    gageloc_array = array([(gageloc.geometry[0].x, gageloc.geometry[0].y)])
    assert_allclose(gageloc_array, locarray, rtol=0.1)
5,340,908
def delete_image_when_image_changed(sender, instance, **kwargs) -> None: """ Delete image if it was switched. :param sender: models.Model child class :param instance: sender instance :param kwargs: additional parameters :return: None """ # Don't run on initial save if not instance.pk: return for field in sender._meta.concrete_fields: if isinstance(field, models.ImageField): # its got a image field. Let's see if it changed try: instance_in_db = sender.objects.get(pk=instance.pk) except sender.DoesNotExist: # We are probably in a transaction and the PK is just temporary return instance_in_db_file_field = getattr(instance_in_db, field.name) instance_file_field = getattr(instance, field.name) if instance_in_db_file_field.name != instance_file_field.name: delete_file_if_unused( sender, instance, field, instance_in_db_file_field )
5,340,909
def _get_log_level(log_level_name):
    """
    Get numeric log level corresponding to specified log level name
    """
    # TODO: Is there a built-in method to do a reverse lookup?
    if log_level_name == LOG_LEVEL_NAME_CRITICAL:
        return logging.CRITICAL
    elif log_level_name == LOG_LEVEL_NAME_ERROR:
        return logging.ERROR
    elif log_level_name == LOG_LEVEL_NAME_WARNING:
        return logging.WARNING
    elif log_level_name == LOG_LEVEL_NAME_INFO:
        return logging.INFO
    elif log_level_name == LOG_LEVEL_NAME_DEBUG:
        return logging.DEBUG
    elif log_level_name == LOG_LEVEL_NAME_TRACE:
        return LOG_LEVEL_VALUE_TRACE
    return None
5,340,910
def __initialize_storage__(): # type: () -> None """ Initializes the dummy storage backend. Compiles the dummy storage backend from the tests sources. Sets the JAVA_API_JAR on the first call to this initialization. :return: None """ global JAVA_API_JAR global STORAGE_API current_path = os.path.dirname(os.path.abspath(__file__)) # Add python storage api to sys.path STORAGE_API = os.path.join(current_path, "..", "resources") sys.path.insert(0, STORAGE_API) if JAVA_API_JAR == "": # Compile jar jar_source_path = os.path.join(current_path, "..", "..", "..", "..", "..", "..", "..", "..", "utils", "storage") compile_command = ["mvn", "clean", "package"] process = subprocess.Popen(compile_command, stdout=subprocess.PIPE, cwd=jar_source_path) output, error = process.communicate() if error: print(output.decode()) print(error.decode()) # Set JAVA_API_JAR JAVA_API_JAR = os.path.join(jar_source_path, "dummyPSCO", "target", "compss-dummyPSCO.jar") print("Storage api jar: " + JAVA_API_JAR) # Set global environment os.environ["CLASSPATH"] = JAVA_API_JAR + ":" + os.environ["CLASSPATH"] os.environ["PYTHONPATH"] = STORAGE_API + ":" + os.environ["PYTHONPATH"] # Prepare temporary directory if os.path.exists(TEMP_DIR): shutil.rmtree(TEMP_DIR) os.mkdir(TEMP_DIR) # Prepare storage configuration if not os.path.exists(STORAGE_CONF): with open(STORAGE_CONF, "w") as fd: fd.write("localhost")
5,340,911
def show_report(total_time, total_files):
    """prints out brief report
    """
    hours = total_time // 3600
    minutes = (total_time % 3600) // 60
    seconds = total_time % 60
    print("""
    file processed: {0}
    time taken: {1} hours {2} minutes {3} seconds
    the results are in the folder 'output_apobec'
    note: difference in the output files names is denoted by
    the time when script finished its work.
    ---------------
    ... job finished at {4}
    ---------------
    """.format(total_files, hours, minutes, int(seconds), get_current_time()))
5,340,912
def is_iterator(obj):
    """
    Predicate that returns whether an object is an iterator.
    """
    import types
    return type(obj) == types.GeneratorType or ('__iter__' in dir(obj) and 'next' in dir(obj))
5,340,913
def delete_entries(keytab_file: str, slots: t.List[int]) -> bool: """ Deletes one or more entries from a Kerberos keytab. This function will only delete slots that exist within the keylist. Once the slots are deleted, the current keylist will be written to a temporary file. This avoids having the keylist appended to the keylist within the keytab file. Once the keylist is written to the temporary file, the temporary file will be move/renamed the original keytab filename. :param keytab_file: Kerberos V5 keytab file name. The file can be a relative path read from the user's home directory. :param slots: list of slots to be deleted from the keylist. :return: True on success, otherwise False. """ keytab_file = ktutil.keytab_exists(keytab_file) if not keytab_file or not isinstance(slots, list): return False keytab_tmp = ktutil.resolve_keytab_file(f"{keytab_file}.tmp") kt = ktutil() # Read the Kerberos keytab file first to check if slots exist before # trying to delete them. kt.read_kt(keytab_file) kt.list() kt.quit() existing_slots = [ key["slot"] for key in kt.keylist if key["slot"] in slots] if len(existing_slots) == 0: return False # No slots exist to be deleted. # Re-initialize 'ktutil' command and delete the slot(s). # Write the current keylist to a temporary file, then rename # the temporary file to the original name. This avoids the # duplication caused by the ``write_kt`` invocation. kt.ktutil_init() kt.read_kt(keytab_file) for slot in existing_slots: kt.delete_entry(slot) kt.write_kt(keytab_tmp) kt.quit() shutil.move(keytab_tmp, keytab_file) return True if kt.error else False
5,340,914
def detect(environ, context=None):
    """
    parse HTTP user agent string and detect a mobile device.
    """
    context = context or Context()
    try:
        ## if key 'HTTP_USER_AGENT' doesn't exist,
        ## we are not able to decide agent class in the first place.
        ## so raise KeyError to return NonMobile agent.
        carrier = detect_fast(environ['HTTP_USER_AGENT'])
        ## if carrier is 'nonmobile', raise KeyError intentionally
        factory_class = {
            'docomo'  : context.docomo_factory,
            'ezweb'   : context.ezweb_factory,
            'softbank': context.softbank_factory,
            'willcom' : context.willcom_factory,
        }[carrier]
        return factory_class().create(environ, context)
    except KeyError:
        return NonMobile(environ, context)
5,340,915
def _sniff_scheme(uri_as_string):
    """Returns the scheme of the URL only, as a string."""
    #
    # urlsplit doesn't work on Windows -- it parses the drive as the scheme...
    # no protocol given => assume a local file
    #
    if os.name == 'nt' and '://' not in uri_as_string:
        uri_as_string = 'file://' + uri_as_string

    return urllib.parse.urlsplit(uri_as_string).scheme
5,340,916
def update_running_status(results):
    """
    Signal that the queries for the home page statistics finished running.
    """
    cache_id = HOME_STATS_LOCK_ID
    cache_ttl = HOME_STATS_LOCK_TTL

    if cache.get(cache_id):
        cache.set(cache_id, False, cache_ttl)
5,340,917
async def get_product(id: UUID4):  # noqa: A002
    """Return ProductGinoModel instance."""
    return await ProductGinoModel.get_or_404(id)
5,340,918
def LSIIR_unc(H,UH,Nb,Na,f,Fs,tau=0): """Design of stabel IIR filter as fit to reciprocal of given frequency response with uncertainty Least-squares fit of a digital IIR filter to the reciprocal of a given set of frequency response values with given associated uncertainty. Propagation of uncertainties is carried out using the Monte Carlo method. Parameters ---------- H: np.ndarray frequency response values. UH: np.ndarray uncertainties associated with real and imaginary part of H Nb: int order of IIR numerator polynomial. Na: int order of IIR denominator polynomial. f: np.ndarray frequencies corresponding to H Fs: float sampling frequency for digital IIR filter. tau: float initial estimate of time delay for filter stabilization. Returns ------- b,a: np.ndarray IIR filter coefficients tau: int time delay (in samples) Uba: np.ndarray uncertainties associated with [a[1:],b] References ---------- * Eichstädt, Elster, Esward and Hessling [Eichst2010]_ .. seealso:: :mod:`PyDynamic.uncertainty.propagate_filter.IIRuncFilter` :mod:`PyDynamic.deconvolution.fit_filter.LSIIR` """ runs = 1000 print("\nLeast-squares fit of an order %d digital IIR filter to the" % max(Nb,Na)) print("reciprocal of a frequency response given by %d values.\n" % len(H)) print("Uncertainties of the filter coefficients are evaluated using\n"\ "the GUM S2 Monte Carlo method with %d runs.\n" % runs) HRI = np.random.multivariate_normal(np.hstack((np.real(H),np.imag(H))),UH,runs) HH = HRI[:,:len(f)] + 1j*HRI[:,len(f):] AB = np.zeros((runs,Nb+Na+1)) Tau= np.zeros((runs,)) for k in range(runs): bi,ai,Tau[k] = LSIIR(HH[k,:],Nb,Na,f,Fs,tau,verbose=False) AB[k,:] = np.hstack((ai[1:],bi)) bi = np.mean(AB[:,Na:],axis=0) ai = np.hstack((np.array([1.0]),np.mean(AB[:,:Na],axis=0))) Uab= np.cov(AB,rowvar=0) tau = np.mean(Tau) return bi,ai, tau, Uab
5,340,919
def genMeasureCircuit(H, Nq, commutativity_type, clique_cover_method=BronKerbosch): """ Take in a given Hamiltonian, H, and produce the minimum number of necessary circuits to measure each term of H. Returns: List[QuantumCircuits] """ start_time = time.time() term_reqs = np.full((len(H[1:]),Nq),'*',dtype=str) for i, term in enumerate(H[1:]): for op in term[1]: qubit_index = int(op[1:]) basis = op[0] term_reqs[i][qubit_index] = basis # Generate a graph representing the commutativity of the Hamiltonian terms comm_graph = commutativity_type.gen_comm_graph(term_reqs) num_terms = len(comm_graph) # Find a set of cliques within the graph where the nodes in each clique # are disjoint from one another. try: max_cliques = clique_cover_method(comm_graph) except RecursionError as re: print('Maximum recursion depth reached: {}'.format(re.args[0])) return 0, 0, 0 end_time = time.time() print('MEASURECIRCUIT: {} found {} unique circuits'.format( clique_cover_method.__name__, len(max_cliques))) et = end_time - start_time print('MEASURECIRCUIT: Elapsed time: {:.6f}s'.format(et)) return num_terms, len(max_cliques), et
5,340,920
def run_test(session, m, data, batch_size, num_steps):
    """Runs the model on the given data."""
    costs = 0.0
    iters = 0
    state = session.run(m.initial_state)
    for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):
        cost, state = session.run([m.cost, m.final_state], {
            m.input_data: x,
            m.targets: y,
            m.initial_state: state
        })
        costs += cost
        iters += 1
    return costs / iters
5,340,921
def chunks(l, n):
    """
    Yield successive n-sized chunks from l.
    Source: http://stackoverflow.com/a/312464/902751
    """
    for i in range(0, len(l), n):
        yield l[i:i + n]
5,340,922
def table_to_dict(table): """Convert Astropy Table to Python dict. Numpy arrays are converted to lists. This Can work with multi-dimensional array columns, by representing them as list of list. e.g. This is useful in the following situation. foo = Table.read('foo.fits') foo.to_pandas() <- This will not work if columns are multi-dimensional. The alternative is: foo = Table.read('foo.fits') bar = table_to_dict(foo) df = pd.DataFrame(bar, columns=bar.keys()) <- The desired result. """ total_data = {} multi_cols = [] for i, _ in enumerate(table.columns): # This looks unusual, but it is the only way to iterate over columns. col = table.columns[i] data = table[col.name].tolist() total_data[col.name] = data if len(col.shape) == 2: multi_cols.append(col.name) return total_data, multi_cols
5,340,923
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    R = K.mean(K.sum(L, 1))
    return R
5,340,924
def strip_quotes(string):
    """Remove quotes from front and back of string

    >>> strip_quotes('"fred"') == 'fred'
    True
    """
    if not string:
        return string
    first_ = string[0]
    last = string[-1]
    if first_ == last and first_ in '"\'':
        return string[1:-1]
    return string
5,340,925
def clear_all_tables(retain=[]):
    """Like ``clear_all_models`` above, except **much** faster."""
    for table in reversed(Base.metadata.sorted_tables):
        if table.name not in retain:
            Session.execute(table.delete())
    Session.commit()
5,340,926
def generate_local_dict(locale: str, init_english: bool = False): """Generate a dictionary with keys for each indicators and translatable attributes. Parameters ---------- locale : str Locale in the IETF format init_english : bool If True, fills the initial dictionary with the english versions of the attributes. Defaults to False. """ from xclim.core.indicator import registry if locale in _LOCALES: locname, attrs = get_local_dict(locale) for ind_name in attrs.copy().keys(): if ind_name != "attrs_mapping" and ind_name not in registry: attrs.pop(ind_name) else: attrs = {} attrs_mapping = attrs.setdefault("attrs_mapping", {}) attrs_mapping.setdefault("modifiers", [""]) for key, value in default_formatter.mapping.items(): attrs_mapping.setdefault(key, [value[0]]) eng_attr = "" for ind_name, indicator in registry.items(): ind_attrs = attrs.setdefault(ind_name, {}) for translatable_attr in set(TRANSLATABLE_ATTRS).difference( set(indicator._cf_names) ): if init_english: eng_attr = getattr(indicator, translatable_attr) if not isinstance(eng_attr, str): eng_attr = "" ind_attrs.setdefault(f"{translatable_attr}", eng_attr) for cf_attrs in indicator.cf_attrs: # In the case of single output, put var attrs in main dict if len(indicator.cf_attrs) > 1: ind_attrs = attrs.setdefault(f"{ind_name}.{cf_attrs['var_name']}", {}) for translatable_attr in set(TRANSLATABLE_ATTRS).intersection( set(indicator._cf_names) ): if init_english: eng_attr = cf_attrs.get(translatable_attr) if not isinstance(eng_attr, str): eng_attr = "" ind_attrs.setdefault(f"{translatable_attr}", eng_attr) return attrs
5,340,927
def generate_scheme_from_file(filename=None, fileobj=None, filetype='bson', alimit=1000, verbose=0, encoding='utf8', delimiter=",", quotechar='"'): """Generates schema of the data BSON file""" if not filetype and filename is not None: filetype = __get_filetype_by_ext(filename) datacache = [] if filetype == 'bson': if filename: source = open(filename, 'rb') else: source = fileobj n = 0 for r in bson.decode_file_iter(source): n += 1 if n > alimit: break datacache.append(r) if filename: source.close() elif filetype == 'jsonl': if filename: source = open(filename, 'r', encoding=encoding) else: source = fileobj n = 0 for r in source: n += 1 if n > alimit: break datacache.append(orjson.loads(r)) if filename: source.close() elif filetype == 'csv': if filename: source = open(filename, 'r', encoding=encoding) else: source = fileobj n = 0 reader = csv.DictReader(source, quotechar=quotechar, delimiter=delimiter, quoting=csv.QUOTE_ALL) for r in reader: n += 1 if n > alimit: break datacache.append(r) if filename: source.close() n = 0 scheme = None for r in datacache: n += 1 if scheme is None: scheme = get_schema(r) else: scheme = merge_schemes([scheme, get_schema(r)]) return scheme
5,340,928
def getTests():
    """Returns a dictionary of document samples for the Entity Types Person, Location, and Organization.

    Returns:
        [type] -- Returns a dictionary of document samples for the Entity Types Person, Location, and Organization.
    """
    personDocument = gtWorkingCopy.find_one({"$and": [{'entity_type': 'Person'}, {'sentences.5': {'$exists': True}}]})  # Person and at least 5 test sentences
    locationDocument = gtWorkingCopy.find_one({"$and": [{'entity_type': 'Location'}, {'sentences.5': {'$exists': True}}]})  # Location and at least 5 test sentences
    organizationDocument = gtWorkingCopy.find_one({"$and": [{'entity_type': 'Organization'}, {'sentences.5': {'$exists': True}}]})  # Organization and at least 5 test sentences
    tests = {"person": personDocument, "location": locationDocument, "organization": organizationDocument}
    return tests
5,340,929
def ts_cor(a, b, min_sample = 3, axis = 0, data = None, state = None): """ ts_cor(a) is equivalent to a.cor()[0][1] - supports numpy arrays - handles nan - supports state management :Example: matching pandas ------------------------- >>> # create sample data: >>> from pyg_timeseries import *; import pandas as pd; import numpy as np >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0.5] = np.nan >>> b = pd.Series(np.random.normal(0,1,10000), drange(-9999)); b[b>0.5] = np.nan >>> state = data = None; min_sample = 3; axis = 0 >>> df = pd.concat([a,b], axis=1) >>> assert abs(df.corr()[0][1] - ts_cor(a, b))<1e-10 :Example: slightly faster than pandas ------------------------------------- %timeit ts_cor(a, b) 245 µs ± 6.43 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) %timeit df.corr()[0][1] 575 µs ± 13 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) :Example: numpy ----------------------------------- >>> assert ts_cor(a.values, b.values) == ts_cor(a,b) :Example: state management ------------------------------------------- >>> old = ts_std_(a.iloc[:2000]) >>> new = ts_std(a.iloc[2000:], vec = old.vec) >>> assert new == ts_std(a) """ state = state or dict(vec = _vec(a, None,6,0.)) rtn = first_(_ts_cor(a, b, min_sample = min_sample, **state)) return rtn
5,340,930
def get_robin_bndry_conditions(kappa, alpha, Vh):
    """
    Do not pass element=function_space.ufl_element() as want forcing to be a
    scalar pass degree instead
    """
    bndry_obj = get_2d_unit_square_mesh_boundaries()
    boundary_conditions = []
    ii = 0
    for phys_var in [0, 1]:
        for normal in [1, -1]:
            boundary_conditions.append(
                ['robin', bndry_obj[ii],
                 [RobinBoundaryRHS(kappa, normal, alpha, 'real', phys_var,
                                   degree=Vh.ufl_element().degree()),
                  RobinBoundaryRHS(kappa, normal, alpha, 'imag', phys_var,
                                   degree=Vh.ufl_element().degree())],
                 [dl.Constant(0), alpha]])
            ii += 1
    return boundary_conditions
5,340,931
def _AddComputeMetadata(client, server, metadata):
    """Updates the metadata with network compute metadata."""
    compute_metadata = {
        # test_instance_id, test_region and meta_os_info are informational and
        # used in conjunction with saving results.
        'test_instance_id': server.machine_type,
        'test_region': cloud_harmony_util.GetRegionFromZone(server.zone),
        'meta_instance_id': client.machine_type,
        'meta_region': cloud_harmony_util.GetRegionFromZone(client.zone),
        'meta_zone': client.zone,
    }
    metadata.update(compute_metadata)
5,340,932
def versions_banner():
    """The top-level method to draw banner for showing versions of T_System.
    """
    import t_system.__init__

    if not os.path.exists(t_system.dot_t_system_dir):
        os.mkdir(t_system.dot_t_system_dir)

    from t_system.logging import LogManager

    t_system.log_manager = LogManager(args={"verbose": False, "environment": None})

    from t_system.stand import __version__ as stand_version
    from t_system.remote_ui import __version__ as remote_ui_version

    t_system_version = pkg_resources.get_distribution("t_system").version

    versions = f't_system: {t_system_version}\nremote_ui: {remote_ui_version}\nstand: {stand_version}'

    call(f'figlet -f term \'{versions}\' | boxes -d spring -a hcvc -p h8 | /usr/games/lolcat -a -d 1', shell=True)
5,340,933
def create_original_list_of_columns(dataframe): """ Gets the original dataframe list Args: dataframe (df): Pandas dataframe """ new_columns = [] list_of_columns = '/home/connormcdowall/finance-honours/data/178-factors.txt' file = open(list_of_columns, 'r') lines = file.readlines() for line in lines: line = line.rstrip('\n') new_columns.append(line) # Only collect column in both lists cols = dataframe.columns extract_columns = [] for column in new_columns: if column in cols: extract_columns.append(column) # Rewrite new working file for numerical encoding file = open( "/home/connormcdowall/finance-honours/data/working-columns.txt", "r+") file.truncate(0) file.close() textfile = open( "/home/connormcdowall/finance-honours/data/working-columns.txt", "w") for element in extract_columns: textfile.write(element + "\n") textfile.close() return
5,340,934
def get_prop_datatypes(labels, propnames, MB=None):
    """Retrieve the per-property output datatypes."""
    rp = regionprops(labels, intensity_image=MB, cache=True)
    datatypes = []
    for propname in propnames:
        if np.array(rp[0][propname]).dtype == 'int64':
            datatypes.append('int32')
        else:
            datatypes.append('float')
    return datatypes
5,340,935
def parse_html(html: str) -> Tuple[str, str]:
    """
    This function parses the html, strips the tags and returns the title and the body of the html file.

    Parameters
    ----------
    html : str
        The HTML text

    Returns
    -------
    Tuple[str, str]
        A tuple of (title, body)
    """
    doc = pq(html)
    title = doc("title").text()
    body = doc("body").text()
    return (title, body)
5,340,936
def block(keyword, multi=False, noend=False):
    """Decorate block writing functions."""
    def decorator(func):
        from .._common import header

        @wraps(func)
        def wrapper(*args, **kwargs):
            head_fmt = "{:5}{}" if noend else "{:5}{}\n"
            out = [head_fmt.format(keyword, header)]
            out += func(*args, **kwargs)
            out += ["\n"] if multi else []
            return out
        return wrapper
    return decorator
5,340,937
def show_choices(choices):
    """
    Print the available choices for a specific selection by the user.

    Parameters
    ----------
    choices: dict or list
        Dictionary or list containing the available choices. If a dictionary,
        its keys will be used as bullet for the printed list.
    """
    if isinstance(choices, dict):
        for key, item in choices.items():
            echo('{: >3}. {}'.format(key, item))
    elif isinstance(choices, list):
        for choice in choices:
            echo(' - {}'.format(choice))
    echo('')
    return
5,340,938
def data(path):
    """Get the file from the specified path from the data directory.

    Parameters
    ----------
    path : str
        The relative path to the file in the data directory.

    Returns
    -------
    file : File
        The requested file.
    """
    return send_from_directory(app.config['DATA_DIRECTORY'], path)
5,340,939
def search_software_fuzzy(query, max=None, csv_filename=None): """Returns a list of dict for the software results. """ results = _search_software(query) num = 0 softwares = [] while True: for r in results: r = _remove_useless_keys(r) softwares.append(r) num += len(results) # quit if no results or results number reach the max if num == 0 or (max and num >= max): break query_string = _get_next_page_query(results[-1]['SearchResultDescr']) if not query_string: break try: results = _get_software_search_results(query_string) # Sometimes it responds 50x http error for some keywords, # but it's not the client's fault. except requests.exceptions.HTTPError as e: logging.warning(f'{e.response.status_code} HTTP Error occurred ' f'during pagination: {e.response.url}') break if csv_filename: _write_software_results(softwares, csv_filename) return return softwares
5,340,940
def fitCirc(x,y,xerr = None, rIni = None, aveR=False): """ Performs a circle fit to data using least square residuals. Parameters ---------- x : An array of length N. y : An array of length N. xerr : None or an array of length N, If provided, it is the standard-deviation of points. This vector, if given, will be used as weights in the fit. rIni : is a maximum radius of the circle to be fitted. aveR : if True, returns the average deviation from the fit. Returns ------- xc, yc, R : center and the radius of the circle. errorbars : errorbars on the center x, y and the radius. aveResid : (optional) average residual """ x=np.array(x) y=np.array(y) if x.size<2: print('fitCirc: not enough data points to fit circle') return x_m = np.mean(x) y_m = np.mean(y) if xerr == None or all(xerr)==0: xerr = np.ones(len(x)) else: xerr=np.array(xerr) xerr[np.where(xerr==0)]=100 def calc_R(xc, yc): """ calculate the distance of each 2D points from the center (xc, yc) """ return np.sqrt((x-xc)**2 + (y-yc)**2) def resid(pars): """ calculate the algebraic distance between the 2D points and the mean circle centered at c=(xc, yc) """ # xc,yc, radius = pars v = pars.valuesdict() xc,yc, radius = v['xc'],v['yc'],v['radius'] Ri = calc_R(xc,yc) if rIni is not None and radius>rIni: return 10000000*(Ri - radius) return (Ri - radius)/np.array(xerr) center_estimate = x_m, y_m radius = calc_R(*center_estimate).mean() if rIni is not None and radius>rIni: radius = rIni params = Parameters() params.add('xc', x_m) params.add('yc', y_m) params.add('radius', radius, min=0) minzer=minimize(resid,params=params) res = minzer.params xc, yc, R = res['xc'].value,res['yc'].value,res['radius'].value errorbars = [res['xc'].stderr,res['yc'].stderr,res['radius'].stderr] aveResid = sum(abs(minzer.residual))/x.size if rIni is not None and R>rIni: print('radius greater than initial, resid=',aveResid) if aveR: return xc,yc,R,errorbars,aveResid else: return xc,yc,R, errorbars
5,340,941
def get_clients():
    """
    Return current clients
    ---
    tags:
      - clients
    operationId: listClients
    produces:
      - application/json
    schemes: ['http', 'https']
    responses:
      200:
        description: List of clients
        schema:
          type: array
          items:
            $ref: '#/definitions/Client'
    """
    return jsonify_obj(get_locker().get_clients())
5,340,942
def GetInstanceListForHypervisor(hname, hvparams=None,
                                 get_hv_fn=hypervisor.GetHypervisor):
    """Provides a list of instances of the given hypervisor.

    @type hname: string
    @param hname: name of the hypervisor
    @type hvparams: dict of strings
    @param hvparams: hypervisor parameters for the given hypervisor
    @type get_hv_fn: function
    @param get_hv_fn: function that returns a hypervisor for the given
      hypervisor name; optional parameter to increase testability
    @rtype: list
    @return: a list of all running instances on the current node
      - instance1.example.com
      - instance2.example.com
    """
    try:
        return get_hv_fn(hname).ListInstances(hvparams=hvparams)
    except errors.HypervisorError, err:
        _Fail("Error enumerating instances (hypervisor %s): %s", hname, err,
              exc=True)
5,340,943
def check_cutoff_properties(cutoffs):
    """Helper function to test common properties of cutoffs"""
    assert isinstance(cutoffs, np.ndarray)
    assert all([is_int(cutoff) for cutoff in cutoffs])
    assert cutoffs.ndim == 1
    assert len(cutoffs) > 0
5,340,944
def is_nersc_system(system=system()):
    """Whether current system is a supported NERSC system."""
    return (system is not None) and (system in _system_params.keys())
5,340,945
def main(): """get a list of all installed cameras""" # create file strings from os environment variables lbplog = os.environ['LBPLOG'] + "/synology/synology.log" lbpdata = os.environ['LBPDATA'] + "/synology/cameras.dat" # creating log file and set log format logging.basicConfig(filename=lbplog,level=logging.INFO,format='%(asctime)s: %(message)s ') logging.info("<INFO> camera.py: init...") # open config file and read options try: ds = DiskStation() s = ds.Login() except: logging.info("<ERROR> login to DiskStation was not possible") quit() if s == True: cam_file = open(lbpdata, "w") cam_list = ds.GetCams() if cam_list != '': for c in cam_list.json().get('data').get('cameras'): c_id = str(c.get('id')) c_vendor = str(c.get('vendor')) c_model = str(c.get('model')) cam_file.write(c_id + ":" + c_vendor + " - " + c_model) cam_file.close() ds.Logout() else: quit()
5,340,946
def is_string_type_suspicious_score(confidence_score, params):
    """ determine if string type confidence score is suspicious in reputation_params """
    return not isinstance(confidence_score, int) and CONFIDENCE_LEVEL_PRIORITY.get(
        params['override_confidence_level_suspicious'], 10) <= CONFIDENCE_LEVEL_PRIORITY.get(confidence_score.lower(), -1)
5,340,947
def align_embeddings(base_embed, other_embed, sample_size=1):
    """Fit the regression that aligns model1 and model2."""
    regression = fit_w2v_regression(base_embed, other_embed, sample_size)
    aligned_model = apply_w2v_regression(base_embed, regression)
    return aligned_model
5,340,948
def test_skip_fixture(executed_docstring_source): """ >>> import pytest >>> @pytest.fixture ... def skip_fixture(): ... pytest.skip() >>> def test_skip_fixture_example(skip_fixture): ... pass """ assert_that(executed_docstring_source.allure_report, has_test_case("test_skip_fixture_example", with_status("skipped"), has_status_details(with_message_contains("Skipped")), has_container(executed_docstring_source.allure_report, has_before("skip_fixture", with_status("skipped"), has_status_details( with_message_contains("Skipped"), with_trace_contains("skip_fixture") ), ), ) ) )
5,340,949
def validate_dict_keys(dict_to_check: dict, allowed_keys: set, necessary_keys: Optional[set] = None, dict_name: Optional[str] = None) -> bool: """If you use dictionaries to pass parameters, there are two common errors: * misspelled keys * necessary keys are missing This functions checks whether all keys are in the set of allowed_keys and raises ValueError if a unknown key is found. It can also check whether all necessary keys are present and raises ValueError if not. dict_name can be used for a better error message.""" if not dict_name: # fallback to neutral dict_name = 'dictionary' # In case something other than a set is provided: allowed_keys = convert_to_set(allowed_keys) if necessary_keys: # also make sure it is a set: necessary_keys = convert_to_set(necessary_keys) # Are all necessary keys in the allowed key list? if len(necessary_keys - allowed_keys) != 0: msg = ("Contradiction: Not all necessary keys " + "are in the allowed keys set!") logging.exception(msg) raise ValueError(msg) # Get all keys in the dictionary: try: found_keys = dict_to_check.keys() except AttributeError as no_dict: raise AttributeError('Expected a dictionary for the dict_to_check ' + 'parameter!') from no_dict # Check for unknown keys: for key in found_keys: if key not in allowed_keys: msg = f"Unknown key {key} in {dict_name}" logging.exception(msg) raise ValueError(msg) logging.debug('No unknown keys found.') # Check if all necessary keys are present: if necessary_keys: for key in necessary_keys: if key not in found_keys: msg = f"Necessary key {key} missing in {dict_name}!" logging.exception(msg) raise ValueError(msg) logging.debug('All necessary keys found.') return True
5,340,950
def data_unmerged():
    """
    Load HEWL diffraction data from APS 24-ID-C
    """
    datapath = ["data", "data_unmerged.mtz"]
    return load_dataset(datapath)
5,340,951
def copy(object, *args):
    """Copy the object."""
    copiedWrapper = wrapCopy(object)
    try:
        copiedWrapper.name = copiedWrapper.name + "_Copy"
    except AttributeError:
        pass
    return copiedWrapper.createAndFillObject(None, *args)
5,340,952
def convert_to_n0(n):
    """ Convert count vector to vector of "greater than" counts.

    Parameters
    -------
    n : 1D array, size K
        each entry k represents the count of items assigned to comp k.

    Returns
    -------
    n0 : 1D array, size K
        each entry k gives the total count of items at index above k
        N0[k] = np.sum(N[k:])

    Example
    -------
    >> convertToN0([1., 3., 7., 2])
    [12, 9, 2]
    """
    n = np.asarray(n)
    n0 = np.zeros_like(n)
    n0[:-1] = n[::-1].cumsum()[::-1][1:]
    return n0
5,340,953
def application(environ, start_response):
    """Serve the button HTML."""
    with open('wsgi/button.html') as f:
        response_body = f.read()

    status = '200 OK'
    response_headers = [
        ('Content-Type', 'text/html'),
        ('Content-Length', str(len(response_body))),
    ]
    start_response(status, response_headers)
    return [response_body.encode('utf-8')]
5,340,954
def main(args): """Handle the CLI command.""" # Track if anything was bad anything_suspicious = False anything_bad = False dump_dirs_all = [] # Unpack into bools mode_mac = args.mode in ["both", "mac"] mode_iphone = args.mode in ["both", "iphone"] method_attachments = args.method in ["both", "attachments"] method_datausagedb = args.method in ["both", "datausagedb"] # Scan for mac if desired if mode_mac: print(util.eqbound("Scanning local machine")) if method_attachments: print("Checking attachments") path = "~/Library/Messages/Attachments" out = core.check_attachments(path) anything_suspicious = anything_suspicious or out else: print("Cannot scan datausagedb for mac") # Scan for iphone if desired if mode_iphone: ibackup_installed = core.validate_ibackup_install() if not ibackup_installed: print("Cannot scan iphone backups without ibackuptools installed") return print(util.eqbound("Scanning iphone backups")) print("Searching for backups") backups = core.get_backup_data() if backups: # Filter if only newest or oldest backup desired if args.backups == "newest": backups = [sorted(backups, key=lambda v: v[2], reverse=True)[0]] elif args.backups == "oldest": backups = [sorted(backups, key=lambda v: v[2], reverse=False)[0]] # Scan all relevant backups for backup in backups: udid, datetime_str, datetime = backup print(f"Scanning backup {udid} from {datetime_str}") print("Extracting data from backup") dump_dir = core.dump_backup_data( udid, attachments=method_attachments, datausagedb=method_datausagedb ) dump_dirs_all.append(dump_dir) # Scan attachments if method_attachments: print("Checking attachments") out = core.check_attachments(dump_dir) anything_suspicious = anything_suspicious or out # Scan datausage db if method_datausagedb: print("Checking datausage db") path = dump_dir + "/Wireless/Library/Databases/DataUsage.sqlite" out = core.check_database(path) anything_bad = anything_bad or out print(util.eqbound("Scans complete")) if anything_bad: print("> Found evidence of successful attack.") elif anything_suspicious: print("> Found evidence of attempted attack.") else: print("> Found no evidence of attempted or successful attack.") if mode_iphone: if anything_suspicious or anything_bad or args.preserve: print("> Preserving backup data in:") for dump_dir in dump_dirs_all: print(f">\t{dump_dir}") else: print("> Wiping backup data") for dump_dir in dump_dirs_all: util.wipe_dir(dump_dir) elif args.preserve: print("> Nothing to preserve") print("Done")
5,340,955
def get_handler():
    """
    Return the handler configured by the most recent call to :func:`configure`.

    If :func:`configure` has not yet been called, this returns ``None``.
    """
    return current_handler
5,340,956
def display_rf_feature_importance(cache, save_location: str = None): """ Displays which pixels have the most influence in the model's decision. This is based on sklearn,ensemble.RandomForestClassifier's feature_importance array Parameters ---------- save_location : str the location to save the figure on disk. If None, the plot is displayed on runtime and not saved. cache : dict the cache dict returned by the classifier. Must at least include ['actual', 'prediction'] objects, each with ['train', 'test'] arrays Returns ------- matplotlib.pyplot.figure : the figure """ fig = plt.figure() plt.title("Pixel importance in random forest classification") plt.imshow(cache['model'].feature_importances_.reshape((28,28)), extent=[0,28,28,0]) plt.colorbar() if save_location is None: plt.show() else: plt.savefig(fname=save_location) return fig
5,340,957
def btc_command(update: Update, _: CallbackContext) -> None:
    """Send a message when the command /btc is issued."""
    btc = cg.get_price(ids='bitcoin', vs_currencies='usd')
    #btc = cg.get_coins_markets(vs_currency='usd')
    update.message.reply_text(btc)
5,340,958
def first_order(A, AB, B):
    """
    First order estimator following Saltelli et al. 2010 CPC, normalized by
    sample variance
    """
    return np.mean(B * (AB - A), axis=0) / np.var(np.r_[A, B], axis=0)
5,340,959
def test_cray_ims_base(cli_runner, rest_mock):
    """ Test cray ims base command """
    runner, cli, _ = cli_runner
    result = runner.invoke(cli, ['ims'])
    assert result.exit_code == 0

    outputs = [
        "deleted",
        "public-keys",
        "recipes",
        "images",
        "jobs"
    ]
    compare_output(outputs, result.output)
5,340,960
def test_invalid_env():
    """Verifies that an invalid environment variable will cause the CLI to
    exit with a non-zero status code."""
    helpers.log_status(cast(FrameType, currentframe()).f_code.co_name)
    result = helpers.execute_command(
        ["-v", "--env", "COMPOSE_PROJECT_NAMEtest", "version"]
    )
    assert result.exit_code == 2
    assert "Invalid key-value pair" in result.output

    result = helpers.execute_command(["-v", "--env", "=", "version"])
    assert result.exit_code == 2
    assert "Invalid key-value pair" in result.output
    helpers.log_success(cast(FrameType, currentframe()).f_code.co_name)
5,340,961
def mod_df(arr,timevar,istart,istop,mod_name,ts): """ return time series (DataFrame) from model interpolated onto uniform time base """ t=timevar.points[istart:istop] jd = timevar.units.num2date(t) # eliminate any data that is closer together than 10 seconds # this was required to handle issues with CO-OPS aggregations, I think because # they use floating point time in hours, which is not very accurate, so the FMRC # aggregation is aggregating points that actually occur at the same time dt =diff(jd) s = array([ele.seconds for ele in dt]) ind=where(s>10)[0] arr=arr[ind+1] jd=jd[ind+1] b = pd.DataFrame(arr,index=jd,columns=[mod_name]) # eliminate any data with NaN b = b[isfinite(b[mod_name])] # interpolate onto uniform time base, fill gaps up to: (10 values @ 6 min = 1 hour) c = pd.concat([b, ts],axis=1).interpolate(limit=10) return c
5,340,962
def generate(schema_file: str,
             output_dir: str,
             prefix: str,
             unmask: bool = False,
             builtins: bool = False) -> None:
    """
    Generate C code for the given schema into the target directory.

    :param schema_file: The primary QAPI schema file.
    :param output_dir: The output directory to store generated code.
    :param prefix: Optional C-code prefix for symbol names.
    :param unmask: Expose non-ABI names through introspection?
    :param builtins: Generate code for built-in types?

    :raise QAPIError: On failures.
    """
    assert invalid_prefix_char(prefix) is None

    schema = QAPISchema(schema_file)
    gen_types(schema, output_dir, prefix, builtins)
    gen_visit(schema, output_dir, prefix, builtins)
    gen_commands(schema, output_dir, prefix)
    gen_events(schema, output_dir, prefix)
    #gen_introspect(schema, output_dir, prefix, unmask)
5,340,963
def hydrate_reserve_state(data={}):
    """
    Given a dictionary, allow the viewmodel to hydrate the data needed by this view
    """
    vm = State()
    return vm.hydrate(data)
5,340,964
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (HKEY_CLASSES_ROOT, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, HKEY_USERS): try: k = RegOpenKeyEx(base,K) i = 0 while 1: try: p = RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except RegError: break i = i + 1 except RegError: pass L.sort() L.reverse() return L
5,340,965
def doctest_ObjectWriter_get_state_Persistent():
    """ObjectWriter: get_state(): Persistent objects

      >>> writer = serialize.ObjectWriter(dm)

      >>> top = Top()
      >>> pprint.pprint(writer.get_state(top))
      {'_py_type': 'DBREF',
       'database': 'pjpersist_test',
       'id': '0001020304050607080a0b0c',
       'table': 'Top'}

    But a persistent object can declare that it does not want a separate
    document:

      >>> top2 = Top()
      >>> top2._p_pj_sub_object = True
      >>> writer.get_state(top2, top)
      {'_py_persistent_type': 'pjpersist.tests.test_serialize.Top'}
    """
5,340,966
def mapDictToProfile(wordD, tdm): """ Take the document in as a dictionary with word:wordcount format, and map it to a p profile Parameters ---------- wordD : Dictionary Dictionary where the keys are words, and the values are the corrosponding word count tdm : termDocMatrix object The trained and factorized term-document matrix structure Returns ------- p : numpy array The mapped vector profile for the string Notes ----- References ---------- Examples -------- """ p = np.zeros(len(tdm.terms)) tfweight = float(sum(wordD.values())) for i in range(len(tdm.terms)): if tdm.terms[i] in wordD: p[i] = wordD[tdm.terms[i]]/tfweight #print len(p) p = np.multiply(tdm.idfs, p).transpose() return p
5,340,967
def create_monthly_network_triplets_country_partition(conn, *, month, year, suffix='', num_physical_shards=None, fillfactor=45): """ Function to DRY out creation of a new month/year partition for monthly_network_triplets_country. Arguments: conn: dirbs db connection object month: partition month year: partition year suffix: suffix string, default empty num_physical_shards: number of physical shards to apply, default none fillfactor: fill factor of the partition, default 45% """ if num_physical_shards is None: num_physical_shards = num_physical_imei_shards(conn) with conn.cursor() as cursor, utils.db_role_setter(conn, role_name='dirbs_core_import_operator'): part_name = monthly_network_triplets_country_partition(month=month, year=year, suffix=suffix) assert len(part_name) < 64 parent_tbl_name = 'monthly_network_triplets_country{0}'.format(suffix) cursor.execute( sql.SQL( """CREATE TABLE {0} PARTITION OF {1} FOR VALUES FROM %s TO %s PARTITION BY RANGE (virt_imei_shard) """ ).format(sql.Identifier(part_name), sql.Identifier(parent_tbl_name)), [(year, month), (year, month + 1)] ) _grant_perms_monthly_network_triplets(conn, part_name=part_name) # Create child partitions create_imei_shard_partitions(conn, tbl_name=part_name, num_physical_shards=num_physical_shards, perms_func=_grant_perms_monthly_network_triplets, fillfactor=fillfactor)
5,340,968
def test_compare_numpy_dictionaries():
    """
    Unit test for the compare_numpy_dictionaries function.
    """
    dict1 = {1: np.zeros((2, 2)), 2: np.ones((2, 2))}
    dict2 = {1: np.zeros((2, 2)), 2: np.ones((2, 2))}
    dict3 = {1: np.ones((2, 2)), 2: np.zeros((2, 2))}
    dict4 = {0: np.ones((2, 2)), 1: np.zeros((2, 2))}

    assert compare_numpy_dictionaries(dict1, dict2)
    assert not compare_numpy_dictionaries(dict1, dict3)
    assert not compare_numpy_dictionaries(dict3, dict4)
5,340,969
def _to_tikz(g: BaseGraph[VT,ET], xoffset:FloatInt=0, yoffset:FloatInt=0, idoffset:int=0) -> Tuple[List[str],List[str]]: """Converts a ZX-graph ``g`` to a string representing a tikz diagram. The optional arguments are used by :func:`to_tikz_sequence`. """ verts = [] maxindex = idoffset for v in g.vertices(): p = g.phase(v) ty = g.type(v) if ty == VertexType.BOUNDARY: style = "none" elif ty == VertexType.H_BOX: style = "hadamard" else: style = 'Z' if ty==VertexType.Z else 'X' if p != 0: style += " phase" style += " dot" if (ty == VertexType.H_BOX and p == 1) or (ty != VertexType.H_BOX and p == 0): phase = "" else: ns = '' if p.numerator == 1 else str(p.numerator) dn = '' if p.denominator == 1 else str(p.denominator) if dn: phase = r"$\frac{%s\pi}{%s}$" % (ns, dn) else: phase = r"$%s\pi$" % ns x = g.row(v) + xoffset y = - g.qubit(v) - yoffset s = " \\node [style={}] ({:d}) at ({:.2f}, {:.2f}) {{{:s}}};".format(style,v+idoffset,x,y,phase) # type: ignore verts.append(s) maxindex = max([v+idoffset,maxindex]) # type: ignore edges = [] for e in g.edges(): v,w = g.edge_st(e) ty = g.edge_type(e) s = " \\draw " if ty == EdgeType.HADAMARD: if g.type(v) != VertexType.BOUNDARY and g.type(w) != VertexType.BOUNDARY: s += "[style=hadamard edge] " else: x = (g.row(v) + g.row(w))/2.0 +xoffset y = -(g.qubit(v)+g.qubit(w))/2.0 -yoffset t = " \\node [style=hadamard] ({:d}) at ({:.2f}, {:.2f}) {{}};".format(maxindex+1, x,y) verts.append(t) maxindex += 1 s += "({:d}) to ({:d});".format(v+idoffset,w+idoffset) # type: ignore edges.append(s) return (verts, edges)
5,340,970
def _fix_server_adress(raw_server):
    """ Prepend http:// there. """
    if not raw_server.startswith("http://"):
        raw_server = "http://" + raw_server
    return raw_server
5,340,971
def _get_all_subdirs(path):
    """Example: For path='prod/ext/test' it returns ['prod/ext/test', 'prod/ext', 'prod', '']"""
    while True:
        parent, base = os.path.split(path)
        if base:
            yield path
        if not parent or path == parent:
            break
        path = parent
    yield ''
5,340,972
def _parse_output_keys(val):
    """Parse expected output keys from string, handling records.
    """
    out = {}
    for k in val.split(","):
        # record output
        if ":" in k:
            name, attrs = k.split(":")
            out[name] = attrs.split(";")
        else:
            out[k] = None
    return out
5,340,973
def dfs_stats(path):
    """Print statistics about the file/directory at path"""
    client = get_dfs_client()
    client.start()
    print(client.status(path, strict=True))
    client.stop()
5,340,974
def when_ready(server): """ server hook that only runs when the gunicorn master process loads """ import os import traceback import rethinkdb as r try: server.log.info("rethinkdb initialising") DB_HOST = os.environ.get("RETHINKDB_HOST") DB_PORT = os.environ.get("RETHINKDB_PORT") DB_DATABASE = os.environ.get("RETHINKDB_DATABASE") DB_TABLE = os.environ.get("RETHINKDB_TABLE") indexes = ['test'] conn = r.connect(host=DB_HOST, port=DB_PORT, db=DB_DATABASE) # Check if database exists, if not create it db_exists = r.db_list().contains(DB_DATABASE).run(conn) if not db_exists: server.log.info('adding database {0}'.format(DB_DATABASE)) r.db_create(DB_DATABASE).run(conn) # Check if table exist, if not create it table_exists = r.db(DB_DATABASE).table_list().contains(DB_TABLE).run(conn) if not table_exists: server.log.info('adding table {0}'.format(DB_TABLE)) r.db(DB_DATABASE).table_create(DB_TABLE).run(conn) # Check if index exists if not add it rtable = r.db(DB_DATABASE).table(DB_TABLE) current_indexes = rtable.index_list().run(conn) for index in indexes: if index not in current_indexes: server.log.info('adding index {0}'.format(index)) rtable.index_create(index).run(conn) server.log.info("rethinkdb ready") except Exception as e: server.log.error(traceback.format_exc()) server.log.error("rethinkdb failed to initialise")
5,340,975
def check(sync_urls: list, cursor: sqlite3.Cursor, db: sqlite3.Connection, status: str): """Checking update in the back. Args: sync_urls: URL(s) to be checked as a list cursor: Cursor object of sqlite3 db: Connection object of sqlite3 status: 'viewed' or 'unviewed' Return: Set of update links """ out_updates = [] for sync_url in sync_urls: links_fetch = [] links_from_db = [] https_updates = [] sync_url = sync_url.strip("/") f_links = fetch(sync_url) # .split(",") for f_link in set(f_links): links_fetch.append(f_link.strip()) db_links = cursor.execute( "SELECT link FROM links JOIN urls ON links.url_id=urls.url_id WHERE urls.url=?", (sync_url,), ) for link in db_links: links_from_db.append(link[0]) updates = [x for x in links_fetch if x not in set(links_from_db)] url_split = sync_url.split("/") for update in updates: if sync_url in update: https_updates.append(update) elif len(url_split) > 3: url_split = url_split[:3] https_updates.append("/".join(url_split) + "/" + update.strip("/")) else: https_updates.append(sync_url + "/" + update.strip("/")) url_id = cursor.execute( "SELECT url_id FROM urls WHERE url=?", (sync_url,) ).fetchone()[0] for update in updates: items = (url_id, update, status) cursor.execute( "INSERT INTO links (url_id, link, status) VALUES (?, ?, ?)", items ) db.commit() out_updates.extend(https_updates) return set(out_updates)
5,340,976
def _get_output_structure(
    text: str,
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    tokenizer_args: Dict,
) -> Tuple[OrderedDict, Type]:
    """Function needed for saving in a dictionary the output structure
    of the transformers model.
    """
    encoded_input = tokenizer([text], **tokenizer_args)
    output = model(**encoded_input)
    structure = OrderedDict()
    for key, value in output.items():
        if isinstance(value, torch.Tensor):
            structure[key] = None
        else:
            size = _get_size_recursively(value)
            structure[key] = size
    return structure, type(output)
5,340,977
def encode_board(board):
    """
    Encode the 2D board list to a 64-bit integer
    """
    new_board = 0
    for row in board.board:
        for tile in row:
            new_board <<= 4
            if tile is not None:
                new_board += tile.val
    return new_board
5,340,978
def main(): """ Run serial communication. """ global ser # serial communication handler global version print 'Welcome to DigCam Terminal!' # if can't open USB port, quit if openSerialPort() == -1: sys.exit() # display instructions helpMessage() # deletes trash while ser.inWaiting() > 0: ser.read() # main loop while True: com = raw_input('> Insert instruction: ') # ask user for command if com == 'exit': ser.close(); print 'Goodbye!' break elif com == 'help': helpMessage() elif com == 'about': print 'DigCam Terminal Version ' + str(version) print 'Authors:' print '\t Fernando Barbosa' print '\t Joao Kunzel' print '\t Roberto Walter' else: # then it wasn't a terminal command code = instructions.encode(com); if code != -1: # check if there was an error ser.write(code) # write serial data # wait for acknowledgement answer = ser.read() # check answer while answer != ACK: # ACK - code for acknowledgement answer = ser.read() if answer == '': print 'Error: connection timeout!' sys.exit() answer = ser.read() # check answer # print until end of stream while answer != END: # END - code for end of stream if answer == LOG: readImage() else: sys.stdout.write(answer) answer = ser.read()
5,340,979
def get_symbol(i):
    """Get the symbol corresponding to int ``i`` - runs through the usual 52
    letters before resorting to unicode characters, starting at ``chr(192)``.

    Examples
    --------
    >>> get_symbol(2)
    'c'

    >>> oe.get_symbol(200)
    'Ŕ'

    >>> oe.get_symbol(20000)
    '京'
    """
    if i < 52:
        return einsum_symbols_base[i]
    return chr(i + 140)
5,340,980
def longest_common_substring(string1, string2):
    """
    Function to find the longest common substring of two strings
    """
    m = [[0] * (1 + len(string2)) for i in range(1 + len(string1))]
    longest, x_longest = 0, 0
    for x in range(1, 1 + len(string1)):
        for y in range(1, 1 + len(string2)):
            if string1[x - 1] == string2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    longest = m[x][y]
                    x_longest = x
            else:
                m[x][y] = 0
    return string1[x_longest - longest: x_longest]
5,340,981
def check_output(output_fields):
    """
    Sample helper function used to check the output fields sent by SPARKL.
    It is referenced by the set of test data below.

    Each helper function takes as parameter a dictionary that contains
    all output fields.
    E.g.:
        { 'sample_out_field1' : 3,
          'sample_out_field2': 'some_string'}
    """
    output_field = output_fields['output_field_name']
    assert isinstance(output_field, str), '{} must be a ' \
        'string!'.format(output_field)
5,340,982
def elapsed_time_id(trace, event_index: int):
    """Calculate elapsed time by event index in trace

    :param trace:
    :param event_index:
    :return:
    """
    try:
        event = trace[event_index]
    except IndexError:
        # catch for 0 padding.
        # calculate using the last event in trace
        event = trace[-1]
    return elapsed_time(trace, event)
5,340,983
async def roots2(ctx, n1: float, n2: float):
    """Root an poly"""
    author = ctx.message.author
    num = [n1, n2]
    answer = numpy.roots(num)
    embed = discord.Embed(color=discord.Color.from_rgb(222, 137, 127))
    embed.set_author(name=author.display_name, url="https://youtu.be/dQw4w9WgXcQ", icon_url=author.avatar_url)
    embed.title = "🧮 Calculation"
    # embed.set_thumbnail(url="https://cdn.discordapp.com/avatars/762347346993348659/335e22e0f77a6bb717502981f57221e2.png")
    embed.description = "A roots of one-dimension polynomial (two numbers)"
    embed.add_field(name="Input", value=str(numpy.poly1d(num)), inline=False)
    embed.add_field(name="Results", value=str(answer), inline=False)
    embed.set_footer(text="Hello! My name is Kasumi Toyama! My father is HelloYeew#2740.")
    await ctx.send(embed=embed)
5,340,984
def save_pos(nestable=False): """Save the cursor position. This function is a context manager for use in ``with`` statements. It saves the cursor position when the context is entered and restores the cursor to the saved position when the context is exited. If ``nestable=False`` the ANSI control sequences ``ESC[s: SCP - Save Cursor Position`` and ``ESC[u: RCP - Restore Cursor Position`` will be used. By doing so only the saved cursor position of the innermost context will be restored. If ``nestable=True`` and :func:`nestable_save_pos_possible` returns ``True``, each time the context is entered the position is determined by calling ``getyx()`` from the `term package <https://pypi.org/project/term/>`_. If the terminal does not support the control sequence ``ESC[6n: DSR – Device Status Report`` this is not possible. :param bool nestable: wether a nestable context should be used :raises RuntimeError: if ``nestable=True`` and nestable context not possible """ if nestable: pos = term.getyx() if pos == (0, 0): raise RuntimeError('Nestable save_pos context not supported') else: _print(CS.SCP) try: yield finally: if nestable: move(*pos, False) else: _print(CS.RCP)
5,340,985
def test_find_microbit_posix_missing():
    """
    Simulate being on os.name == 'posix' and a call to "mount" returns a no
    records associated with a micro:bit device.
    """
    with open('tests/mount_missing.txt', 'rb') as fixture_file:
        fixture = fixture_file.read()
        with mock.patch('os.name', 'posix'):
            with mock.patch('uflash.check_output', return_value=fixture):
                assert uflash.find_microbit() is None
5,340,986
def build_candidate_digest(proof, leaf_hash):
    """
    Build the candidate digest representing the entire ledger from the Proof hashes.

    :type proof: dict
    :param proof: The Proof object.

    :type leaf_hash: bytes
    :param leaf_hash: The revision hash to pair with the first hash in the Proof hashes list.

    :rtype: bytes
    :return: The calculated root hash.
    """
    parsed_proof = parse_proof(proof)
    root_hash = calculate_root_hash_from_internal_hashes(parsed_proof, leaf_hash)
    return root_hash
5,340,987
def register_configs(settings_cls: Optional[Type[Settings]] = None, settings_group: Optional[str] = None): """ Register configuration options in the main ConfigStore.instance(). The term `config` is used for a StructuredConfig at the root level (normally switchable with `-cn` flag in Hydra, here we use only one default config). Fields of the main config use StructuredConfigs with class names ending in `Settings`. `Conf` suffix is used for external schemas provided by the `hydra-torch` package for PyTorch/PyTorch Lightning integration, e.g. `AdamConf`. """ cs = ConfigStore.instance() settings_group = settings_group or 'cfg' # Main config root = RootConfig() root.defaults[0] = {settings_group: 'default'} cs.store(name='root', node=DictConfig(root)) # Config groups with defaults, YAML files validated by Python structured configs # e.g.: `python train.py experiment=default` cs.store(group=settings_group, name='settings_schema', node=settings_cls if settings_cls is not None else Settings)
5,340,988
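# A consumption sketch for register_configs above. It assumes Settings and
# RootConfig are dataclasses defined elsewhere in the project, and that a
# conf/ directory provides cfg/default.yaml matching the registered schema;
# directory and file names here are illustrative.
import hydra
from omegaconf import DictConfig, OmegaConf

register_configs()   # populate ConfigStore with 'root' and the 'cfg' group schema

@hydra.main(config_path='conf', config_name='root')
def train(cfg: DictConfig) -> None:
    # cfg.cfg holds the settings validated against the registered schema
    print(OmegaConf.to_yaml(cfg))

if __name__ == '__main__':
    train()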
def test_about(client): """Tests the about route.""" assert client.get('/about').status_code == 200
5,340,989
def confusion_matrix_eval(cnn, data_loader): """Retrieves false positives and false negatives for further investigation Parameters ---------- cnn : torchvision.models A trained pytorch model. data_loader : torch.utils.data.DataLoader A dataloader iterating through the holdout test sample. Returns ------- dict Dictionary containing cases model classified as false positives and false negatives. Raises ------ ValueError Raised if data_loader is not iterable. """ if not isinstance(data_loader, Iterable): raise ValueError("data_loader is not iterable") fp = [] fn = [] cnn.eval() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") with torch.no_grad(): for i, (inputs, classes) in enumerate(data_loader): inputs = inputs.to(device) classes = classes.to(device) outputs = cnn(inputs).flatten() preds = torch.sigmoid(outputs) > 0.5 j = 0 for t, p in zip(classes.view(-1), preds.view(-1)): if [float(t.cpu().numpy()), float(p.long().cpu().numpy())] == [ 0.0, 1.0, ]: fp.append( data_loader.dataset.samples[(i * data_loader.batch_size + j)][1] ) elif [float(t.cpu().numpy()), float(p.long().cpu().numpy())] == [ 1.0, 0.0, ]: fn.append( data_loader.dataset.samples[(i * data_loader.batch_size + j)][1] ) j += 1 return {"false_positives": fp, "false_negatives": fn}
5,340,990
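# A usage sketch for confusion_matrix_eval above; the dataset path is
# illustrative and the model is an untrained stand-in for a trained binary
# classifier. Note that the index arithmetic inside the function assumes the
# loader walks the dataset in order, so shuffle must stay False.
import torch
from torchvision import datasets, models, transforms

cnn = models.resnet18(num_classes=1)          # stand-in; load real weights in practice
tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
holdout = datasets.ImageFolder('data/holdout', transform=tfm)
loader = torch.utils.data.DataLoader(holdout, batch_size=16, shuffle=False)

results = confusion_matrix_eval(cnn, loader)
print(len(results['false_positives']), 'false positives')
print(len(results['false_negatives']), 'false negatives')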
def test_export_convert_to_jpeg_quality(): """test --convert-to-jpeg --jpeg-quality""" import glob import os import os.path import pathlib from osxphotos.cli import export runner = CliRunner() cwd = os.getcwd() # pylint: disable=not-context-manager with runner.isolated_filesystem(): result = runner.invoke( export, [ os.path.join(cwd, PHOTOS_DB_15_7), ".", "-V", "--convert-to-jpeg", "--jpeg-quality", "0.2", ], ) assert result.exit_code == 0 files = glob.glob("*") assert sorted(files) == sorted(CLI_EXPORT_FILENAMES_CONVERT_TO_JPEG) large_file = pathlib.Path(CLI_EXPORT_CONVERT_TO_JPEG_LARGE_FILE) assert large_file.stat().st_size < 1000000
5,340,991
def match_by_hashed_faceting(*keys): """Match method 3 - Hashed Faceted Search""" matches = [] hfs = [] for i in range(len(__lookup_attrs__)): key = [x for x in keys if x[0] == __lookup_attrs__[i]] if key: hfs.append(key[0]) hashed_val = hashlib.sha256(str(hfs).encode('utf-8')).hexdigest() hashed_key = keynamehelper.create_key_name("hfs", hashed_val) for found_key in redis.sscan_iter(hashed_key): matches.append(found_key) return matches
5,340,992
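# The lookup in match_by_hashed_faceting depends on reproducing the hash that
# was stored at index time: matching attribute/value pairs are collected in
# __lookup_attrs__ order, stringified and SHA-256 hashed. A standalone sketch
# of just that key construction, with illustrative attribute names:
import hashlib

lookup_attrs = ['type', 'color', 'size']        # stand-in for __lookup_attrs__
query = [('color', 'blue'), ('size', 'L')]      # the *keys passed to the matcher

hfs = []
for attr in lookup_attrs:
    hit = [kv for kv in query if kv[0] == attr]
    if hit:
        hfs.append(hit[0])

print(hashlib.sha256(str(hfs).encode('utf-8')).hexdigest())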
def get_derivative_density_matrix(mat_diag,mat_Rabi,sigma_moins_array,**kwargs):
    r"""
    Returns a function giving the time derivative of the density matrix, for
    numerical integration of the master equation

        \dot{\rho} = -i (H_{eff} \rho - \rho H_{eff}^{\dagger})
                     + \Gamma \sum_j \sigma_j^- \rho \sigma_j^+
    """
    dim=len(mat_diag)
    tunneling=kwargs.get('tunneling','on')
    if tunneling=='off':
        def L_on_rho_loc(tt,yy):
            yy=np.reshape(yy, (dim,dim))
            H_eff=csr_matrix(square_mat(mat_diag))
            deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array)
            return np.reshape(deriv, dim*dim)
        return L_on_rho_loc
    else:
        def L_on_rho_loc(tt,yy):
            yy=np.reshape(yy, (dim,dim))
            H_eff=csr_matrix(mat_Rabi+square_mat(mat_diag))
            deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array)
            return np.reshape(deriv, dim*dim)
        return L_on_rho_loc
5,340,993
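# The callable returned by get_derivative_density_matrix has the (t, y)
# signature SciPy's ODE drivers expect, with the density matrix flattened to a
# vector. A hedged integration sketch, assuming mat_diag, mat_Rabi and
# sigma_moins_array have already been built elsewhere for a dim-level system:
import numpy as np
from scipy.integrate import solve_ivp

deriv = get_derivative_density_matrix(mat_diag, mat_Rabi, sigma_moins_array)

dim = len(mat_diag)
rho0 = np.zeros((dim, dim), dtype=complex)
rho0[0, 0] = 1.0                                  # start in the first basis state

sol = solve_ivp(deriv, (0.0, 10.0), rho0.flatten(),
                t_eval=np.linspace(0.0, 10.0, 101))
rho_final = sol.y[:, -1].reshape(dim, dim)
print('trace of final rho:', np.trace(rho_final).real)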
def create_alien(setting, screen, aliens, alien_number, row_number):
    """Create an alien, position it in its row, and add it to the group."""
    alien = Alien(setting, screen)
    alien_width = alien.rect.width
    alien.x = alien_width + 2 * alien_width * alien_number
    alien.rect.x = alien.x
    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
    aliens.add(alien)
5,340,994
def start_http_server(): """Starts a simple http server for the test files""" # For the http handler os.chdir(TEST_FILES_DIR) handler = SimpleHTTPServer.SimpleHTTPRequestHandler handler.extensions_map['.html'] = 'text/html; charset=UTF-8' httpd = ThreadedTCPServer(("localhost", 0), handler) ip, port = httpd.server_address httpd_thread = threading.Thread(target=httpd.serve_forever) httpd_thread.setDaemon(True) httpd_thread.start() return (ip, port, httpd, httpd_thread)
5,340,995
def area_of_polygon(polygon):
    """
    Returns the area of an OpenQuake polygon in square kilometres
    """
    lon0 = np.mean(polygon.lons)
    lat0 = np.mean(polygon.lats)
    # Transform to a Lambert azimuthal equal-area projection centred on the polygon
    x, y = lonlat_to_laea(polygon.lons, polygon.lats, lon0, lat0)
    # Build a shapely polygon and return its area
    poly = geometry.Polygon(zip(x, y))
    return poly.area
5,340,996
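# The shapely step at the end of area_of_polygon is what actually computes the
# area. Assuming lonlat_to_laea returns Cartesian coordinates in kilometres, a
# standalone check with a 10 km x 10 km square:
from shapely import geometry

x = [0.0, 10.0, 10.0, 0.0]       # already-projected coordinates in km
y = [0.0, 0.0, 10.0, 10.0]

poly = geometry.Polygon(list(zip(x, y)))
print(poly.area)                  # 100.0 square kilometres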
def build_dataset_values(claim_object, data_value):
    """
    Build result values for the different datavalue types.

    Parameters:
        claim_object (obj): Object to modify and add to rows.
        data_value (obj): result object

    Returns:
        Modified claim_object according to data_value type
    """
    if data_value["type"] == "globecoordinate":
        claim_object["str"] = str(data_value["value"]["latitude"]) + "," + str(data_value["value"]["longitude"])
    elif data_value["type"] == "time":
        claim_object["date"] = data_value["value"]["time"].split("T")[0].split("+")[1]
    elif data_value["type"] == "string":
        claim_object["str"] = data_value["value"]
    else:
        pass
    return claim_object
5,340,997
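# Usage sketch for build_dataset_values above, with dictionaries shaped like
# Wikidata datavalue objects (the values themselves are illustrative):
coord = {"type": "globecoordinate",
         "value": {"latitude": 52.52, "longitude": 13.405}}
print(build_dataset_values({}, coord))   # {'str': '52.52,13.405'}

# Wikidata times look like '+2001-12-31T00:00:00Z'; only the date part is kept.
time_value = {"type": "time", "value": {"time": "+2001-12-31T00:00:00Z"}}
print(build_dataset_values({}, time_value))   # {'date': '2001-12-31'}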
def get_yolk_dir(): """Return location we store config files and data.""" return os.path.abspath('%s/.yolk' % os.path.expanduser('~'))
5,340,998
def mapfile_fsc3d( input_filename1, input_filename2, output_filename=None, kernel=9, resolution=None ): """ Compute the local FSC of the map Args: input_filename1 (str): The input map filename input_filename2 (str): The input map filename output_filename (str): The output map filename kernel (int): The kernel size resolution (float): The resolution limit """ # Open the input files infile1 = read(input_filename1) infile2 = read(input_filename2) # Get the data data1 = infile1.data data2 = infile2.data # Reorder input arrays data1 = reorder(data1, read_axis_order(infile1), (0, 1, 2)) data2 = reorder(data2, read_axis_order(infile2), (0, 1, 2)) # Compute the local FSC fsc = fsc3d( data1, data2, kernel=kernel, resolution=resolution, voxel_size=infile1.voxel_size, ) # Reorder output array fsc = reorder(fsc, (0, 1, 2), read_axis_order(infile1)) # Write the output file write(output_filename, fsc.astype("float32"), infile=infile1)
5,340,999
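# A call sketch for mapfile_fsc3d above; the half-map filenames and the kernel
# and resolution values are purely illustrative.
mapfile_fsc3d(
    "half_map_1.mrc",
    "half_map_2.mrc",
    output_filename="local_fsc.mrc",
    kernel=9,
    resolution=3.0,
)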