content: string, lengths 35 to 762k
sha1: string, lengths 40 to 40
id: int64, 0 to 3.66M
def read_starlight_output_syn_spec(lines):
    """ read syn_spec of starlight output """
    Nl_obs = len(lines)
    wave = Column(np.zeros((Nl_obs, ), dtype=np.float), 'wave')
    flux_obs = Column(np.zeros((Nl_obs, ), dtype=np.float), 'flux_obs')
    flux_syn = Column(np.zeros((Nl_obs, ), dtype=np.float), 'flux_syn')
    weight = Column(np.zeros((Nl_obs, ), dtype=np.float), 'weight')
    for i, line in enumerate(lines):
        line_split = line.split()
        wave[i] = np.float(line_split[0])
        flux_obs[i] = np.float(line_split[1])
        flux_syn[i] = np.float(line_split[2])
        weight[i] = np.float(line_split[3])
    return Table([wave, flux_obs, flux_syn, weight])
e25aed3ff9294f07b1549b610030241895b78f67
3,647,100
def get_stations_trips(station_id):
    """
    https://api.rasp.yandex.net/v1.0/schedule/
        ? apikey=<key>
        & format=<format>
        & station=<station code>
        & lang=<language>
        & [date=<date>]
        & [transport_types=<transport type>]
        & [system=<current coding system>]
        & [show_systems=<codes in the response>]
    """
    params = {
        'apikey': RASP_KEY,
        'format': 'json',
        'station': station_id,
        'lang': 'ua',
        'transport_types': 'suburban'
    }
    url = 'https://api.rasp.yandex.net/v1.0/schedule/'
    return get_json(url, params)
8b841f19b135e7792e2f8d3aad642f38b2a6cd74
3,647,101
def _compute_pairwise_kpt_distance(a, b):
    """
    Args:
        a, b (poses): Two sets of poses to match.
            Each "poses" is represented as a list of 3x17 or 4x17 np.ndarray
    """
    res = np.zeros((len(a), len(b)))
    for i in range(len(a)):
        for j in range(len(b)):
            res[i, j] = pck_distance(a[i], b[j])
    return res
aaf4696292bb7d1e9377347d93d97da321787c6f
3,647,102
def _extract_dialog_node_name(dialog_nodes):
    """
    For each dialog_node (node_id) of type *standard*, check if *title exists*.
    If it exists, use the title for the node_name; otherwise, use the dialog_node.
    For all other cases, use the dialog_node.

    dialog_node: (dialog_node_title, dialog_node_type)

    In the case of Login Issues,
        "title": "Login Issue",
        "dialog_node": "Login Issues",
    the record will be created as:
        "Login Issues": ("Login Issue", "standard")
    """
    nodes_dict = {}
    nodes_type = {}
    for obj in dialog_nodes:
        if (obj['type'] == 'standard') and ('title' in obj):
            if obj['title'] is not None:
                nodes_dict[obj['dialog_node']] = (obj['title'], obj['type'])
            else:
                nodes_dict[obj['dialog_node']] = (obj['dialog_node'], obj['type'])
        else:
            nodes_dict[obj['dialog_node']] = (obj['dialog_node'], obj['type'])
    return nodes_dict
23121efa486c2da16a54b2441bb1435eec5b8b49
3,647,103
import os


def _get_content(tax_id):
    """Get Kazusa content, either from cached file or remotely."""
    target_file = os.path.join(DATA_DIR, "%s.txt" % tax_id)
    if not os.path.exists(target_file):
        url = (
            "http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?"
            + "aa=1&style=N&species=%s" % tax_id
        )
        urlretrieve(url, target_file)
    with open(target_file) as fle:
        return fle.read()
d9dafeed28101cf83ba652fcc5d4e92e7757ddc6
3,647,104
from typing import Dict
from typing import List


def search_all_entities(bsp, **search: Dict[str, str]) -> Dict[str, List[Dict[str, str]]]:
    """search_all_entities(key="value") -> {"LUMP": [{"key": "value", ...}]}"""
    out = dict()
    for LUMP_name in ("ENTITIES", *(f"ENTITIES_{s}" for s in ("env", "fx", "script", "snd", "spawn"))):
        entity_lump = getattr(bsp, LUMP_name, shared.Entities(b""))
        results = entity_lump.search(**search)
        if len(results) != 0:
            out[LUMP_name] = results
    return out
ca24b50524cc96b35a605bebc5ead0d8d4342314
3,647,105
import re


def is_probably_beginning_of_sentence(line):
    """Return True if this line begins a new sentence."""
    # Check heuristically for a parameter list.
    for token in ['@', '-', r'\*']:
        if re.search(r'\s' + token + r'\s', line):
            return True
    stripped_line = line.strip()
    is_beginning_of_sentence = re.match(r'[^\w"\'`\(\)]', stripped_line)
    is_pydoc_ref = re.match(r'^:\w+:', stripped_line)
    return is_beginning_of_sentence and not is_pydoc_ref
68a6a2151b4559f0b95e0ac82a8a16bd06d9d1ff
3,647,106
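A quick usage sketch for the is_probably_beginning_of_sentence heuristic above; the sample lines and expected truthiness are illustrative and not part of the original record (the function returns a match object or None rather than a strict bool).

# Illustrative checks of the sentence-start heuristic above.
print(bool(is_probably_beginning_of_sentence('... continued from the previous line')))   # True: starts with punctuation
print(bool(is_probably_beginning_of_sentence(':param line: text to check')))             # False: Sphinx-style field reference
print(bool(is_probably_beginning_of_sentence('word that continues a sentence')))         # False: starts with a word character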
def default_attack_handler(deck, discard, hand, turn, supply, attack):
    """Handle some basic attacks in a default manner.
    Returns True iff the attack was handled."""
    covertool.cover("domsim.py:219")
    if attack == COUNCIL_ROOM:
        # Not really an attack, but this is an easy way to handle it.
        covertool.cover("domsim.py:221")
        hand += draw(deck, discard, 1)
        covertool.cover("domsim.py:222")
        return True
    elif MOAT in hand:
        covertool.cover("domsim.py:224")
        return True
    elif attack == MINION and len(hand) > 4:
        covertool.cover("domsim.py:226")
        discard += hand
        covertool.cover("domsim.py:227")
        hand[:] = draw(deck, discard, 4)
        covertool.cover("domsim.py:228")
        return True
    elif attack == WITCH:
        covertool.cover("domsim.py:230")
        gain(CURSE, supply, discard)
        covertool.cover("domsim.py:231")
        return True
    elif attack == SEA_HAG:
        covertool.cover("domsim.py:233")
        discard += draw(deck, discard, 1)
        covertool.cover("domsim.py:234")
        gain(CURSE, supply, deck)
        covertool.cover("domsim.py:235")
        return True
    else:
        covertool.cover("domsim.py:237")
        return False
79745f30b5607348771ee6d5778202370e553a7b
3,647,107
from sys import version import inspect def deprecate(remove_in, use_instead, module_name=None, name=None): """ Decorator that marks a function or class as deprecated. When the function or class is used, a warning will be issued. Args: remove_in (str): The version in which the decorated type will be removed. use_instead (str): The function or class to use instead. module_name (str): The name of the containing module. This will be used to generate more informative warnings. Defaults to None. name (str): The name of the object being deprecated. If not provided, this is automatically determined based on the decorated type. Defaults to None. """ def deprecate_impl(obj): if config.INTERNAL_CORRECTNESS_CHECKS and version(polygraphy.__version__) >= version(remove_in): G_LOGGER.internal_error("{:} should have been removed in version: {:}".format(obj, remove_in)) nonlocal name name = name or obj.__name__ if inspect.ismodule(obj): class DeprecatedModule(object): def __getattr__(self, attr_name): warn_deprecated(name, use_instead, remove_in, module_name) self = obj return getattr(self, attr_name) def __setattr__(self, attr_name, value): warn_deprecated(name, use_instead, remove_in, module_name) self = obj return setattr(self, attr_name, value) DeprecatedModule.__doc__ = "Deprecated: Use {:} instead".format(use_instead) return DeprecatedModule() elif inspect.isclass(obj): class Deprecated(obj): def __init__(self, *args, **kwargs): warn_deprecated(name, use_instead, remove_in, module_name) super().__init__(*args, **kwargs) Deprecated.__doc__ = "Deprecated: Use {:} instead".format(use_instead) return Deprecated elif inspect.isfunction(obj): def wrapped(*args, **kwargs): warn_deprecated(name, use_instead, remove_in, module_name) return obj(*args, **kwargs) wrapped.__doc__ = "Deprecated: Use {:} instead".format(use_instead) return wrapped else: G_LOGGER.internal_error("deprecate is not implemented for: {:}".format(obj)) return deprecate_impl
40877f67df96053ebedfbb580082b1c89ea4de6f
3,647,108
import os


def get_available_processors():
    """Return the list of available processors modules."""
    modules = [item.replace('.py', '') for item in os.listdir(PROCESSORS_DIR)
               if isfile(join(PROCESSORS_DIR, item))]
    return modules
988d460e4b9c8662318bc0b15232993ef44212e0
3,647,109
def hello_world():
    """return bool if exists -> take in email"""
    email = request.json['email']
    c = conn.cursor()
    c.execute("select * from Users where Users.email = {}".format(email))
    result = False
    conn.commit()
    conn.close()
    return result
c28b33c5106b51144d4b58f3bffd2ea128dd948a
3,647,110
from typing import Callable from typing import Optional import torch def get_model_relations( model: Callable, model_args: Optional[tuple] = None, model_kwargs: Optional[dict] = None, ): """ Infer relations of RVs and plates from given model and optionally data. See https://github.com/pyro-ppl/pyro/issues/949 for more details. This returns a dictionary with keys: - "sample_sample" map each downstream sample site to a list of the upstream sample sites on which it depend; - "sample_dist" maps each sample site to the name of the distribution at that site; - "plate_sample" maps each plate name to a list of the sample sites within that plate; and - "observe" is a list of observed sample sites. For example for the model:: def model(data): m = pyro.sample('m', dist.Normal(0, 1)) sd = pyro.sample('sd', dist.LogNormal(m, 1)) with pyro.plate('N', len(data)): pyro.sample('obs', dist.Normal(m, sd), obs=data) the relation is:: {'sample_sample': {'m': [], 'sd': ['m'], 'obs': ['m', 'sd']}, 'sample_dist': {'m': 'Normal', 'sd': 'LogNormal', 'obs': 'Normal'}, 'plate_sample': {'N': ['obs']}, 'observed': ['obs']} :param callable model: A model to inspect. :param model_args: Optional tuple of model args. :param model_kwargs: Optional dict of model kwargs. :rtype: dict """ if model_args is None: model_args = () if model_kwargs is None: model_kwargs = {} with torch.random.fork_rng(), torch.no_grad(), pyro.validation_enabled(False): with TrackProvenance(): trace = poutine.trace(model).get_trace(*model_args, **model_kwargs) sample_sample = {} sample_dist = {} plate_sample = defaultdict(list) observed = [] for name, site in trace.nodes.items(): if site["type"] != "sample" or site_is_subsample(site): continue sample_sample[name] = [ upstream for upstream in get_provenance(site["fn"].log_prob(site["value"])) if upstream != name ] sample_dist[name] = _get_dist_name(site["fn"]) for frame in site["cond_indep_stack"]: plate_sample[frame.name].append(name) if site["is_observed"]: observed.append(name) def _resolve_plate_samples(plate_samples): for p, pv in plate_samples.items(): pv = set(pv) for q, qv in plate_samples.items(): qv = set(qv) if len(pv & qv) > 0 and len(pv - qv) > 0 and len(qv - pv) > 0: plate_samples_ = plate_samples.copy() plate_samples_[q] = pv & qv plate_samples_[q + "__CLONE"] = qv - pv return _resolve_plate_samples(plate_samples_) return plate_samples plate_sample = _resolve_plate_samples(plate_sample) # convert set to list to keep order of variables plate_sample = { k: [name for name in trace.nodes if name in v] for k, v in plate_sample.items() } return { "sample_sample": sample_sample, "sample_dist": sample_dist, "plate_sample": dict(plate_sample), "observed": observed, }
b0cc8f58a70575492ba0c5efe7f282b0e9ab0a4e
3,647,111
import os


def progress_enabled():
    """
    Checks if progress is enabled. To disable:
        export O4_PROGRESS=false
    """
    return os.environ.get('O4_PROGRESS', 'true') == 'true'
f04b3375eb4150f23f377472eea659a8d03433ab
3,647,112
from typing import Any


def is_valid_dim(x: Any) -> bool:
    """determine if the argument will be valid dim when included in torch.Size."""
    return isinstance(x, int) and x > 0
09b8dd41b20a835583cd051868f13756e8383342
3,647,113
def compute_alpha(n, S_d, d_min):
    """
    Approximate the alpha of a power law distribution.

    Parameters
    ----------
    n: int or np.array of int
        Number of entries that are larger than or equal to d_min
    S_d: float or np.array of float
        Sum of log degrees in the distribution that are larger than or equal to d_min
    d_min: int
        The minimum degree of nodes to consider

    Returns
    -------
    alpha: float
        The estimated alpha of the power law distribution
    """
    return n / (S_d - n * np.log(d_min - 0.5)) + 1
9df2c39ccaa70e729b1bf2f7bfcc78cde0f649de
3,647,114
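A small numeric sketch of how the power-law estimator above could be fed; the degree sequence is synthetic, and numpy (as np) is assumed in scope, as the function itself already requires.

import numpy as np

degrees = np.array([3, 4, 4, 5, 7, 9, 12, 20])   # hypothetical degree sequence
d_min = 3
tail = degrees[degrees >= d_min]                  # entries >= d_min
n = tail.size
S_d = np.sum(np.log(tail))                        # sum of log degrees in the tail
alpha = compute_alpha(n, S_d, d_min)              # continuous MLE-style approximation
print(alpha)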
from typing import Optional


def _head_object(
    s3_conn: S3Client, bucket: str, key: str
) -> Optional[HeadObjectOutputTypeDef]:
    """Retrieve information about an object in S3 if it exists.

    Args:
        s3_conn: S3 connection to use for operations.
        bucket: name of the bucket containing the key.
        key: name of the key to lookup.

    Returns:
        S3 object information, or None if the object does not exist.
        See the AWS documentation for explanation of the contents.

    Raises:
        botocore.exceptions.ClientError: any error from boto3 other than key
            not found is passed through.
    """
    try:
        return s3_conn.head_object(Bucket=bucket, Key=key)
    except botocore.exceptions.ClientError as err:
        if err.response["Error"]["Code"] == "404":
            return None
        raise
3b7b239ea09bf3df75c9c9a3e3e19cba505d67a5
3,647,115
from operator import add


def Residual(feat_maps_in, feat_maps_out, prev_layer):
    """
    A customizable residual unit with convolutional and shortcut blocks

    Args:
        feat_maps_in: number of channels/filters coming in, from input or previous layer
        feat_maps_out: how many output channels/filters this block will produce
        prev_layer: the previous layer
    """
    skip = skip_block(feat_maps_in, feat_maps_out, prev_layer)
    conv = conv_block(feat_maps_out, prev_layer)
    merged = add([skip, conv])  # the residual connection
    return LeakyReLU()(merged)
cfb7345340785a8fc7b3068c2baa0e5452b189aa
3,647,116
from typing import List
from datetime import datetime
from datetime import timedelta


def clean_detail_line_data(detail_row: List[str], date: str) -> List[str]:
    """
    :param detail_row: uncleaned detail row
    :param date: job date to be added to data
    :return: a cleaned list of details fields
    """
    if not detail_row:
        print('detail_row:', detail_row)
        return detail_row
    # The age field is an integer number of days between the date when the video
    # was uploaded and Feb. 15, 2007 (YouTube's establishment)
    age_field_location = 2
    age_date_format = '%Y-%m-%d'
    age = int(detail_row[age_field_location].strip()) if detail_row[age_field_location].strip() else 0
    new_date = datetime.strptime('2007-02-15', age_date_format) + timedelta(days=age)
    detail_row[age_field_location] = datetime.strftime(new_date, age_date_format)
    return [date, ] + detail_row
ff9c3b8f5079674bf9a727f4baedc264443dbdeb
3,647,117
def lammps_prod(job):
    """Run npt ensemble production."""
    in_script_name = "in.prod"
    modify_submit_lammps(in_script_name, job.sp)
    msg = f"sbatch submit.slurm {in_script_name} {job.sp.replica} {job.sp.temperature} {job.sp.pressure} {job.sp.cutoff}"
    return msg
faa122a6e22f54028cd536f78c17094f9c1f07b4
3,647,118
def gather_tensors(tensors, indices):
    """Performs a tf.gather operation on a set of Tensors.

    Args:
        tensors: A potentially nested tuple or list of Tensors.
        indices: The indices to use for the gather operation.

    Returns:
        gathered_tensors: A potentially nested tuple or list of Tensors with the
            same structure as the 'tensors' input argument. Contains the result
            of applying tf.gather(x, indices) on each element x in 'tensors'.
    """
    return map_nested(lambda x: tf.gather(x, indices), tensors)
68fd88121cdca7beb13f3f5633401ddb420f34d4
3,647,119
import logging


def _get_log_level(log_level_name):
    """
    Get numeric log level corresponding to specified log level name
    """
    # TODO: Is there a built-in method to do a reverse lookup?
    if log_level_name == LOG_LEVEL_NAME_CRITICAL:
        return logging.CRITICAL
    elif log_level_name == LOG_LEVEL_NAME_ERROR:
        return logging.ERROR
    elif log_level_name == LOG_LEVEL_NAME_WARNING:
        return logging.WARNING
    elif log_level_name == LOG_LEVEL_NAME_INFO:
        return logging.INFO
    elif log_level_name == LOG_LEVEL_NAME_DEBUG:
        return logging.DEBUG
    elif log_level_name == LOG_LEVEL_NAME_TRACE:
        return LOG_LEVEL_VALUE_TRACE
    return None
47b4f238069905f9c9f8a668cb18766a8f883a5e
3,647,120
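The TODO in the snippet above asks whether a built-in reverse lookup exists; a minimal dict-based sketch, reusing the same module-level LOG_LEVEL_NAME_* and LOG_LEVEL_VALUE_TRACE constants the snippet already assumes, could look like this.

import logging

# Name-to-level table built once at import time; .get() returns None for unknown names.
_NAME_TO_LEVEL = {
    LOG_LEVEL_NAME_CRITICAL: logging.CRITICAL,
    LOG_LEVEL_NAME_ERROR: logging.ERROR,
    LOG_LEVEL_NAME_WARNING: logging.WARNING,
    LOG_LEVEL_NAME_INFO: logging.INFO,
    LOG_LEVEL_NAME_DEBUG: logging.DEBUG,
    LOG_LEVEL_NAME_TRACE: LOG_LEVEL_VALUE_TRACE,
}


def _get_log_level_via_dict(log_level_name):
    """Return the numeric level for a name, or None if the name is unknown."""
    return _NAME_TO_LEVEL.get(log_level_name)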
import types


def is_iterator(obj):
    """
    Predicate that returns whether an object is an iterator.
    """
    return type(obj) == types.GeneratorType or ('__iter__' in dir(obj) and 'next' in dir(obj))
db57a2a1f171a48cc43ba4c248387191780dfd04
3,647,121
import shutil def delete_entries(keytab_file: str, slots: t.List[int]) -> bool: """ Deletes one or more entries from a Kerberos keytab. This function will only delete slots that exist within the keylist. Once the slots are deleted, the current keylist will be written to a temporary file. This avoids having the keylist appended to the keylist within the keytab file. Once the keylist is written to the temporary file, the temporary file will be move/renamed the original keytab filename. :param keytab_file: Kerberos V5 keytab file name. The file can be a relative path read from the user's home directory. :param slots: list of slots to be deleted from the keylist. :return: True on success, otherwise False. """ keytab_file = ktutil.keytab_exists(keytab_file) if not keytab_file or not isinstance(slots, list): return False keytab_tmp = ktutil.resolve_keytab_file(f"{keytab_file}.tmp") kt = ktutil() # Read the Kerberos keytab file first to check if slots exist before # trying to delete them. kt.read_kt(keytab_file) kt.list() kt.quit() existing_slots = [ key["slot"] for key in kt.keylist if key["slot"] in slots] if len(existing_slots) == 0: return False # No slots exist to be deleted. # Re-initialize 'ktutil' command and delete the slot(s). # Write the current keylist to a temporary file, then rename # the temporary file to the original name. This avoids the # duplication caused by the ``write_kt`` invocation. kt.ktutil_init() kt.read_kt(keytab_file) for slot in existing_slots: kt.delete_entry(slot) kt.write_kt(keytab_tmp) kt.quit() shutil.move(keytab_tmp, keytab_file) return True if kt.error else False
cc488778abdc75a9814702642fcfc9b245b6a99c
3,647,122
def detect(environ, context=None):
    """
    parse HTTP user agent string and detect a mobile device.
    """
    context = context or Context()
    try:
        ## if key 'HTTP_USER_AGENT' doesn't exist,
        ## we are not able to decide agent class in the first place.
        ## so raise KeyError to return NonMobile agent.
        carrier = detect_fast(environ['HTTP_USER_AGENT'])

        ## if carrier is 'nonmobile', raise KeyError intentionally
        factory_class = {
            'docomo'  : context.docomo_factory,
            'ezweb'   : context.ezweb_factory,
            'softbank': context.softbank_factory,
            'willcom' : context.willcom_factory,
        }[carrier]
        return factory_class().create(environ, context)
    except KeyError:
        return NonMobile(environ, context)
2977cb2847c4917904cc096c5787b6ddb3a889b9
3,647,123
import os
import urllib


def _sniff_scheme(uri_as_string):
    """Returns the scheme of the URL only, as a string."""
    #
    # urlsplit doesn't work on Windows -- it parses the drive as the scheme...
    # no protocol given => assume a local file
    #
    if os.name == 'nt' and '://' not in uri_as_string:
        uri_as_string = 'file://' + uri_as_string
    return urllib.parse.urlsplit(uri_as_string).scheme
0dcff614b026a5bbaf87fce56ed1465d78eda58b
3,647,124
async def get_product(id: UUID4):  # noqa: A002
    """Return ProductGinoModel instance."""
    return await ProductGinoModel.get_or_404(id)
f7988faf08da081a1922f8df24b843be67658c16
3,647,125
def LSIIR_unc(H,UH,Nb,Na,f,Fs,tau=0): """Design of stabel IIR filter as fit to reciprocal of given frequency response with uncertainty Least-squares fit of a digital IIR filter to the reciprocal of a given set of frequency response values with given associated uncertainty. Propagation of uncertainties is carried out using the Monte Carlo method. Parameters ---------- H: np.ndarray frequency response values. UH: np.ndarray uncertainties associated with real and imaginary part of H Nb: int order of IIR numerator polynomial. Na: int order of IIR denominator polynomial. f: np.ndarray frequencies corresponding to H Fs: float sampling frequency for digital IIR filter. tau: float initial estimate of time delay for filter stabilization. Returns ------- b,a: np.ndarray IIR filter coefficients tau: int time delay (in samples) Uba: np.ndarray uncertainties associated with [a[1:],b] References ---------- * Eichstädt, Elster, Esward and Hessling [Eichst2010]_ .. seealso:: :mod:`PyDynamic.uncertainty.propagate_filter.IIRuncFilter` :mod:`PyDynamic.deconvolution.fit_filter.LSIIR` """ runs = 1000 print("\nLeast-squares fit of an order %d digital IIR filter to the" % max(Nb,Na)) print("reciprocal of a frequency response given by %d values.\n" % len(H)) print("Uncertainties of the filter coefficients are evaluated using\n"\ "the GUM S2 Monte Carlo method with %d runs.\n" % runs) HRI = np.random.multivariate_normal(np.hstack((np.real(H),np.imag(H))),UH,runs) HH = HRI[:,:len(f)] + 1j*HRI[:,len(f):] AB = np.zeros((runs,Nb+Na+1)) Tau= np.zeros((runs,)) for k in range(runs): bi,ai,Tau[k] = LSIIR(HH[k,:],Nb,Na,f,Fs,tau,verbose=False) AB[k,:] = np.hstack((ai[1:],bi)) bi = np.mean(AB[:,Na:],axis=0) ai = np.hstack((np.array([1.0]),np.mean(AB[:,:Na],axis=0))) Uab= np.cov(AB,rowvar=0) tau = np.mean(Tau) return bi,ai, tau, Uab
d262065fdfe49514101ff65a6c4ea7329e8aef84
3,647,126
import time def genMeasureCircuit(H, Nq, commutativity_type, clique_cover_method=BronKerbosch): """ Take in a given Hamiltonian, H, and produce the minimum number of necessary circuits to measure each term of H. Returns: List[QuantumCircuits] """ start_time = time.time() term_reqs = np.full((len(H[1:]),Nq),'*',dtype=str) for i, term in enumerate(H[1:]): for op in term[1]: qubit_index = int(op[1:]) basis = op[0] term_reqs[i][qubit_index] = basis # Generate a graph representing the commutativity of the Hamiltonian terms comm_graph = commutativity_type.gen_comm_graph(term_reqs) num_terms = len(comm_graph) # Find a set of cliques within the graph where the nodes in each clique # are disjoint from one another. try: max_cliques = clique_cover_method(comm_graph) except RecursionError as re: print('Maximum recursion depth reached: {}'.format(re.args[0])) return 0, 0, 0 end_time = time.time() print('MEASURECIRCUIT: {} found {} unique circuits'.format( clique_cover_method.__name__, len(max_cliques))) et = end_time - start_time print('MEASURECIRCUIT: Elapsed time: {:.6f}s'.format(et)) return num_terms, len(max_cliques), et
1128592a61da7601e41c0328abbf8770f187d009
3,647,127
def run_test(session, m, data, batch_size, num_steps):
    """Runs the model on the given data."""
    costs = 0.0
    iters = 0
    state = session.run(m.initial_state)
    for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):
        cost, state = session.run([m.cost, m.final_state], {
            m.input_data: x,
            m.targets: y,
            m.initial_state: state
        })
        costs += cost
        iters += 1
    return costs / iters
681a11e7cd52c0f690a3ad79e69fac4951906796
3,647,128
def table_to_dict(table): """Convert Astropy Table to Python dict. Numpy arrays are converted to lists. This Can work with multi-dimensional array columns, by representing them as list of list. e.g. This is useful in the following situation. foo = Table.read('foo.fits') foo.to_pandas() <- This will not work if columns are multi-dimensional. The alternative is: foo = Table.read('foo.fits') bar = table_to_dict(foo) df = pd.DataFrame(bar, columns=bar.keys()) <- The desired result. """ total_data = {} multi_cols = [] for i, _ in enumerate(table.columns): # This looks unusual, but it is the only way to iterate over columns. col = table.columns[i] data = table[col.name].tolist() total_data[col.name] = data if len(col.shape) == 2: multi_cols.append(col.name) return total_data, multi_cols
8ad9206222101bbd4d40913e3b43c8ffee9dd6ad
3,647,129
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this
    loss should work too. Not tested.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    R = K.mean(K.sum(L, 1))
    return R
252c26949b6255742df90cc9c83a586dfcbb8ac6
3,647,130
def strip_quotes(string):
    """Remove quotes from front and back of string

    >>> strip_quotes('"fred"') == 'fred'
    True
    """
    if not string:
        return string
    first_ = string[0]
    last = string[-1]
    if first_ == last and first_ in '"\'':
        return string[1:-1]
    return string
7e10d37e5b5bb4569c88b4de17ffde31a4456e15
3,647,131
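A few quick usage lines for strip_quotes above, extending the doctest already in its docstring; the expected outputs in the comments follow directly from the code.

print(strip_quotes('"fred"'))    # fred
print(strip_quotes("'fred'"))    # fred
print(strip_quotes('"fred\''))   # "fred'  (mismatched quotes are left alone)
print(strip_quotes(''))          # empty string passes through unchanged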
from xclim.core.indicator import registry def generate_local_dict(locale: str, init_english: bool = False): """Generate a dictionary with keys for each indicators and translatable attributes. Parameters ---------- locale : str Locale in the IETF format init_english : bool If True, fills the initial dictionary with the english versions of the attributes. Defaults to False. """ if locale in _LOCALES: locname, attrs = get_local_dict(locale) for ind_name in attrs.copy().keys(): if ind_name != "attrs_mapping" and ind_name not in registry: attrs.pop(ind_name) else: attrs = {} attrs_mapping = attrs.setdefault("attrs_mapping", {}) attrs_mapping.setdefault("modifiers", [""]) for key, value in default_formatter.mapping.items(): attrs_mapping.setdefault(key, [value[0]]) eng_attr = "" for ind_name, indicator in registry.items(): ind_attrs = attrs.setdefault(ind_name, {}) for translatable_attr in set(TRANSLATABLE_ATTRS).difference( set(indicator._cf_names) ): if init_english: eng_attr = getattr(indicator, translatable_attr) if not isinstance(eng_attr, str): eng_attr = "" ind_attrs.setdefault(f"{translatable_attr}", eng_attr) for cf_attrs in indicator.cf_attrs: # In the case of single output, put var attrs in main dict if len(indicator.cf_attrs) > 1: ind_attrs = attrs.setdefault(f"{ind_name}.{cf_attrs['var_name']}", {}) for translatable_attr in set(TRANSLATABLE_ATTRS).intersection( set(indicator._cf_names) ): if init_english: eng_attr = cf_attrs.get(translatable_attr) if not isinstance(eng_attr, str): eng_attr = "" ind_attrs.setdefault(f"{translatable_attr}", eng_attr) return attrs
34398b297bb269df4668a09d055a69d409fe7bec
3,647,132
import csv def generate_scheme_from_file(filename=None, fileobj=None, filetype='bson', alimit=1000, verbose=0, encoding='utf8', delimiter=",", quotechar='"'): """Generates schema of the data BSON file""" if not filetype and filename is not None: filetype = __get_filetype_by_ext(filename) datacache = [] if filetype == 'bson': if filename: source = open(filename, 'rb') else: source = fileobj n = 0 for r in bson.decode_file_iter(source): n += 1 if n > alimit: break datacache.append(r) if filename: source.close() elif filetype == 'jsonl': if filename: source = open(filename, 'r', encoding=encoding) else: source = fileobj n = 0 for r in source: n += 1 if n > alimit: break datacache.append(orjson.loads(r)) if filename: source.close() elif filetype == 'csv': if filename: source = open(filename, 'r', encoding=encoding) else: source = fileobj n = 0 reader = csv.DictReader(source, quotechar=quotechar, delimiter=delimiter, quoting=csv.QUOTE_ALL) for r in reader: n += 1 if n > alimit: break datacache.append(r) if filename: source.close() n = 0 scheme = None for r in datacache: n += 1 if scheme is None: scheme = get_schema(r) else: scheme = merge_schemes([scheme, get_schema(r)]) return scheme
5a160dabd6141724075e3645342b804b556094d6
3,647,133
def getTests():
    """Returns a dictionary of document samples for the Entity Types Person, Location, and Organization.

    Returns:
        [type] -- Returns a dictionary of document samples for the Entity Types Person, Location, and Organization.
    """
    personDocument = gtWorkingCopy.find_one({"$and": [{'entity_type': 'Person'}, {'sentences.5': {'$exists': True}}]})  # Person and at least 5 test sentences
    locationDocument = gtWorkingCopy.find_one({"$and": [{'entity_type': 'Location'}, {'sentences.5': {'$exists': True}}]})  # Location and at least 5 test sentences
    organizationDocument = gtWorkingCopy.find_one({"$and": [{'entity_type': 'Organization'}, {'sentences.5': {'$exists': True}}]})  # Organization and at least 5 test sentences
    tests = {"person": personDocument, "location": locationDocument, "organization": organizationDocument}
    return tests
63d1ff2e2f77f33ef634d495a1d318f688a1d51b
3,647,134
def ts_cor(a, b, min_sample = 3, axis = 0, data = None, state = None): """ ts_cor(a) is equivalent to a.cor()[0][1] - supports numpy arrays - handles nan - supports state management :Example: matching pandas ------------------------- >>> # create sample data: >>> from pyg_timeseries import *; import pandas as pd; import numpy as np >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0.5] = np.nan >>> b = pd.Series(np.random.normal(0,1,10000), drange(-9999)); b[b>0.5] = np.nan >>> state = data = None; min_sample = 3; axis = 0 >>> df = pd.concat([a,b], axis=1) >>> assert abs(df.corr()[0][1] - ts_cor(a, b))<1e-10 :Example: slightly faster than pandas ------------------------------------- %timeit ts_cor(a, b) 245 µs ± 6.43 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) %timeit df.corr()[0][1] 575 µs ± 13 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) :Example: numpy ----------------------------------- >>> assert ts_cor(a.values, b.values) == ts_cor(a,b) :Example: state management ------------------------------------------- >>> old = ts_std_(a.iloc[:2000]) >>> new = ts_std(a.iloc[2000:], vec = old.vec) >>> assert new == ts_std(a) """ state = state or dict(vec = _vec(a, None,6,0.)) rtn = first_(_ts_cor(a, b, min_sample = min_sample, **state)) return rtn
217b8c2196c270ffe22905884586b7466eb59c88
3,647,135
def get_robin_bndry_conditions(kappa,alpha,Vh): """ Do not pass element=function_space.ufl_element() as want forcing to be a scalar pass degree instead """ bndry_obj = get_2d_unit_square_mesh_boundaries() boundary_conditions=[] ii=0 for phys_var in [0,1]: for normal in [1,-1]: boundary_conditions.append( ['robin', bndry_obj[ii], [RobinBoundaryRHS(kappa,normal,alpha,'real',phys_var, degree=Vh.ufl_element().degree()), RobinBoundaryRHS(kappa,normal,alpha,'imag',phys_var, degree=Vh.ufl_element().degree())], [dl.Constant(0),alpha]]) ii+=1 return boundary_conditions
65bb3f9d216ddc146866cf8aa570c2ee73e6d7f2
3,647,136
def get_prop_datatypes(labels, propnames, MB=None):
    """Retrieve the per-property output datatypes."""
    rp = regionprops(labels, intensity_image=MB, cache=True)
    datatypes = []
    for propname in propnames:
        if np.array(rp[0][propname]).dtype == 'int64':
            datatypes.append('int32')
        else:
            datatypes.append('float')
    return datatypes
b706e724bee867a23c290580f2b865d967946d1b
3,647,137
from typing import Tuple


def parse_html(html: str) -> Tuple[str, str]:
    """
    This function parses the html, strips the tags and returns the title and
    the body of the html file.

    Parameters
    ----------
    html : str
        The HTML text

    Returns
    -------
    Tuple[str, str]
        A tuple of (title, body)
    """
    doc = pq(html)
    title = doc("title").text()
    body = doc("body").text()
    return (title, body)
a57e5ae50c8eae16c06333a3de5cc388524504ab
3,647,138
from .._common import header


def block(keyword, multi=False, noend=False):
    """Decorate block writing functions."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            head_fmt = "{:5}{}" if noend else "{:5}{}\n"
            out = [head_fmt.format(keyword, header)]
            out += func(*args, **kwargs)
            out += ["\n"] if multi else []
            return out

        return wrapper

    return decorator
5a0ec1cdec6c2f956d47d13f7beea41814a0db5d
3,647,139
def data(path):
    """Get the file from the specified path from the data directory.

    Parameters
    ----------
    path : str
        The relative path to the file in the data directory.

    Returns
    -------
    file : File
        The requested file.
    """
    return send_from_directory(app.config['DATA_DIRECTORY'], path)
8f1cde6d248026fbadd2a81519f8f2beed7ab308
3,647,140
import requests import logging def search_software_fuzzy(query, max=None, csv_filename=None): """Returns a list of dict for the software results. """ results = _search_software(query) num = 0 softwares = [] while True: for r in results: r = _remove_useless_keys(r) softwares.append(r) num += len(results) # quit if no results or results number reach the max if num == 0 or (max and num >= max): break query_string = _get_next_page_query(results[-1]['SearchResultDescr']) if not query_string: break try: results = _get_software_search_results(query_string) # Sometimes it responds 50x http error for some keywords, # but it's not the client's fault. except requests.exceptions.HTTPError as e: logging.warning(f'{e.response.status_code} HTTP Error occurred ' f'during pagination: {e.response.url}') break if csv_filename: _write_software_results(softwares, csv_filename) return return softwares
ef0124670b02b148f918d3f573e22ca3f646cf96
3,647,141
def fitCirc(x,y,xerr = None, rIni = None, aveR=False): """ Performs a circle fit to data using least square residuals. Parameters ---------- x : An array of length N. y : An array of length N. xerr : None or an array of length N, If provided, it is the standard-deviation of points. This vector, if given, will be used as weights in the fit. rIni : is a maximum radius of the circle to be fitted. aveR : if True, returns the average deviation from the fit. Returns ------- xc, yc, R : center and the radius of the circle. errorbars : errorbars on the center x, y and the radius. aveResid : (optional) average residual """ x=np.array(x) y=np.array(y) if x.size<2: print('fitCirc: not enough data points to fit circle') return x_m = np.mean(x) y_m = np.mean(y) if xerr == None or all(xerr)==0: xerr = np.ones(len(x)) else: xerr=np.array(xerr) xerr[np.where(xerr==0)]=100 def calc_R(xc, yc): """ calculate the distance of each 2D points from the center (xc, yc) """ return np.sqrt((x-xc)**2 + (y-yc)**2) def resid(pars): """ calculate the algebraic distance between the 2D points and the mean circle centered at c=(xc, yc) """ # xc,yc, radius = pars v = pars.valuesdict() xc,yc, radius = v['xc'],v['yc'],v['radius'] Ri = calc_R(xc,yc) if rIni is not None and radius>rIni: return 10000000*(Ri - radius) return (Ri - radius)/np.array(xerr) center_estimate = x_m, y_m radius = calc_R(*center_estimate).mean() if rIni is not None and radius>rIni: radius = rIni params = Parameters() params.add('xc', x_m) params.add('yc', y_m) params.add('radius', radius, min=0) minzer=minimize(resid,params=params) res = minzer.params xc, yc, R = res['xc'].value,res['yc'].value,res['radius'].value errorbars = [res['xc'].stderr,res['yc'].stderr,res['radius'].stderr] aveResid = sum(abs(minzer.residual))/x.size if rIni is not None and R>rIni: print('radius greater than initial, resid=',aveResid) if aveR: return xc,yc,R,errorbars,aveResid else: return xc,yc,R, errorbars
fd384526b385f33a8ddd91c7c631afb204afa0db
3,647,142
def get_clients(): """ Return current clients --- tags: - clients operationId: listClients produces: - application/json schemes: ['http', 'https'] responses: 200: description: List of clients schema: type: array items: $ref: '#/definitions/Client' """ return jsonify_obj(get_locker().get_clients())
e38c8da6094303415bc285f847b9915a4a55f7e7
3,647,143
def GetInstanceListForHypervisor(hname, hvparams=None, get_hv_fn=hypervisor.GetHypervisor): """Provides a list of instances of the given hypervisor. @type hname: string @param hname: name of the hypervisor @type hvparams: dict of strings @param hvparams: hypervisor parameters for the given hypervisor @type get_hv_fn: function @param get_hv_fn: function that returns a hypervisor for the given hypervisor name; optional parameter to increase testability @rtype: list @return: a list of all running instances on the current node - instance1.example.com - instance2.example.com """ try: return get_hv_fn(hname).ListInstances(hvparams=hvparams) except errors.HypervisorError, err: _Fail("Error enumerating instances (hypervisor %s): %s", hname, err, exc=True)
b42e19af31d17ff6ca2343ce274572f15950c8d5
3,647,144
def is_nersc_system(system=system()):
    """Whether current system is a supported NERSC system."""
    return (system is not None) and (system in _system_params.keys())
6ac968e45f7d586d56b28578eb685f705abafe0f
3,647,145
def is_string_type_suspicious_score(confidence_score, params):
    """
    determine if string type confidence score is suspicious in reputation_params
    """
    return not isinstance(confidence_score, int) and CONFIDENCE_LEVEL_PRIORITY.get(
        params['override_confidence_level_suspicious'], 10) <= CONFIDENCE_LEVEL_PRIORITY.get(confidence_score.lower(), -1)
9282ca6e58638fb240ca4c0b752a6dddbaa05255
3,647,146
def align_embeddings(base_embed, other_embed, sample_size=1):
    """Fit the regression that aligns model1 and model2."""
    regression = fit_w2v_regression(base_embed, other_embed, sample_size)
    aligned_model = apply_w2v_regression(base_embed, regression)
    return aligned_model
3e017f1a0049cac40f6f7311be2dd531895fc436
3,647,147
from typing import Optional import logging def validate_dict_keys(dict_to_check: dict, allowed_keys: set, necessary_keys: Optional[set] = None, dict_name: Optional[str] = None) -> bool: """If you use dictionaries to pass parameters, there are two common errors: * misspelled keys * necessary keys are missing This functions checks whether all keys are in the set of allowed_keys and raises ValueError if a unknown key is found. It can also check whether all necessary keys are present and raises ValueError if not. dict_name can be used for a better error message.""" if not dict_name: # fallback to neutral dict_name = 'dictionary' # In case something other than a set is provided: allowed_keys = convert_to_set(allowed_keys) if necessary_keys: # also make sure it is a set: necessary_keys = convert_to_set(necessary_keys) # Are all necessary keys in the allowed key list? if len(necessary_keys - allowed_keys) != 0: msg = ("Contradiction: Not all necessary keys " + "are in the allowed keys set!") logging.exception(msg) raise ValueError(msg) # Get all keys in the dictionary: try: found_keys = dict_to_check.keys() except AttributeError as no_dict: raise AttributeError('Expected a dictionary for the dict_to_check ' + 'parameter!') from no_dict # Check for unknown keys: for key in found_keys: if key not in allowed_keys: msg = f"Unknown key {key} in {dict_name}" logging.exception(msg) raise ValueError(msg) logging.debug('No unknown keys found.') # Check if all necessary keys are present: if necessary_keys: for key in necessary_keys: if key not in found_keys: msg = f"Necessary key {key} missing in {dict_name}!" logging.exception(msg) raise ValueError(msg) logging.debug('All necessary keys found.') return True
ede2995994b5616ae4f2f8cb02799295cc008f7f
3,647,148
def data_unmerged():
    """
    Load HEWL diffraction data from APS 24-ID-C
    """
    datapath = ["data", "data_unmerged.mtz"]
    return load_dataset(datapath)
f7fb453f617191e19f948fc9097d10bc104478b3
3,647,149
def copy(object, *args):
    """Copy the object."""
    copiedWrapper = wrapCopy(object)
    try:
        copiedWrapper.name = copiedWrapper.name + "_Copy"
    except AttributeError:
        pass
    return copiedWrapper.createAndFillObject(None, *args)
fd5e7dfb3e5d6c920ebcb73477d19c9a8be152d3
3,647,150
def convert_to_n0(n): """ Convert count vector to vector of "greater than" counts. Parameters ------- n : 1D array, size K each entry k represents the count of items assigned to comp k. Returns ------- n0 : 1D array, size K each entry k gives the total count of items at index above k N0[k] = np.sum(N[k:]) Example ------- >> convertToN0([1., 3., 7., 2]) [12, 9, 2] """ n = np.asarray(n) n0 = np.zeros_like(n) n0[:-1] = n[::-1].cumsum()[::-1][1:] return n0
c75ce9e68bc949ef9fed55283c4dd2a424acadc7
3,647,151
def application(environ, start_response):
    """Serve the button HTML."""
    with open('wsgi/button.html') as f:
        response_body = f.read()
    status = '200 OK'
    response_headers = [
        ('Content-Type', 'text/html'),
        ('Content-Length', str(len(response_body))),
    ]
    start_response(status, response_headers)
    return [response_body.encode('utf-8')]
97f1f793f234dbd3c29e9c4a791a224ba32c984b
3,647,152
def get_handler():
    """
    Return the handler configured by the most recent call to :func:`configure`.
    If :func:`configure` has not yet been called, this returns ``None``.
    """
    return current_handler
0508343f6775544204de9a35186d92ddf829533f
3,647,153
def display_rf_feature_importance(cache, save_location: str = None): """ Displays which pixels have the most influence in the model's decision. This is based on sklearn,ensemble.RandomForestClassifier's feature_importance array Parameters ---------- save_location : str the location to save the figure on disk. If None, the plot is displayed on runtime and not saved. cache : dict the cache dict returned by the classifier. Must at least include ['actual', 'prediction'] objects, each with ['train', 'test'] arrays Returns ------- matplotlib.pyplot.figure : the figure """ fig = plt.figure() plt.title("Pixel importance in random forest classification") plt.imshow(cache['model'].feature_importances_.reshape((28,28)), extent=[0,28,28,0]) plt.colorbar() if save_location is None: plt.show() else: plt.savefig(fname=save_location) return fig
86fc921f4f3ffcd004a2995b0a82f69ba3088e5a
3,647,154
def first_order(A, AB, B):
    """
    First order estimator following Saltelli et al. 2010 CPC, normalized by
    sample variance
    """
    return np.mean(B * (AB - A), axis=0) / np.var(np.r_[A, B], axis=0)
3a94fdcf17a10242fb07d60e1468a21e17182a25
3,647,155
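A toy sketch of how the Saltelli-style estimator above is typically fed; the matrix names A, B and AB follow the usual sensitivity-analysis convention, the test model is arbitrary, and numpy (as np) is assumed in scope as the function already requires.

import numpy as np

rng = np.random.default_rng(0)
N = 10000
a1, b1 = rng.uniform(size=N), rng.uniform(size=N)   # input 1 sampled for matrices A and B
a2, b2 = rng.uniform(size=N), rng.uniform(size=N)   # input 2 sampled for matrices A and B


def toy_model(x1, x2):
    # x1 dominates the output variance in this toy model.
    return x1 + 0.1 * x2


A = toy_model(a1, a2)
B = toy_model(b1, b2)
AB = toy_model(b1, a2)          # A-sample with input 1 replaced by its B-sample
S1 = first_order(A, AB, B)      # first-order index of input 1, close to its variance share
print(S1)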
import array def mod_df(arr,timevar,istart,istop,mod_name,ts): """ return time series (DataFrame) from model interpolated onto uniform time base """ t=timevar.points[istart:istop] jd = timevar.units.num2date(t) # eliminate any data that is closer together than 10 seconds # this was required to handle issues with CO-OPS aggregations, I think because # they use floating point time in hours, which is not very accurate, so the FMRC # aggregation is aggregating points that actually occur at the same time dt =diff(jd) s = array([ele.seconds for ele in dt]) ind=where(s>10)[0] arr=arr[ind+1] jd=jd[ind+1] b = pd.DataFrame(arr,index=jd,columns=[mod_name]) # eliminate any data with NaN b = b[isfinite(b[mod_name])] # interpolate onto uniform time base, fill gaps up to: (10 values @ 6 min = 1 hour) c = pd.concat([b, ts],axis=1).interpolate(limit=10) return c
6740d74bcfa82a3f813b991b8593b9c2cd5fddb5
3,647,156
def hydrate_reserve_state(data={}):
    """ Given a dictionary, allow the viewmodel to hydrate the data needed by this view """
    vm = State()
    return vm.hydrate(data)
203c9c4143713cf8f386a2ac95d91b50a9525a3c
3,647,157
def get_devstudio_versions (): """Get list of devstudio versions from the Windows registry. Return a list of strings containing version numbers; the list will be empty if we were unable to access the registry (eg. couldn't import a registry-access module) or the appropriate registry keys weren't found.""" if not _can_read_reg: return [] K = 'Software\\Microsoft\\Devstudio' L = [] for base in (HKEY_CLASSES_ROOT, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, HKEY_USERS): try: k = RegOpenKeyEx(base,K) i = 0 while 1: try: p = RegEnumKey(k,i) if p[0] in '123456789' and p not in L: L.append(p) except RegError: break i = i + 1 except RegError: pass L.sort() L.reverse() return L
ad02e38e216649cb0f29cfffe256aee7b79d80ea
3,647,158
def mapDictToProfile(wordD, tdm): """ Take the document in as a dictionary with word:wordcount format, and map it to a p profile Parameters ---------- wordD : Dictionary Dictionary where the keys are words, and the values are the corrosponding word count tdm : termDocMatrix object The trained and factorized term-document matrix structure Returns ------- p : numpy array The mapped vector profile for the string Notes ----- References ---------- Examples -------- """ p = np.zeros(len(tdm.terms)) tfweight = float(sum(wordD.values())) for i in range(len(tdm.terms)): if tdm.terms[i] in wordD: p[i] = wordD[tdm.terms[i]]/tfweight #print len(p) p = np.multiply(tdm.idfs, p).transpose() return p
df178e7a6857ff9f85caedbfb5abac77e4f04d55
3,647,159
from typing import VT from typing import Tuple from typing import List def _to_tikz(g: BaseGraph[VT,ET], xoffset:FloatInt=0, yoffset:FloatInt=0, idoffset:int=0) -> Tuple[List[str],List[str]]: """Converts a ZX-graph ``g`` to a string representing a tikz diagram. The optional arguments are used by :func:`to_tikz_sequence`. """ verts = [] maxindex = idoffset for v in g.vertices(): p = g.phase(v) ty = g.type(v) if ty == VertexType.BOUNDARY: style = "none" elif ty == VertexType.H_BOX: style = "hadamard" else: style = 'Z' if ty==VertexType.Z else 'X' if p != 0: style += " phase" style += " dot" if (ty == VertexType.H_BOX and p == 1) or (ty != VertexType.H_BOX and p == 0): phase = "" else: ns = '' if p.numerator == 1 else str(p.numerator) dn = '' if p.denominator == 1 else str(p.denominator) if dn: phase = r"$\frac{%s\pi}{%s}$" % (ns, dn) else: phase = r"$%s\pi$" % ns x = g.row(v) + xoffset y = - g.qubit(v) - yoffset s = " \\node [style={}] ({:d}) at ({:.2f}, {:.2f}) {{{:s}}};".format(style,v+idoffset,x,y,phase) # type: ignore verts.append(s) maxindex = max([v+idoffset,maxindex]) # type: ignore edges = [] for e in g.edges(): v,w = g.edge_st(e) ty = g.edge_type(e) s = " \\draw " if ty == EdgeType.HADAMARD: if g.type(v) != VertexType.BOUNDARY and g.type(w) != VertexType.BOUNDARY: s += "[style=hadamard edge] " else: x = (g.row(v) + g.row(w))/2.0 +xoffset y = -(g.qubit(v)+g.qubit(w))/2.0 -yoffset t = " \\node [style=hadamard] ({:d}) at ({:.2f}, {:.2f}) {{}};".format(maxindex+1, x,y) verts.append(t) maxindex += 1 s += "({:d}) to ({:d});".format(v+idoffset,w+idoffset) # type: ignore edges.append(s) return (verts, edges)
0c5fab1fcd0f5db9e8b7d267de5e4b5ea2444046
3,647,160
def _fix_server_adress(raw_server):
    """ Prepend http:// there. """
    if not raw_server.startswith("http://"):
        raw_server = "http://" + raw_server
    return raw_server
64171be5033930fd5ecb3cd275cc0d859b7e6ca0
3,647,161
def _parse_output_keys(val):
    """Parse expected output keys from string, handling records.
    """
    out = {}
    for k in val.split(","):
        # record output
        if ":" in k:
            name, attrs = k.split(":")
            out[name] = attrs.split(";")
        else:
            out[k] = None
    return out
abd739026574b1a3fa87c42d2719d172e36a1c4a
3,647,162
import sqlite3 def check(sync_urls: list, cursor: sqlite3.Cursor, db: sqlite3.Connection, status: str): """Checking update in the back. Args: sync_urls: URL(s) to be checked as a list cursor: Cursor object of sqlite3 db: Connection object of sqlite3 status: 'viewed' or 'unviewed' Return: Set of update links """ out_updates = [] for sync_url in sync_urls: links_fetch = [] links_from_db = [] https_updates = [] sync_url = sync_url.strip("/") f_links = fetch(sync_url) # .split(",") for f_link in set(f_links): links_fetch.append(f_link.strip()) db_links = cursor.execute( "SELECT link FROM links JOIN urls ON links.url_id=urls.url_id WHERE urls.url=?", (sync_url,), ) for link in db_links: links_from_db.append(link[0]) updates = [x for x in links_fetch if x not in set(links_from_db)] url_split = sync_url.split("/") for update in updates: if sync_url in update: https_updates.append(update) elif len(url_split) > 3: url_split = url_split[:3] https_updates.append("/".join(url_split) + "/" + update.strip("/")) else: https_updates.append(sync_url + "/" + update.strip("/")) url_id = cursor.execute( "SELECT url_id FROM urls WHERE url=?", (sync_url,) ).fetchone()[0] for update in updates: items = (url_id, update, status) cursor.execute( "INSERT INTO links (url_id, link, status) VALUES (?, ?, ?)", items ) db.commit() out_updates.extend(https_updates) return set(out_updates)
a9d7160d7f08d51b4ef596692fa10136f1f21375
3,647,163
from typing import Dict from typing import Tuple from typing import OrderedDict from typing import Type import torch def _get_output_structure( text: str, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, tokenizer_args: Dict, ) -> Tuple[OrderedDict, Type]: """Function needed for saving in a dictionary the output structure of the transformers model. """ encoded_input = tokenizer([text], **tokenizer_args) output = model(**encoded_input) structure = OrderedDict() for key, value in output.items(): if isinstance(value, torch.Tensor): structure[key] = None else: size = _get_size_recursively(value) structure[key] = size return structure, type(output)
0f268b3fb9208b8f447b0c030744b9ce310049e6
3,647,164
def encode_board(board):
    """
    Encode the 2D board list to a 64-bit integer
    """
    new_board = 0
    for row in board.board:
        for tile in row:
            new_board <<= 4
            if tile is not None:
                new_board += tile.val
    return new_board
2c85964902dc3b2d097e30b71f11e7c17b80297a
3,647,165
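A hypothetical inverse of the packing above, not part of the original record: it assumes a square board (4x4 by default) and 4-bit tile values, and returns raw integers with 0 standing in for an empty cell.

def decode_board(encoded, size=4):
    """Unpack a 64-bit integer back into a size x size grid of 4-bit values."""
    cells = []
    for _ in range(size * size):
        cells.append(encoded & 0xF)   # lowest 4 bits hold the most recently packed tile
        encoded >>= 4
    cells.reverse()                   # packing was row-major, so restore the order
    return [cells[i * size:(i + 1) * size] for i in range(size)]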
def get_symbol(i):
    """Get the symbol corresponding to int ``i`` - runs through the usual 52
    letters before resorting to unicode characters, starting at ``chr(192)``.

    Examples
    --------
    >>> get_symbol(2)
    'c'

    >>> oe.get_symbol(200)
    'Ŕ'

    >>> oe.get_symbol(20000)
    '京'
    """
    if i < 52:
        return einsum_symbols_base[i]
    return chr(i + 140)
e6e9a91fa48e04ed591b22fba62bf2ba6fd5d81f
3,647,166
def longest_common_substring(string1, string2):
    """
    Function to find the longest common substring of two strings
    """
    m = [[0] * (1 + len(string2)) for i in range(1 + len(string1))]
    longest, x_longest = 0, 0
    for x in range(1, 1 + len(string1)):
        for y in range(1, 1 + len(string2)):
            if string1[x - 1] == string2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    longest = m[x][y]
                    x_longest = x
            else:
                m[x][y] = 0
    return string1[x_longest - longest: x_longest]
f567c629f5bd02143f0ed6bbbdc11f0e59e5f4bd
3,647,167
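Two quick usage lines for the dynamic-programming routine above; the expected outputs in the comments follow directly from the code.

print(longest_common_substring('dynamic programming', 'programmer'))  # 'programm'
print(longest_common_substring('abc', 'xyz'))                         # '' (no common substring)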
def elapsed_time_id(trace, event_index: int):
    """Calculate elapsed time by event index in trace

    :param trace:
    :param event_index:
    :return:
    """
    try:
        event = trace[event_index]
    except IndexError:
        # catch for 0 padding.
        # calculate using the last event in trace
        event = trace[-1]
    return elapsed_time(trace, event)
7e94531f13458fc32ca5d971178b79fc13aa65f8
3,647,168
def build_candidate_digest(proof, leaf_hash):
    """
    Build the candidate digest representing the entire ledger from the Proof hashes.

    :type proof: dict
    :param proof: The Proof object.

    :type leaf_hash: bytes
    :param leaf_hash: The revision hash to pair with the first hash in the Proof hashes list.

    :rtype: bytes
    :return: The calculated root hash.
    """
    parsed_proof = parse_proof(proof)
    root_hash = calculate_root_hash_from_internal_hashes(parsed_proof, leaf_hash)
    return root_hash
1434cf5e1da9edbd6b41caacd42a67df235267f5
3,647,169
from typing import Iterable import torch def confusion_matrix_eval(cnn, data_loader): """Retrieves false positives and false negatives for further investigation Parameters ---------- cnn : torchvision.models A trained pytorch model. data_loader : torch.utils.data.DataLoader A dataloader iterating through the holdout test sample. Returns ------- dict Dictionary containing cases model classified as false positives and false negatives. Raises ------ ValueError Raised if data_loader is not iterable. """ if not isinstance(data_loader, Iterable): raise ValueError("data_loader is not iterable") fp = [] fn = [] cnn.eval() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") with torch.no_grad(): for i, (inputs, classes) in enumerate(data_loader): inputs = inputs.to(device) classes = classes.to(device) outputs = cnn(inputs).flatten() preds = torch.sigmoid(outputs) > 0.5 j = 0 for t, p in zip(classes.view(-1), preds.view(-1)): if [float(t.cpu().numpy()), float(p.long().cpu().numpy())] == [ 0.0, 1.0, ]: fp.append( data_loader.dataset.samples[(i * data_loader.batch_size + j)][1] ) elif [float(t.cpu().numpy()), float(p.long().cpu().numpy())] == [ 1.0, 0.0, ]: fn.append( data_loader.dataset.samples[(i * data_loader.batch_size + j)][1] ) j += 1 return {"false_positives": fp, "false_negatives": fn}
411a7d95f7713ff00dbcb7b25db7bd497f427593
3,647,170
import hashlib def match_by_hashed_faceting(*keys): """Match method 3 - Hashed Faceted Search""" matches = [] hfs = [] for i in range(len(__lookup_attrs__)): key = [x for x in keys if x[0] == __lookup_attrs__[i]] if key: hfs.append(key[0]) hashed_val = hashlib.sha256(str(hfs).encode('utf-8')).hexdigest() hashed_key = keynamehelper.create_key_name("hfs", hashed_val) for found_key in redis.sscan_iter(hashed_key): matches.append(found_key) return matches
b2b849583e732747b42a4d4e7ec56c1090ddb1a8
3,647,171
def get_derivative_density_matrix(mat_diag,mat_Rabi,sigma_moins_array,**kwargs): """ Returns function for t-evolution using the numerical integration of the density matrix \dot{\rho}=-i(H_eff \rho-\rho H_eff^{\dagger}) +\Gamma \sum_j \sigma_j^_ \rho \sigma_j^+ """ dim=len(mat_diag) tunneling=kwargs.get('tunneling','on') if tunneling=='off': def L_on_rho_loc(tt,yy): yy=np.reshape(yy, (dim,dim)) H_eff=csr_matrix(square_mat(mat_diag)) deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array) return np.reshape(deriv, dim*dim) return L_on_rho_loc else: def L_on_rho_loc(tt,yy): yy=np.reshape(yy, (dim,dim)) H_eff=csr_matrix(mat_Rabi+square_mat(mat_diag)) deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array) return np.reshape(deriv, dim*dim) return L_on_rho_loc
1dc1321a6578b6bd9b3c5e937f9c7ed8a787f5a6
3,647,172
import os
import threading


def start_http_server():
    """Starts a simple http server for the test files"""
    # For the http handler
    os.chdir(TEST_FILES_DIR)
    handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    handler.extensions_map['.html'] = 'text/html; charset=UTF-8'
    httpd = ThreadedTCPServer(("localhost", 0), handler)
    ip, port = httpd.server_address
    httpd_thread = threading.Thread(target=httpd.serve_forever)
    httpd_thread.setDaemon(True)
    httpd_thread.start()
    return (ip, port, httpd, httpd_thread)
30455072e0213d8a509caccfe1c0bc50d6b33512
3,647,173
def area_of_polygon(polygon):
    """
    Returns the area of an OpenQuake polygon in square kilometres
    """
    lon0 = np.mean(polygon.lons)
    lat0 = np.mean(polygon.lats)
    # Transform to Lambert equal area projection
    x, y = lonlat_to_laea(polygon.lons, polygon.lats, lon0, lat0)
    # Build shapely polygons
    poly = geometry.Polygon(zip(x, y))
    return poly.area
bc57bd58b2ae64b33e34b1bee4582e3c6733fe4d
3,647,174
def build_dataset_values(claim_object, data_value):
    """
    Build results with different datasets.

    Parameters:
        claim_object (obj): Object to modify and add to rows.
        data_value (obj): result object

    Returns:
        Modified claim_object according to data_value.type
    """
    if data_value["type"] == "globecoordinate":
        claim_object["str"] = str(data_value["value"]["latitude"]) + "," + str(data_value["value"]["longitude"])
    elif data_value["type"] == "time":
        claim_object["date"] = data_value["value"]["time"].split("T")[0].split("+")[1]
    elif data_value["type"] == "string":
        claim_object["str"] = data_value["value"]
    else:
        pass
    return claim_object
f3d267a4e9ac099f6d2313deffb2f45d35b90217
3,647,175
import os


def get_yolk_dir():
    """Return location we store config files and data."""
    return os.path.abspath('%s/.yolk' % os.path.expanduser('~'))
788e44d2f95ce720d10154465198a3f86625453b
3,647,176
def gen_csrv_msome(shape, n_parts, mic_rad, min_ip_dst): """ Generates a list of 3D coordinates and rotations a CSRV pattern :param shape: tomogram shape :param n_parts: number of particles to try to generate :param mic_rad: microsome radius :param min_ip_dst: minimum interparticle distance :param c_jump_prob: probabilty to create a new cluster evaluated each time a particle is addded [0, 1] :return: two output lists; coordinates and rotations """ # Initialization count = 0 min_ip_dst_2 = float(min_ip_dst) ** 2 locs, rots = list(), list() mic_cent = .5 * np.asarray(shape, dtype=np.float) mic_rad_f = float(mic_rad) max_n_tries = np.prod(np.asarray(shape, dtype=np.int)) # Loop for particles mic_end, n_try = False, 0 p_end = False while not p_end: p_cent = np.random.randn(1, 3)[0] norm = mic_rad_f / np.linalg.norm(p_cent) p_cent *= norm p_cent += mic_cent # Check that the particle is within the tomogram if (p_cent[0] >= 0) and (p_cent[0] < shape[0]) \ and (p_cent[1] >= 0) and (p_cent[1] < shape[1]) \ and (p_cent[2] >= 0) and (p_cent[2] < shape[2]): if len(locs) > 0: # Check that the new particle does not overlap with other already inserted hold_dst = p_cent - np.asarray(locs, dtype=np.float) if np.sum(hold_dst * hold_dst, axis=1) >= min_ip_dst_2: locs.append(p_cent) tilt, psi = vect_to_zrelion(p_cent - mic_cent, mode='active')[1:] rots.append((360. * np.random.rand() - 180., tilt, psi)) count += 1 else: locs.append(p_cent) tilt, psi = vect_to_zrelion(p_cent - mic_cent, mode='active')[1:] rots.append((360. * np.random.rand() - 180., tilt, psi)) count += 1 # Ensure termination n_try += 1 if (n_try > max_n_tries) or (count >= n_parts): p_end = True return locs, rots
ba0033859e4e18b55d877a6db957b64674171d74
3,647,177
def electra_model(request):
    """Exposes the command-line option to a test case."""
    electra_model_path = request.config.getoption("--electra_model")
    if not electra_model_path:
        pytest.skip("No --electra_model given")
    else:
        return electra_model_path
9273ddfe253c7dc0ab3307b4fb6f009aa806821b
3,647,178
def By_2d_approximation(x, w, d, j):
    """Approximation of By_surface valid except near edges of slab."""
    mu0_over_4pi = 1e-7
    return 2e-7 * j * d * np.log((w/2 + x) / (w/2 - x))
2d64957d0dbec677b6d8b6e825c18ac32b25f7e7
3,647,179
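A quick numeric check of the slab-field approximation above; the width, thickness and current-density values are illustrative only, and numpy (as np) is assumed in scope as the function already requires.

import numpy as np

w, d, j = 0.10, 0.001, 1.0e6        # hypothetical slab width (m), thickness (m), current density (A/m^2)
x = np.linspace(-0.04, 0.04, 5)     # evaluation points kept away from the edges at +/- w/2
print(By_2d_approximation(x, w, d, j))   # field values, antisymmetric about x = 0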
from typing import Dict from pathlib import Path from typing import List def read_lists(paths: Dict[str, Path]) -> Dict[str, List[str]]: """Return a dictionary of song lists read from file. Arguments: paths {Dict[str, Path]} -- A dictionary of type returned by find_paths. Returns: Dict[str, List[str]] -- The keys are a string song list id ('1' to '6' or 'F'), and the value lists contains the song keys to be written to that list. """ sl_dict: Dict[str, List[str]] = dict() for song_list_id, file_path in paths.items(): logger.log_this( f"Reading file '{file_path.name}'' for song list '{song_list_id}'." ) with open(file_path, "rt", encoding="locale") as file_handle: song_list = simplejson.load(file_handle) # structure checks - could have used a schema for this. # because I'm a bit lazy here, might also fail if a song key # is pure digits and has been converted to a number on the way in # We can tidy this up if it ever happens. if not isinstance(song_list, list): raise TypeError( f"Invalid format in file '{file_path.name}'." f"\n This should be a JSON list of strings, but I found " f"a {type(song_list)}." ) for val in song_list: if not isinstance(val, str): raise TypeError( f"Invalid song list member in file '{file_path.name}'." f"\n This should be a JSON list of strings, but I found " f"a member with {type(val)}." ) # just to be sure, clean out white space and empty strings silently. song_list = [x for x in song_list if x.strip() != ""] sl_dict[song_list_id] = song_list logger.log_this("All song list files passed structure tests.") return sl_dict
13d6618293e17e5a7388ca9f3d44ffffe913f482
3,647,180
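A hedged usage sketch: each song-list file is a JSON array of song-key strings, as the loader above expects. The file name and keys are illustrative, and the sketch assumes the module-level logger and simplejson objects that read_lists relies on are in place.

import json
from pathlib import Path

Path("songlist_1.json").write_text(json.dumps(["SongKeyA", "SongKeyB", "SongKeyC"]))  # toy file
paths = {"1": Path("songlist_1.json")}
song_lists = read_lists(paths)
print(song_lists)   # {'1': ['SongKeyA', 'SongKeyB', 'SongKeyC']}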
def learningCurve(theta, X_train, y_train, X_cv, y_cv, lambda_param):
    """
    Compute training and cross-validation error for increasing training-set sizes.

    :param theta: initial parameter vector for gradient descent
    :param X_train: training design matrix
    :param y_train: training targets
    :param X_cv: cross-validation design matrix
    :param y_cv: cross-validation targets
    :param lambda_param: regularization strength
    :return: (J_train, J_cv) lists of costs for the first 1..m training examples
    """
    number_examples = y_train.shape[0]
    J_train, J_cv = [], []

    for i in range(1, number_examples + 1):
        theta, _ = gradientDescent(theta, X_train[:i, :], y_train[:i, :], 0.001, 3000, lambda_param)

        cost_train = linearRegressionCostFunction(theta, X_train[0:i, :], y_train[:i, :], lambda_param)
        J_train.append(cost_train)

        cost_cv = linearRegressionCostFunction(theta, X_cv, y_cv, lambda_param)
        J_cv.append(cost_cv)

    return J_train, J_cv
c15f5b3af34beb4c20982c2b339919c50b3336b1
3,647,181
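A hedged usage sketch with synthetic data: it assumes the gradientDescent and linearRegressionCostFunction helpers referenced above are importable from the same module; the data shapes are illustrative only.

import numpy as np
import matplotlib.pyplot as plt

m, n = 30, 2
X_train = np.hstack([np.ones((m, 1)), np.random.randn(m, n)])   # bias column + features
y_train = np.random.randn(m, 1)
X_cv = np.hstack([np.ones((20, 1)), np.random.randn(20, n)])
y_cv = np.random.randn(20, 1)
theta0 = np.zeros((n + 1, 1))

J_train, J_cv = learningCurve(theta0, X_train, y_train, X_cv, y_cv, lambda_param=1.0)
plt.plot(J_train, label="train")
plt.plot(J_cv, label="cv")
plt.legend()
plt.show()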
def reduce_labels(y):
    """Reduce the themes and disciplines to the top level of the hierarchy."""
    labels = []  # new y
    themes = []
    disciplines = []
    for i, elements in enumerate(y):
        tmp_all_labels = []
        tmp_themes = []
        tmp_disciplines = []
        #print("\nlabels in y at position %s: %s" % (i, elements))
        for element in elements:
            #print("\nLabel:", element)
            # themes
            for key, value in themes_dic.items():
                if element == key:
                    tmp_all_labels.append(element)
                    tmp_themes.append(element)
                    #print("\nTheme key:", element)
                elif element in value:
                    tmp_all_labels.append(key)
                    tmp_themes.append(key)
                    #print("\nTheme:", key)
                else:
                    pass  # element not found in this theme entry
            # disciplines
            for key, value in disciplines_dic.items():
                if element == key:
                    tmp_all_labels.append(element)
                    tmp_disciplines.append(element)
                    #print("\nDiscipline key:", element)
                elif element in value:
                    tmp_all_labels.append(key)
                    tmp_disciplines.append(key)
                    #print("\nDiscipline:", key)
                else:
                    pass  # element not found in this discipline entry
        #print("\ntmp_list:", tmp_all_labels)
        labels.append(list(set(tmp_all_labels)))
        themes.append(list(set(tmp_themes)))
        disciplines.append(list(set(tmp_disciplines)))
    #print("\nnew labelset:", labels)
    return labels, themes, disciplines
9dc5d3b1d07f7fd7f72911b891533d90bae4ad63
3,647,182
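A hedged usage sketch: reduce_labels reads the module-level themes_dic and disciplines_dic, which map a top-level label to its sub-labels. The toy dictionaries and labels below are illustrative only, not the project's real vocabulary.

themes_dic = {"Mathematics": ["Algebra", "Geometry"]}
disciplines_dic = {"Science": ["Physics", "Chemistry"]}

y = [["Algebra", "Physics"], ["Mathematics"]]
labels, themes, disciplines = reduce_labels(y)
print(labels)       # e.g. [['Mathematics', 'Science'], ['Mathematics']]
print(themes)       # top-level themes per sample
print(disciplines)  # top-level disciplines per sample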
def api_get_categories(self):
    """ Gets a list of all the categories. """
    response = TestCategory.objects.all()
    s = ""
    for cat in response:
        s += b64(cat.name) + "," + b64(cat.description) + ","
    return HttpResponse(s.rstrip(','))
f7c523437c8f9d538bb4c5411232458ae3e10450
3,647,183
def histtab(items, headers=None, item="item", count="count", percent="percent", cols=None):
    """Make a histogram table."""
    if cols is not None:
        # items is a Table.
        items = items.as_tuples(cols=cols)
        if headers is None:
            headers = cols + [count, percent]
    if headers is None:
        headers = [item, count, percent]

    h = util.hist_dict(items)
    tab = Table(headers=headers)
    tot = float(sum(h.values()))
    hist_items = list(h.items())

    if cols is not None:
        for key, val in hist_items:
            row = dict(zip(cols, key))
            row[count] = val
            tab.append(row)
    else:
        for key, val in hist_items:
            tab.append({item: key, count: val})

    if percent is not None:
        for i, (key, val) in enumerate(hist_items):
            tab[i][percent] = val / tot

    tab.sort(col=count, reverse=True)
    return tab
ca6bc51d80a179f693ca84fa27753908dcf30ca8
3,647,184
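A hedged usage sketch: histtab depends on the surrounding Table class and util.hist_dict helper; assuming those are importable and Table behaves like a list of dict rows, counting repeated items looks like this.

items = ["a", "b", "a", "c", "a", "b"]
tab = histtab(items)            # columns: item, count, percent; sorted by count
for row in tab:
    print(row["item"], row["count"], round(row["percent"], 2))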
from rdkit import Chem


def read_sdf_to_mol(sdf_file, sanitize=False, add_hs=False, remove_hs=False):
    """Reads a list of molecules from an SDF file.

    :param add_hs: Specifies whether to add hydrogens. Defaults to False
    :type add_hs: bool
    :param remove_hs: Specifies whether to remove hydrogens. Defaults to False
    :type remove_hs: bool
    :param sanitize: Specifies whether to sanitize the molecule. Defaults to False
    :type sanitize: bool
    :return: list of molecules in RDKit format.
    :rtype: list[rdkit.Chem.rdchem.Mol]
    """
    suppl = Chem.SDMolSupplier(sdf_file, sanitize=sanitize, removeHs=remove_hs)
    molecules = [mol for mol in suppl]

    if add_hs:
        # Chem.AddHs returns a new molecule, so rebuild the list instead of
        # reassigning the loop variable (which would discard the result).
        molecules = [Chem.AddHs(mol, addCoords=True) if mol is not None else None
                     for mol in molecules]

    return molecules
c1917b5dcdad3a88bfd2b181e6c1e757393a4de8
3,647,185
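A hedged usage sketch: reading molecules from an SDF file and reporting how many parsed; the file name is hypothetical and RDKit must be installed.

from rdkit import Chem

mols = read_sdf_to_mol("ligands.sdf", sanitize=True, add_hs=True)  # hypothetical file name
ok = [m for m in mols if m is not None]
print(f"parsed {len(ok)} of {len(mols)} molecules")
print(Chem.MolToSmiles(ok[0]) if ok else "no valid molecules")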
import math def search_and_score(milvus_collection_name, mongo_name, field_name, vectors, topk, nprobe, inner_score_mode: str): """ search vectors from milvus and score by inner field score mode :param milvus_collection_name: collection name will be search :param mongo_name: mongo collection name will be selected from :param field_name: field name for searching from mongodb :param vectors: vectors which will be searched in milvus :param topk: milvus topk number :param nprobe: milvus nprobe number :param inner_score_mode: :return: image id of entity """ result_dbs = [] MAX_TOPK = 2048 magic_number = 60 increase_rate = 0.1 query_topk = topk + magic_number end_flag = False try: inner_score_mode = InnerFieldScoreMode(inner_score_mode) except Exception as e: raise WrongInnerFieldModeError("Unsupported inner field mode", e) while (len(result_dbs) < topk) and (not end_flag): # check query topk max value query_topk = min(query_topk, MAX_TOPK) vids = MilvusIns.search_vectors(milvus_collection_name, vectors, topk=query_topk, nprobe=nprobe) if len(vids) == 0: raise NoneVectorError("milvus search result is None", "") # filter -1 and if exist -1 or len(vids) < topk if (-1 in vids.id_array[0]) or len(vids[0]) < query_topk: end_flag = True # inner field score function here res_vids = get_inner_field_score_result(vids, query_topk, inner_score_mode) if len(res_vids) < topk: if query_topk < MAX_TOPK: # calc a new query_topk and needn't to query from mysql query_topk += math.ceil(query_topk * increase_rate) increase_rate *= 2 if not end_flag: continue end_flag = True result_dbs = MongoIns.search_by_vector_id(mongo_name, field_name, res_vids) # calc a new query_topk if len(result_dbs) < topk query_topk += math.ceil(query_topk * increase_rate) return result_dbs[:topk]
885d0e912e76a379dbb55b15945f898c419254bc
3,647,186
def fix_simulation():
    """ Create instance of Simulation class."""
    return Simulation()
fc36a880342bb6e6be6e6735c3ebd09891d09502
3,647,187
def build_tree(vectors, algorithm='kd_tree', metric='minkowski', **kwargs):
    """Build NearestNeighbors tree."""
    kwargs.pop('algorithm', None)
    kwargs.pop('metric', None)
    return NearestNeighbors(algorithm=algorithm, metric=metric, **kwargs).fit(vectors)
42df608eb0e4e5f420bd0a8391ad748b18eb5f4f
3,647,188
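A hedged usage sketch: build a k-d tree over random vectors and query the three nearest neighbours of a new point via scikit-learn's standard kneighbors API; the data shapes are illustrative.

import numpy as np

vectors = np.random.rand(1000, 8)
tree = build_tree(vectors, algorithm="kd_tree", metric="minkowski", n_neighbors=3)
dist, idx = tree.kneighbors(np.random.rand(1, 8))
print(idx[0], dist[0])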
def _expectedValues():
    """
    These values are expected for well exposed spot data. The dictionary has a tuple for
    each wavelength. Note that for example focus is data set dependent and should be used
    only as an indicator of a possible value.

    keys: l600, l700, l800, l890
    tuple = [radius, focus, widthx, widthy]
    """
    out = dict(l600=(0.45, 0.40, 0.34, 0.32),
               l700=(0.47, 0.40, 0.32, 0.31),
               l800=(0.49, 0.41, 0.30, 0.30),
               l800l=(0.49, 0.41, 0.27, 0.27),
               l800m=(0.49, 0.41, 0.30, 0.30),
               l800h=(0.49, 0.41, 0.31, 0.31),
               l890=(0.54, 0.38, 0.29, 0.29))
    return out
7ddd7031313ac5c90f022a6a60c81ad12b4d5dac
3,647,189
import time def storyOne(player): """First Story Event""" player.story += 1 clear() print("The dust gathers around, swirling, shaking, taking some sort of shape.") time.sleep(2) print("Its the bloody hermit again!") time.sleep(2) clear() print("Hermit: Greetings, " + str(player.name) + ". It is good to see you.") print(str(player.name) + ": Really? You still alive?") time.sleep(5) clear() print("Hermit: Shut up.\n\nAlso, incidentally, I'm here to warn you. The world has noticed you... Your progress will become... Difficult.") time.sleep(4) clear() print("Hermit: Now, a choice awaits you. I have the power to offer you a gift!") time.sleep(2) clear() print("0: A better weapon.") print("1: Better armor.") print("2: A better enchantment.") print("3: A rank increase.") choice = input("Enter a number between 0 and 3: ") if choice == "0": player.weapon += 1 elif choice == "1": player.armor += 1 elif choice == "2": player.enchantment += 1 elif choice == "3": player.level += 1 else: pass clear() print("Hermit: Excellent!") print(kill_hermit()) time.sleep(4) clear() return True
fbaff47f27c5ec474caa73d2f87d77029f4814a4
3,647,190
def mcat(i): """Concatenate a list of matrices into a single matrix using separators ',' and ';'. The ',' means horizontal concatenation and the ';' means vertical concatenation. """ if i is None: return marray() # calculate the shape rows = [[]] final_rows = 0 final_cols = 0 crows = ccols = 0 pos = [] pos2 = [] for x in i: #if x == ';': if x is Ellipsis: rows.append([]) if final_cols > 0 and final_cols != ccols: error("Incompatible shapes!") else: final_cols = ccols final_rows += crows ccols = 0 pos.append(Ellipsis) elif isinstance(x, mvar): shp = x.msize if len(shp) < 1: shp = [0] if len(shp) < 2: shp += [0] rows[-1].append(shp[0]) pos.append( (slice(final_rows, final_rows+shp[0]), slice(ccols, ccols+shp[1])) ) crows = shp[0] # FIXME ccols += shp[1] elif _isscalar(x): rows[-1].append(1) pos.append( (final_rows, ccols) ) crows = 1 ccols += 1 else: raise OMPCException("Unsupported type: %s!"%type(x)) if final_cols > 0 and final_cols != ccols: error("Incompatible shapes!") else: final_cols = ccols final_rows += crows out = empty((final_rows, final_cols), 'double') for sl, x in _izip(pos, i): if x is not Ellipsis: if isinstance(x, mvar): x = x._a out._a.__setitem__(sl[::-1], x) #out._a.reshape(final_cols, final_rows).T.__setitem__(sl, x) return out
ef424209b7c8f2e08d549bccf62a58c24c5048f3
3,647,191
import h5py
import torch
import tensorflow
from typing import Dict
from typing import Union


def get_optional_info() -> Dict[str, Union[str, bool]]:
    """Get optional package info (tensorflow, pytorch, hdf5_bloscfilter, etc.)

    Returns
    -------
    Dict[str, Union[str, bool]]
        package name, package version (if installed, otherwise False)
    """
    res = {}

    try:
        bloscFilterAvail = h5py.h5z.filter_avail(32001)
    except ImportError:  # pragma: no cover
        bloscFilterAvail = False
    res['blosc-hdf5-plugin'] = bloscFilterAvail

    try:
        torchVersion = torch.__version__
    except ImportError:  # pragma: no cover
        torchVersion = False
    res['pytorch'] = torchVersion

    try:
        tensorflowVersion = tensorflow.__version__
    except ImportError:  # pragma: no cover
        tensorflowVersion = False
    res['tensorflow'] = tensorflowVersion

    return res
07255d4e889b669497628cd9b9c6e102ceb22bbf
3,647,192
import timeit


def epsilon_experiment(dataset, n: int, eps_values: list):
    """
    Function for the experiment explained in part (g).
    eps_values is a list, such as: [0.0001, 0.001, 0.005, 0.01, 0.05, 0.1, 1.0]
    Returns the errors as a list: [9786.5, 1234.5, ...] such that 9786.5 is the error
    when eps = 0.0001, 1234.5 is the error when eps = 0.001, and so forth.
    """
    timer_list = []
    total_errors = []
    non_private_histogram = get_histogram(dataset)

    for epsilon in eps_values:
        start = timeit.default_timer()
        error_list = []
        for _ in range(30):
            dp_histogram = get_dp_histogram(dataset, n, epsilon)
            av_error = calculate_average_error(non_private_histogram, dp_histogram)
            error_list.append(av_error)
        total_average_error = sum(error_list) / len(error_list)
        total_errors.append(total_average_error)
        stop = timeit.default_timer()
        timer_list.append(stop - start)

    return total_errors, timer_list
7aabe10dd97f594533a2da4a901b61790c8435f8
3,647,193
def infer_scaletype(scales):
    """Infer whether `scales` is linearly or exponentially distributed (if latter,
    also infers `nv`). Used internally on `scales` and `ssq_freqs`.

    Returns one of: 'linear', 'log', 'log-piecewise'
    """
    scales = asnumpy(scales).reshape(-1, 1)
    if not isinstance(scales, np.ndarray):
        raise TypeError("`scales` must be a numpy array (got %s)" % type(scales))
    elif scales.dtype not in (np.float32, np.float64):
        raise TypeError("`scales.dtype` must be np.float32 or np.float64 "
                        "(got %s)" % scales.dtype)

    th_log = 1e-15 if scales.dtype == np.float64 else 4e-7
    th_lin = th_log * 1e3  # less accurate for some reason

    if np.mean(np.abs(np.diff(scales, 2, axis=0))) < th_lin:
        scaletype = 'linear'
        nv = None
    elif np.mean(np.abs(np.diff(np.log(scales), 2, axis=0))) < th_log:
        scaletype = 'log'
        # ceil to avoid faulty float-int roundoffs
        nv = int(np.round(1 / np.diff(np.log2(scales), axis=0)[0]))
    elif logscale_transition_idx(scales) is None:
        raise ValueError("could not infer `scaletype` from `scales`; "
                         "`scales` array must be linear or exponential. "
                         "(got diff(scales)=%s..." % np.diff(scales, axis=0)[:4])
    else:
        scaletype = 'log-piecewise'
        nv = nv_from_scales(scales)

    return scaletype, nv
50e961118a3c97835d279832b399ef72946f4b4a
3,647,194
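A hedged usage sketch, assuming the module-level helpers this function relies on (asnumpy and friends) are available; the scale vector is illustrative.

import numpy as np

lin_scales = np.linspace(1.0, 64.0, 256)
print(infer_scaletype(lin_scales))          # expected: ('linear', None)

# Exponentially spaced scales such as 2 ** (np.arange(n) / nv) should instead be
# reported as ('log', nv), and piecewise-exponential ones as 'log-piecewise'.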
from googleapiclient.http import build_http
import google


def authorized_http(credentials):
    """Returns an http client that is authorized with the given credentials.

    Args:
        credentials (Union[
            google.auth.credentials.Credentials,
            oauth2client.client.Credentials]): The credentials to use.

    Returns:
        Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
        authorized http client.
    """
    if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
        if google_auth_httplib2 is None:
            raise ValueError(
                "Credentials from google.auth specified, but "
                "google-api-python-client is unable to use these credentials "
                "unless google-auth-httplib2 is installed. Please install "
                "google-auth-httplib2."
            )
        return google_auth_httplib2.AuthorizedHttp(credentials, http=build_http())
    else:
        return credentials.authorize(build_http())
21d7a05e9d99f0a6e8414da5925f0d69224f846c
3,647,195
from typing import Optional from typing import List from typing import Tuple from typing import Callable def add_grating_couplers_with_loopback_fiber_array( component: Component, grating_coupler: ComponentSpec = grating_coupler_te, excluded_ports: Optional[List[str]] = None, grating_separation: float = 127.0, bend_radius_loopback: Optional[float] = None, gc_port_name: str = "o1", gc_rotation: int = -90, straight_separation: float = 5.0, bend: ComponentSpec = bend_euler, straight: ComponentSpec = straight_function, layer_label: Tuple[int, int] = (200, 0), layer_label_loopback: Optional[Tuple[int, int]] = None, component_name: Optional[str] = None, with_loopback: bool = True, nlabels_loopback: int = 2, get_input_labels_function: Callable = get_input_labels, cross_section: CrossSectionSpec = strip, select_ports: Callable = select_ports_optical, **kwargs, ) -> Component: """Returns a component with grating_couplers and loopback. Args: component: to add grating_couplers. grating_coupler: grating_coupler. excluded_ports: list of ports to exclude. grating_separation: in um. bend_radius_loopback: um. gc_port_name: optional grating coupler name. gc_rotation: grating coupler rotation in degrees. straight_separation: bend: bend spec. straight: straight spec. layer_label: optional layer_label. component_name: optional component name. with_loopback: If True, add compact loopback alignment ports. nlabels_loopback: number of ports to label (0: no labels, 1: first port, 2: both ports). cross_section: CrossSectionSpec. select_ports: function to select ports. kwargs: cross_section settings """ x = gf.get_cross_section(cross_section, **kwargs) bend_radius_loopback = bend_radius_loopback or x.radius excluded_ports = excluded_ports or [] gc = gf.get_component(grating_coupler) direction = "S" component_name = component_name or component.metadata_child.get("name") c = Component() c.component = component c.info["polarization"] = gc.info["polarization"] c.info["wavelength"] = gc.info["wavelength"] c.add_ref(component) # Find grating port name if not specified if gc_port_name is None: gc_port_name = list(gc.ports.values())[0].name # List the optical ports to connect optical_ports = select_ports(component.ports) optical_ports = list(optical_ports.values()) optical_ports = [p for p in optical_ports if p.name not in excluded_ports] optical_ports = direction_ports_from_list_ports(optical_ports)[direction] # Check if the ports are equally spaced grating_separation_extracted = check_ports_have_equal_spacing(optical_ports) if grating_separation_extracted != grating_separation: raise ValueError( f"Grating separation must be {grating_separation}. 
Got {grating_separation_extracted}" ) # Add grating references references = [] for port in optical_ports: gc_ref = c.add_ref(gc) gc_ref.connect(gc.ports[gc_port_name].name, port) references += [gc_ref] labels = get_input_labels_function( io_gratings=references, ordered_ports=optical_ports, component_name=component_name, layer_label=layer_label, gc_port_name=gc_port_name, ) c.add(labels) if with_loopback: y0 = references[0].ports[gc_port_name].y xs = [p.x for p in optical_ports] x0 = min(xs) - grating_separation x1 = max(xs) + grating_separation gca1, gca2 = [ gc.ref(position=(x, y0), rotation=gc_rotation, port_id=gc_port_name) for x in [x0, x1] ] gsi = gc.size_info port0 = gca1.ports[gc_port_name] port1 = gca2.ports[gc_port_name] p0 = port0.position p1 = port1.position a = bend_radius_loopback + 0.5 b = max(2 * a, grating_separation / 2) y_bot_align_route = -gsi.width - straight_separation points = np.array( [ p0, p0 + (0, a), p0 + (b, a), p0 + (b, y_bot_align_route), p1 + (-b, y_bot_align_route), p1 + (-b, a), p1 + (0, a), p1, ] ) bend90 = gf.get_component( bend, radius=bend_radius_loopback, cross_section=cross_section, **kwargs ) loopback_route = round_corners( points=points, bend=bend90, straight=straight, cross_section=cross_section, **kwargs, ) c.add([gca1, gca2]) c.add(loopback_route.references) component_name_loopback = f"loopback_{component_name}" if nlabels_loopback == 1: io_gratings_loopback = [gca1] ordered_ports_loopback = [port0] if nlabels_loopback == 2: io_gratings_loopback = [gca1, gca2] ordered_ports_loopback = [port0, port1] if nlabels_loopback == 0: pass elif 0 < nlabels_loopback <= 2: c.add( get_input_labels_function( io_gratings=io_gratings_loopback, ordered_ports=ordered_ports_loopback, component_name=component_name_loopback, layer_label=layer_label_loopback or layer_label, gc_port_name=gc_port_name, ) ) else: raise ValueError( f"Invalid nlabels_loopback = {nlabels_loopback}, " "valid (0: no labels, 1: first port, 2: both ports2)" ) c.copy_child_info(component) return c
4452851ecc05f46ebf46cd92d6edbea9062bae35
3,647,196
import os import logging from datetime import datetime import json def FEBA_Save_Tables(gene_fit_d, genes_df, organism_name_str, op_dir, exps_df, cfg=None, writeImage=False, debug=False): """ Args: gene_fit_d (python dict): Documentation above function genes_df (pandas DataFrame): table genes.GC organism_name_str (str): Name of organism op_dir (str): Directory to write all saved tables and JSON to. exps_df (pandas DataFrame): from FEBA.BarSeq Must contain cols: name short writeImage (bool): Should we save all the data in one image to be easily imported into python/R? Note: We merge many dataframes on the locusId columns """ if cfg is None: cfg = { "strong_lr": 2, "strong_t": 5 } # Setting print options for debugging: pd.set_option('display.max_columns', None) if not os.path.isdir(op_dir): os.mkdir(op_dir) for expected_key in ["q","lr","lrn","lrn1","lrn2","t", "genesUsed","g", "lrNaive"]: if expected_key not in gene_fit_d: raise Exception(f"Missing expected key in gene_fit_d: {expected_key}") for name in gene_fit_d['q']['name']: if name not in gene_fit_d['lr'].columns: raise Exception(f"Name {name} missing from 'lr' object.") if name not in gene_fit_d['lrn'].columns: raise Exception(f"Name {name} missing from 'lrn' object.") for val in ["locusId", "sysName", "desc"]: if val not in genes_df.columns: raise Exception(f"Column name {val} not in genes_df") # Preparing variables that make it simpler to create_tables first3_cols = ["locusId", "sysName", "desc"] genes_first3 = genes_df[first3_cols] final_colnames = list(gene_fit_d['q']['name'] + ' ' + gene_fit_d['q']['short']) # WRITING TABLES: write_DataFrame_and_log(os.path.join(op_dir, "fit_quality.tsv"), gene_fit_d['q'], df_name="quality") #2 Fit genes - All genes, with some having the used column = True # used is a boolean list used = [(genes_df['locusId'].iat[i] in gene_fit_d['genesUsed']) \ for i in range(len(genes_df['locusId']))] new_genes_df = genes_df.copy(deep=True) new_genes_df['used'] = used write_DataFrame_and_log(os.path.join(op_dir, "fit_genes.tab"), new_genes_df, df_name = "Fit genes") del new_genes_df, used #3 Fit Log Ratios unnormalized pre_merge = gene_fit_d['lr'] pre_merge['locusId'] = gene_fit_d['g'] # below how is 'inner' by default, which is the fitting merge type tmp_df = genes_first3.merge(pre_merge, on="locusId") write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_unnormalized.tab"), tmp_df, df_name = "log ratios unnormalized") #4 Log Ratios Unnormalized Naive (Can put into 'extract...' function) pre_merge = gene_fit_d['lrNaive'].copy(deep=True) pre_merge['locusId'] = gene_fit_d['g'] tmp_df = genes_first3.merge(pre_merge, on="locusId") write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_unnormalized_naive.tab"), tmp_df, df_name = "log ratios unnormalized naive") #5 Fit Logratios pre_merge = gene_fit_d['lrn'].copy(deep=True) pre_merge['locusId'] = gene_fit_d['g'] tmp_df = genes_first3.merge(pre_merge, on="locusId") write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios.tab"), tmp_df, df_name = "fit logratios") #6 Fit Log Ratios 1st half (Can put into 'extract...' function) pre_merge = gene_fit_d['lrn1'].copy(deep=True) pre_merge['locusId'] = gene_fit_d['g'] tmp_df = genes_first3.merge(pre_merge, on="locusId") write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_half1.tab"), tmp_df, df_name = "fit logratios 1st half") #7 Fit Log Ratios 2nd half (Can put into 'extract...' 
function) pre_merge = gene_fit_d['lrn2'].copy(deep=True) pre_merge['locusId'] = gene_fit_d['g'] tmp_df = genes_first3.merge(pre_merge, on="locusId") write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_half2.tab"), tmp_df, df_name = "fit logratios 2nd half") print(genes_df) #8 Fit Log Ratios Good (?) genes_in_g_bool = [bool(genes_df['locusId'].iat[i] in gene_fit_d['g'].values) for i \ in range(genes_df.shape[0])] f3col_genes_df = genes_df[first3_cols][genes_in_g_bool] f3col_genes_df['comb'] = f3col_genes_df['sysName'] + ' ' + f3col_genes_df['desc'] tmp_df = f3col_genes_df.copy(deep=True) # q is quality, u is used if list(gene_fit_d['q']['u']).count(True) == 0: logging.warning("***Warning: 0 'OK' experiments.") tmp_new = tmp_df.sort_values(by='locusId') else: used_q_rows = gene_fit_d['q'][gene_fit_d['q']['u']] used_names = used_q_rows['name'] lrn_copy = gene_fit_d['lrn'].copy(deep=True) lrn_copy = lrn_copy[used_names] lrn_copy['locusId'] = gene_fit_d['g'] tmp_new = tmp_df.merge(lrn_copy, on="locusId") rename_columns = list(used_q_rows['name'] + ' ' + used_q_rows['short']) rename_d = {val: rename_columns[ix] for ix, val in enumerate(list(tmp_new.columns[4:]))} tmp_new = tmp_new.rename(columns=rename_d) tmp_new = tmp_new.sort_values(by='locusId') del lrn_copy write_DataFrame_and_log(os.path.join(op_dir, "fit_logratios_good.tab"), tmp_new, df_name = "fit logratios good") del tmp_new #9 Gene Counts pre_merge = gene_fit_d['tot'].copy(deep=True) pre_merge['locusId'] = gene_fit_d['g'] tmp_df = f3col_genes_df.merge(pre_merge, on="locusId") write_DataFrame_and_log(os.path.join(op_dir, "gene_counts.tab"), tmp_df, df_name = "gene counts") #10 Fit T Scores extract_gene_fit_d_category_to_tsv_basic(gene_fit_d['t'], gene_fit_d['g'], genes_first3, final_colnames, os.path.join(op_dir, "fit_t.tab"), "fit t") #11 Fit standard error extract_gene_fit_d_category_to_tsv_basic(gene_fit_d['se'], gene_fit_d['g'], genes_first3, final_colnames, os.path.join(op_dir, "fit_standard_error_obs.tab"), "fit standard error") #12 Fit Standard Error Naive extract_gene_fit_d_category_to_tsv_basic(gene_fit_d['sdNaive'], gene_fit_d['g'], genes_first3, final_colnames, os.path.join(op_dir, "fit_standard_error_naive.tab"), "fit standard error naive") #13 Strain Fit logging.info("Getting order of scaffolds to print Strain Fit.") tmp_df = gene_fit_d['strains'].join(gene_fit_d['strain_lrn']) tmp_df.sort_values(by=['scaffold','pos']) write_DataFrame_and_log(os.path.join(op_dir,"strain_fit.tab"), tmp_df, df_name="Strain Fit") #14 expsUsed (subset of original exps file with used experiments write_DataFrame_and_log(os.path.join(op_dir,"expsUsed.tab"), exps_df, df_name="expsUsed") #15 Cofit if 'cofit' in gene_fit_d and gene_fit_d['cofit'] is not None: # Why do we repeat the three columns sysName, locusId and desc # with hitSysName, hitId, and hitDesc etc? 
tmp_df = f3col_genes_df.merge(gene_fit_d['cofit'], on="locusId") pre_merge_df = pd.DataFrame.from_dict({ "hitId" : genes_df["locusId"], "hitSysName" : genes_df["sysName"], "hitDesc" : genes_df["desc"] }) tmp_df = tmp_df.merge(pre_merge_df) tmp_df.sort_values(by=["locusId", "rank"], inplace=True, axis=0) else: logging.warning("Cofit not found in gene_fit_d") tmp_df = pd.DataFrame.from_dict({ "locusId": [""], "sysName": [""], "desc": [""], "cofit": [""], "rank":[""], "hitId": [""], "hitSysName": [""], "hitDesc": [""] }) write_DataFrame_and_log(os.path.join(op_dir, "cofit.tab"), tmp_df, df_name="cofit") #16 specphe - specific phenotypes if "specphe" in gene_fit_d and gene_fit_d["specphe"] is not None: #print(f3col_genes_df) #print(f3col_genes_df.dtypes) #print(gene_fit_d['specphe']) #print(gene_fit_d['specphe'].dtypes) tmp_df = f3col_genes_df.merge(gene_fit_d['specphe'], on="locusId") else: tmp_df = pd.DataFrame.from_dict({ "locusId": [""], "sysName": [""], "desc": [""], "short": [""], "Group": [""], "Condition_1": [""], "Concentraion_1": [""], "Units_1": [""], "Condition_2": [""], "Concentration_2": [""], "Units_2": [""], }) print(tmp_df.head(6)) write_DataFrame_and_log(os.path.join(op_dir, "specific_phenotypes.tab"), tmp_df, df_name="specific phenotypes") # 17 Strong - # We create the dataframe 'strong.tab' # we find which normalized log ratios are greater than 2 e.g. and # 't' scores are greater than 5 e.g. create_strong_tab(gene_fit_d, genes_df, exps_df, op_dir, strong_lr=cfg["strong_lr"], strong_t=cfg["strong_t"], debug=debug) #18 High # High Fitness if "high" in gene_fit_d: write_DataFrame_and_log(os.path.join(op_dir, "high_fitness.tab"), gene_fit_d['high'], df_name="high fitness") #19 HTML Info html_info_d = { "organism_name": organism_name_str, "number_of_experiments": len(gene_fit_d['q']['short']) - \ list(gene_fit_d['q']['short']).count("Time0"), "number_of_successes": list(gene_fit_d['q']['u']).count(True), "version": gene_fit_d['version'], "date": str(datetime.now()) } with open(os.path.join(op_dir, "html_info.json"), 'w') as g: g.write(json.dumps(html_info_d, indent=2)) logging.info("Finished exporting all tables and files to " + op_dir) return 0
6da02333d462afe79f9189cde3b3c92cd6793955
3,647,197
import pytz from datetime import datetime import json def auto_update_function(cities): """Auto-update weather function The function takes a list of the cities to update. If the error connecting to sources - an error with a status of 500 and JSON with the cause of the error and URL. If the connection is successful, it enters the data into the database and returns an empty response with code 200. """ try: connect = psycopg2.connect(database = 'django_test', user = 'roman', host = 'localhost', password = 'admin') cursor = connect.cursor() cursor.execute( 'SELECT city_name FROM frontend_city;' ) utc_timezone = pytz.timezone('UTC') #read current city list from database cities_list = [] cities_cursor = cursor.fetchall() #list of tuple to just list for i in range(len(cities_cursor)): cities_list.append(cities_cursor[i][0]) for i in range(len(cities)): yandex_value = yandex(cities[i]) open_weather_value = open_weather_map(cities[i]) # error in yandex source if type(yandex_value[0]) == error.HTTPError: data = { 'Error': 'Error in auto update function.', 'Time': str(datetime.datetime.now(utc_timezone)), 'Reason': '{}. Please, check url: {}'.format(yandex_value[0], yandex_value[1]) } json_data_error = json.dumps(data) response = HttpResponse(json_data_error, status=500, content_type='application/json', charset='utf-8') return response # error in open weather source elif (type(open_weather_value[0]) == error.HTTPError): data = { 'Error': 'Error in auto update function.', 'Time': datetime.datetime.now(utc_timezone), 'Reason': '{}. Please, check url: {}'.format(open_weather_value[0], open_weather_value[1]) } json_data_error = json.dumps(data) response = HttpResponse(json_data_error, status=500, content_type='application/json', charset='utf-8') return response #If the city has not been checked before elif (cities[i] not in cities_list): cursor.execute("INSERT INTO frontend_city (city_name) values ('{}');".format(cities[i])) connect.commit() data = { 'Yandex': str(yandex_value[0]), 'Open weather': str(open_weather_value[0]) } cursor.execute("SELECT id FROM frontend_city WHERE city_name = '{}';".format(cities[i])) city_id = cursor.fetchall() city_id = city_id[0][0] json_data = json.dumps(data) cursor.execute( "INSERT INTO frontend_history (city_id, temp_values, created) \ VALUES ({},'{}', '{}');".format(city_id, json_data, datetime.datetime.now(utc_timezone))) connect.commit() connect.close() response = HttpResponse(status=200, content_type='text/html', charset='utf-8') return response except Exception as connection_db_error: data = { 'Error': 'Error in auto update function.', 'Time': str(datetime.datetime.now(utc_timezone)), 'Reason': '{}'.format(connection_db_error) } json_data_error = json.dumps(data) response = HttpResponse(json_data_error, status=500, content_type='application/json', charset='utf-8') return response connect.close()
6c57685b1d4a4c62d6225df17dd1bbee6c1a3934
3,647,198
def absolute_sum_of_changes(x):
    """
    Returns the sum over the absolute value of consecutive changes in the series x

    .. math:: \\sum_{i=1, \ldots, n-1} \\mid x_{i+1} - x_i \\mid

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :return: the value of this feature
    :return type: float
    """
    return np.sum(abs(np.diff(x)))
b9cc5109335b754d7d6c8014af5d84e75cd94723
3,647,199
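A hedged worked example: for the series [1, 4, 2, 2, 7] the consecutive changes are +3, -2, 0, +5, so the absolute sum of changes is 3 + 2 + 0 + 5 = 10.

import numpy as np
import pandas as pd

x = pd.Series([1, 4, 2, 2, 7])
print(absolute_sum_of_changes(x))   # 10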