content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def simplex_edge_tensors(dimensions,  # type: int
                         centers_in,  # type: List[List[int]]
                         centers_out,  # type: List[List[int]]
                         surrounds_in,  # type: List[List[int]]
                         surrounds_out,  # type: List[List[int]]
                         attractor_function=__euclid_function_generator,  # type: Callable[[Real], Callable[[Real], Real]]
                         flip=None  # type: Optional[int]
                         ):
    """
    Generates the minimum number of edge_orientation_detector tensors needed to represent all
    orientations of boundaries in n-dimensional space, with positive values only.
    This results in one more tensor than when negative values are allowed.

    :param dimensions: number of dimensions.
    :param centers_in: list of colors added together on points on the edge_orientation_detector.
    :param centers_out: list of colors outputted on points on the edge_orientation_detector.
    :param surrounds_in: list of colors subtracted together on points off the edge_orientation_detector
    :param surrounds_out: list of colors outputted on points off the edge_orientation_detector.
    :param attractor_function: function that takes in the number of dimensions and outputs a function
        that takes in distances and returns positive values for small distances and negative values
        for large distances.
    :return: a list of tensors for finding all orientations of boundaries.
    """
    simplex = __simplex_coordinates(dimensions)
    if flip is not None:
        simplex = np.flip(simplex, flip)
    return [edge_tensor(simplex_vector, center_in, center_out, surround_in, surround_out, attractor_function)
            for simplex_vector, center_in, center_out, surround_in, surround_out
            in zip(simplex, centers_in, centers_out, surrounds_in, surrounds_out)]
fb1fdf0a46939db10770984b28dc4f33cb42d0b9
3,645,300
def hashtoaddress(PARAMETER):
    """
    Converts a 160-bit hash to an address.

    [PARAMETER] is required and should be an address hash.
    """
    d = urllib2.urlopen(blockexplorer('hashtoaddress') + '/' + str(PARAMETER))
    return d.read()
6e96698792d1e64c3feca9d6d9b14b02554cfc50
3,645,301
import types import builtins import ast def get_all_objects(line: str, frame: types.FrameType) -> ObjectsInfo: """Given a (partial) line of code and a frame, obtains a dict containing all the relevant information about objects found on that line so that they can be formatted as part of the answer to "where()" or they can be used during the analysis of the cause of the exception. The dict returned has five keys. The first three, 'locals', 'globals', 'builtins', each containing a list of tuples, each tuple being of the form (name, repr(obj), obj) where name --> obj. The fourth key, 'expressions', contains a list of tuples of the form ('name', obj). It is only occasionally used in helping to make suggestions regarding the cause of some exception. """ objects: ObjectsInfo = { "locals": [], "globals": [], "builtins": [], "expressions": [], "name, obj": [], } scopes = ( ("locals", frame.f_locals), # always have locals before globals ("globals", frame.f_globals), ) names = set() tokens = token_utils.get_significant_tokens(line) if not tokens: return objects for tok in tokens: if tok.is_identifier(): name = tok.string if name in names: continue for scope, scope_dict in scopes: if name in scope_dict: names.add(name) obj = scope_dict[name] objects[scope].append((name, repr(obj), obj)) objects["name, obj"].append((name, obj)) break else: if name in dir(builtins): names.add(name) obj = getattr(builtins, name) objects["builtins"].append((name, repr(obj), obj)) objects["name, obj"].append((name, obj)) line = line.strip() if line.startswith(("def", "if", "while", "class", "for")) and line.endswith(":"): line += " pass" try: atok = ASTTokens(line.strip(), parse=True) except SyntaxError as e: if "unexpected EOF" not in str(e): debug_helper.log(f"problem with ASTTokens: {e}") debug_helper.log(f"line: {line}") return objects if atok is not None: evaluator = Evaluator.from_frame(frame) for nodes, obj in group_expressions( pair for pair in evaluator.find_expressions(atok.tree) ): name = atok.get_text(nodes[0]) if not name or name in names: continue names.add(name) objects["name, obj"].append((name, obj)) try: # We're not interested in showing literals in the list of variables ast.literal_eval(name) except Exception: # noqa objects["expressions"].append((name, obj)) return objects
65f2d8e756da32d883c07456feb1d088aa5f4efa
3,645,302
def magenta(msg):
    """Return colorized <msg> in magenta"""
    return __fore(msg, 'magenta')
64eda26662e283779d1a0c1884166b538aa6bb8f
3,645,303
def request_latest_news():
    """
    This method queries the last item of the database and converts it to a string.
    :return: A String with the last item of the database
    """
    article = News.query.order_by(News.id.desc()).first()
    return format_latest_article(article, request.content_type)
4ff0dc4d7f63465125d38f0683619e59a8f915e0
3,645,304
def is_vulgar(words, sentence):
    """Checks if a given line has any of the bad words from the bad words list."""
    for word in words:
        if word in sentence:
            return 1
    return 0
f8ff64f1d29313c145ebbff8fef01961e14cfd1f
3,645,305
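A minimal usage sketch for the `is_vulgar` snippet above; the word list and sentences are purely illustrative.

bad_words = ["darn", "heck"]
print(is_vulgar(bad_words, "oh heck no"))        # -> 1
print(is_vulgar(bad_words, "perfectly polite"))  # -> 0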
def edges_cross(graph, nodes1, nodes2):
    """
    Finds edges between two sets of disjoint nodes.
    Running time is O(len(nodes1) * len(nodes2))

    Args:
        graph (nx.Graph): an undirected graph
        nodes1 (set): set of nodes disjoint from `nodes2`
        nodes2 (set): set of nodes disjoint from `nodes1`.
    """
    return {e_(u, v) for u in nodes1 for v in nodes2.intersection(graph.adj[u])}
96c3b2d2de97547cb16d9f2e0071bb093e815d28
3,645,306
def basket_view(func):
    """ Returns rendered page for basket """
    @jinja2_view('basket.html', template_lookup=[TEMPLATES_DIR])
    def _basket_view_call(*args, **kwargs):
        func(*args, **kwargs)
        return {'col_mapping': COLUMN_MAPPING,
                'product_list': _format_products_for_web(get_basket_products())}
    return _basket_view_call
c818d1bd77fe100df857d746109f20caebd8581f
3,645,307
def py2to3(target_path, interpreter_command_name="python", is_transform=False, is_del_bak=False, is_html_diff=False, is_check_requirements=False): """ The main entrance of the 2to3 function provides a series of parameter entrances. The main functions are as follows: 1. Whether to enable automatic conversion of Python2 code to Python3 2. Determine whether to keep a backup of Python2 code 3. Determine whether to open the conversion code text comparison 4. Determine whether the version of the library that the project depends on is suitable for the current Python environment. :param target_path: str, project path :param interpreter_command_name: str, interpreter command name, default "python" Please make sure that the Python terminal environment has been configured successfully :param is_transform: bool, default False :param is_del_bak: bool, default False :param is_html_diff: bool, default False :param is_check_requirements: bool, default False :return: bool, ignore """ # Whether to enable automatic conversion of Python2 code to Python3 if is_transform: files_transform( target_path=target_path, interpreter_command_name=interpreter_command_name ) # Determine whether to keep a backup of Python2 code if is_del_bak: bak_files_clear(target_path=target_path) # Determine whether to open the conversion code text comparison if is_html_diff: html_diff_generate(target_path=target_path) # Determine whether the version of the library that the project # depends on is suitable for the current Python environment. if is_check_requirements: libraries_detect_and_recommend(target_path=target_path) return True
8581beacd7daa174309da99c6857acec841345bf
3,645,308
import os


def _norm_path(path):
    """
    Decorator intended to normalize the output of a path
    retrieval function. Useful for fixing slash/backslash
    issues on Windows.
    """
    def normalize_path(*args):
        return os.path.normpath(path(*args))

    return normalize_path
5d86cc9fdab4ed9643398e2741bcf5f90d8b97e5
3,645,309
import re def _get_hash_aliases(name): """ internal helper used by :func:`lookup_hash` -- normalize arbitrary hash name to hashlib format. if name not recognized, returns dummy record and issues a warning. :arg name: unnormalized name :returns: tuple with 2+ elements: ``(hashlib_name, iana_name|None, ... 0+ aliases)``. """ # normalize input orig = name if not isinstance(name, str): name = to_native_str(name, 'utf-8', 'hash name') name = re.sub("[_ /]", "-", name.strip().lower()) if name.startswith("scram-"): # helper for SCRAM protocol (see passlib.handlers.scram) name = name[6:] if name.endswith("-plus"): name = name[:-5] # look through standard names and known aliases def check_table(name): for row in _known_hash_names: if name in row: return row result = check_table(name) if result: return result # try to clean name up some more m = re.match(r"(?i)^(?P<name>[a-z]+)-?(?P<rev>\d)?-?(?P<size>\d{3,4})?$", name) if m: # roughly follows "SHA2-256" style format, normalize representation, # and checked table. iana_name, rev, size = m.group("name", "rev", "size") if rev: iana_name += rev hashlib_name = iana_name if size: iana_name += "-" + size if rev: hashlib_name += "_" hashlib_name += size result = check_table(iana_name) if result: return result # not found in table, but roughly recognize format. use names we built up as fallback. log.info("normalizing unrecognized hash name %r => %r / %r", orig, hashlib_name, iana_name) else: # just can't make sense of it. return something iana_name = name hashlib_name = name.replace("-", "_") log.warning("normalizing unrecognized hash name and format %r => %r / %r", orig, hashlib_name, iana_name) return hashlib_name, iana_name
537c30fee93c465a768e80dd6fc8314555b65df5
3,645,310
def dirac_2d_v_and_h(direction, G_row, vec_len_row, num_vec_row, G_col, vec_len_col, num_vec_col,
                     a, K, noise_level, max_ini, stop_cri):
    """
    used to run the reconstructions along horizontal and vertical directions in parallel.
    """
    if direction == 0:  # row reconstruction
        c_recon, min_error, b_recon, ini = \
            recon_2d_dirac_vertical(G_row, vec_len_row, num_vec_row, a, K,
                                    noise_level, max_ini, stop_cri)
    else:  # column reconstruction
        c_recon, min_error, b_recon, ini = \
            recon_2d_dirac_vertical(G_col, vec_len_col, num_vec_col, a, K,
                                    noise_level, max_ini, stop_cri)
    return c_recon, min_error, b_recon, ini
e68945c68cb80ef001e027c30651d1f3a38369e4
3,645,311
import importlib from typing import Tuple def Matrix(*args, **kwargs): """*Funktion zur Erzeugung von Matrizen mit beliebiger Dimension""" h = kwargs.get("h") if h in (1, 2, 3): matrix_hilfe(h) return elif isinstance(h, (Integer, int)): matrix_hilfe(1) return Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor # Erzeugen einer SymPy-Matrix auf die übliche Art if iterable(args) and not isinstance(args[0], Vektor): m = SympyMatrix(*args, **kwargs) for i in range(m.rows): for j in range(m.cols): try: m[i, j] = nsimplify(m[i, j]) except RecursionError: pass return m # Erzeugen einer SymPy-Matrix anhand der Spaltenvektoren try: if not args: raise AglaError('mindestens zwei Vektoren angeben') if isinstance(args[0], (tuple, Tuple, list, set)): vektoren = args[0] if not type(vektoren) == list: vektoren = list(vektoren) else: vektoren = list(args) if not all(isinstance(v, Vektor) for v in vektoren): raise AglaError('Vektoren angeben') if not all(v.dim == vektoren[0].dim for v in vektoren): raise AglaError('die Vektoren haben unterschiedliche Dimension') except AglaError as e: print('agla:', str(e)) liste = [ [k for k in v.komp] for v in vektoren ] m, n = vektoren[0].dim, len(vektoren) zeilen = [ [liste[i][j] for i in range(n)] for j in range(m) ] M = SympyMatrix(zeilen) return M
f9bae41e6ce6f6b3c144d8844317ae7b2272bb91
3,645,312
def afw_word_acceptance(afw: dict, word: list) -> bool:
    """ Checks if a **word** is accepted by input AFW, returning True/False.

    The word w is accepted by an AFW if there exists at least one accepting run on w.
    A run for AFWs is a tree, and an alternating automaton can have multiple runs on
    a given input. A run is accepting if all the leaf nodes are accepting states.

    :param dict afw: input AFW;
    :param list word: list of symbols ∈ afw['alphabet'].
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
    return __recursive_acceptance(afw, afw['initial_state'], word)
52ff4c5fa2c8d2c8af667ee9c03e587b2c4ac10b
3,645,313
from operator import and_ def get_following(): """ endpoint: /release/following method: GET param: "[header: Authorization] Token": str - Token received from firebase response_type: array response: id: 1 created: 123456789 vol: 1 chapter: 1 title: Chapter titles url: /chapter/1 manga: title: manga title url: /manga/1/manga-title cover: manga_cover_url error: 404: code: 404 message: There are no new chapters available """ list_manga = UsersManga.query.filter(and_( UsersManga.user_uid.like(g.uid), UsersManga.favorited.is_(True), )).all() list_manga_id = [x.mangas.id for x in list_manga] chapters = ( Chapter.query .filter(Chapter.manga_id.in_(list_manga_id)) .order_by(Chapter.manga_id) .distinct(Chapter.manga_id) .from_self() .order_by(Chapter.created.desc()) .limit(10).all() ) if not chapters: return jsonify({ 'code': 404, 'message': 'There are no new chapters available' }) return jsonify(chapters_schema.dump(chapters).data)
90999ec6a4e14bf3c3633ef38f0e020cca62623b
3,645,314
import re


def matchNoSpaces(value):
    """Match strings with no spaces."""
    if re.search(r'\s', value):
        return False
    return True
6b33c6b500f78664c04ef8c507e9b25fa19c760d
3,645,315
import re


def collect_inline_comments(list_of_strings, begin_token=None, end_token=None):
    """Reads a list of strings and returns all of the inline comments in a list.
    Output form is ['comment', line_number, string_location].
    Returns None if there are none or tokens are set to None."""
    if begin_token in [None] and end_token in [None]:
        return None
    match = re.compile('{0}(?P<inline_comments>.*){1}'.format(re.escape(begin_token), re.escape(end_token)))
    inline_comment_list = []
    for index, line in enumerate(list_of_strings):
        comment_match = re.search(match, line)
        if comment_match:
            inline_comment_list.append([comment_match.group('inline_comments'), index, comment_match.start()])
    if inline_comment_list:
        return inline_comment_list
    else:
        return None
8ff2dfa055b2f2a3ef72842518b2fb87bcb62c1e
3,645,316
import os
from pathlib import Path
import pickle

import pandas as pd


def get_df_ads():
    """
    """
    #| - get_df_ads
    # #####################################################
    # import pickle; import os
    path_i = os.path.join(
        os.environ["PROJ_irox_oer"],
        "dft_workflow/job_analysis/collect_collate_dft_data",
        "out_data/df_ads.pickle")
    # with open(path_i, "rb") as fle:
    #     df_ads = pickle.load(fle)
    # # #####################################################

    my_file = Path(path_i)
    if my_file.is_file():
        with open(path_i, "rb") as fle:
            df_ads = pickle.load(fle)
    else:
        print("Couldn't read df_ads")
        print(path_i)
        print("")
        df_ads = pd.DataFrame()

    return(df_ads)
    #__|
f2253eff4dc74f55c3b256513fe6821b55015567
3,645,317
def cli_list(apic, args):
    """Implement CLI command `list`.
    """
    # pylint: disable=unused-argument
    instances = apic.get_instances()
    if instances:
        print('\n'.join(apic.get_instances()))
    return 0
7b96b1a7cf85c86627382143e1e0786956546ec1
3,645,318
import numpy as np


def is_symmetric(a: np.array):
    """
    Check whether the matrix is symmetric
    :param a:
    :return:
    """
    tol = 1e-10
    return (np.abs(a - a.T) <= tol).all()
223784091cd797d5ba5f3814fb097252d1afc349
3,645,319
def get_number(line, position):
    """Searches for the end of a number.

    Args:
        line (str): The line in which the number was found.
        position (int): The starting position of the number.

    Returns:
        str: The number found.
        int: The position after the number found.
    """
    word = ""
    for pos, char in enumerate(line[position:]):
        if char.isdigit() or char == ".":
            word += char
        else:
            return word, position + pos
    return word, len(line)
df41a1b53953b912e5ce5d6d9b3d69c4133460f1
3,645,320
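Usage sketch for `get_number` above (assumes the function is in scope); it returns the number substring and the position just past it.

print(get_number("abc 3.14 xyz", 4))   # -> ('3.14', 8)
print(get_number("width=100", 6))      # -> ('100', 9)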
from typing import TextIO

import yaml


def load(f: TextIO) -> Config:
    """Load a configuration from a file-like object f"""
    config = yaml.safe_load(f)
    if isinstance(config["diag_table"], dict):
        config["diag_table"] = DiagTable.from_dict(config["diag_table"])
    return config
0a977a5eda6ad8e0e5aa15315f914186ff65b4d6
3,645,321
def levelize_smooth_or_improve_candidates(to_levelize, max_levels): """Turn parameter in to a list per level. Helper function to preprocess the smooth and improve_candidates parameters passed to smoothed_aggregation_solver and rootnode_solver. Parameters ---------- to_levelize : {string, tuple, list} Parameter to preprocess, i.e., levelize and convert to a level-by-level list such that entry i specifies the parameter at level i max_levels : int Defines the maximum number of levels considered Returns ------- to_levelize : list The parameter list such that entry i specifies the parameter choice at level i. Notes -------- This routine is needed because the user will pass in a parameter option such as smooth='jacobi', or smooth=['jacobi', None], and this option must be "levelized", or converted to a list of length max_levels such that entry [i] in that list is the parameter choice for level i. The parameter choice in to_levelize can be a string, tuple or list. If it is a string or tuple, then that option is assumed to be the parameter setting at every level. If to_levelize is inititally a list, if the length of the list is less than max_levels, the last entry in the list defines that parameter for all subsequent levels. Examples -------- >>> from pyamg.util.utils import levelize_smooth_or_improve_candidates >>> improve_candidates = ['gauss_seidel', None] >>> levelize_smooth_or_improve_candidates(improve_candidates, 4) ['gauss_seidel', None, None, None] """ # handle default value (mutable) # improve_candidates=(('block_gauss_seidel', # {'sweep': 'symmetric', 'iterations': 4}), # None) # -> make it a list if isinstance(to_levelize, tuple): if isinstance(to_levelize[0], tuple): to_levelize = list(to_levelize) if isinstance(to_levelize, (str, tuple)): to_levelize = [to_levelize for i in range(max_levels)] elif isinstance(to_levelize, list): if len(to_levelize) < max_levels: mlz = max_levels - len(to_levelize) toext = [to_levelize[-1] for i in range(mlz)] to_levelize.extend(toext) elif to_levelize is None: to_levelize = [(None, {}) for i in range(max_levels)] return to_levelize
8b302b8cae04adae010607c394c2e5059aa46eeb
3,645,322
def get_max_num_context_features(model_config):
    """Returns maximum number of context features from a given config.

    Args:
        model_config: A model config file.

    Returns:
        An integer specifying the max number of context features if the model
        config contains context_config, None otherwise
    """
    meta_architecture = model_config.WhichOneof("model")
    meta_architecture_config = getattr(model_config, meta_architecture)

    if hasattr(meta_architecture_config, "context_config"):
        return meta_architecture_config.context_config.max_num_context_features
1df5d220e30cfa5b440c0063149e2ebaf896352a
3,645,323
import hashlib


def hashname(name, secsalt):
    """Obtain a sha256 hash from a name."""
    m = hashlib.sha256()
    m.update((name + secsalt).encode("utf-8"))
    return m.hexdigest()
0db5fbf39eed899162535b6647a047f49e39fa34
3,645,324
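Usage sketch for `hashname` above; the salt value is purely illustrative.

digest = hashname("alice", "my-secret-salt")
print(len(digest))  # -> 64 (hex-encoded SHA-256)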
def company_detail(request, stock_quote: int) -> HttpResponse:
    """ Return a view to Company details """
    try:
        company = Company.objects.get(quote=str(stock_quote))
        # TODO(me): Implement company detail view logic
    except Company.DoesNotExist:
        raise Http404("Company with related quote does not exist")

    return render(request, 'company_detail.html', {
        'company': company
    })
95e542e7386361709fedc98375bdbc9c5dd8780b
3,645,325
def parse_encoding_header(header):
    """
    Break up the `HTTP_ACCEPT_ENCODING` header into a dict of
    the form, {'encoding-name': qvalue}.
    """
    encodings = {'identity': 1.0}

    for encoding in header.split(","):
        if(encoding.find(";") > -1):
            encoding, qvalue = encoding.split(";")
            encoding = encoding.strip()
            qvalue = qvalue.split('=', 1)[1]
            if(qvalue != ""):
                encodings[encoding] = float(qvalue)
            else:
                encodings[encoding] = 1
        else:
            encodings[encoding] = 1
    return encodings
0d423ad51ff14589b5858681cf32a0f318e6dbfa
3,645,326
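A minimal usage sketch for `parse_encoding_header` above; it assumes the function is in scope and the header value has no stray whitespace (names without a q-value are not stripped).

accepted = parse_encoding_header("gzip;q=0.9,deflate,br;q=0.8")
print(accepted)  # -> {'identity': 1.0, 'gzip': 0.9, 'deflate': 1, 'br': 0.8}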
def opf_consfcn(x, om, Ybus, Yf, Yt, ppopt, il=None, *args): """Evaluates nonlinear constraints and their Jacobian for OPF. Constraint evaluation function for AC optimal power flow, suitable for use with L{pips}. Computes constraint vectors and their gradients. @param x: optimization vector @param om: OPF model object @param Ybus: bus admittance matrix @param Yf: admittance matrix for "from" end of constrained branches @param Yt: admittance matrix for "to" end of constrained branches @param ppopt: PYPOWER options vector @param il: (optional) vector of branch indices corresponding to branches with flow limits (all others are assumed to be unconstrained). The default is C{range(nl)} (all branches). C{Yf} and C{Yt} contain only the rows corresponding to C{il}. @return: C{h} - vector of inequality constraint values (flow limits) limit^2 - flow^2, where the flow can be apparent power real power or current, depending on value of C{OPF_FLOW_LIM} in C{ppopt} (only for constrained lines). C{g} - vector of equality constraint values (power balances). C{dh} - (optional) inequality constraint gradients, column j is gradient of h(j). C{dg} - (optional) equality constraint gradients. @see: L{opf_costfcn}, L{opf_hessfcn} @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad Autonoma de Manizales) @author: Ray Zimmerman (PSERC Cornell) """ ##----- initialize ----- ## unpack data ppc = om.get_ppc() baseMVA, bus, gen, branch = \ ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"] vv, _, _, _ = om.get_idx() ## problem dimensions nb = bus.shape[0] ## number of buses nl = branch.shape[0] ## number of branches ng = gen.shape[0] ## number of dispatchable injections nxyz = len(x) ## total number of control vars of all types ## set default constrained lines if il is None: il = arange(nl) ## all lines have limits by default nl2 = len(il) ## number of constrained lines ## grab Pg & Qg Pg = x[vv["i1"]["Pg"]:vv["iN"]["Pg"]] ## active generation in p.u. Qg = x[vv["i1"]["Qg"]:vv["iN"]["Qg"]] ## reactive generation in p.u. ## put Pg & Qg back in gen gen[:, PG] = Pg * baseMVA ## active generation in MW gen[:, QG] = Qg * baseMVA ## reactive generation in MVAr ## rebuild Sbus Sbus = makeSbus(baseMVA, bus, gen) ## net injected power in p.u. ## ----- evaluate constraints ----- ## reconstruct V Va = x[vv["i1"]["Va"]:vv["iN"]["Va"]] Vm = x[vv["i1"]["Vm"]:vv["iN"]["Vm"]] V = Vm * exp(1j * Va) ## evaluate power flow equations mis = V * conj(Ybus * V) - Sbus ##----- evaluate constraint function values ----- ## first, the equality constraints (power flow) g = r_[ mis.real, ## active power mismatch for all buses mis.imag ] ## reactive power mismatch for all buses ## then, the inequality constraints (branch flow limits) if nl2 > 0: flow_max = (branch[il, RATE_A] / baseMVA)**2 flow_max[flow_max == 0] = Inf if ppopt['OPF_FLOW_LIM'] == 2: ## current magnitude limit, |I| If = Yf * V It = Yt * V h = r_[ If * conj(If) - flow_max, ## branch I limits (from bus) It * conj(It) - flow_max ].real ## branch I limits (to bus) else: ## compute branch power flows ## complex power injected at "from" bus (p.u.) Sf = V[ branch[il, F_BUS].astype(int) ] * conj(Yf * V) ## complex power injected at "to" bus (p.u.) 
St = V[ branch[il, T_BUS].astype(int) ] * conj(Yt * V) if ppopt['OPF_FLOW_LIM'] == 1: ## active power limit, P (Pan Wei) h = r_[ Sf.real**2 - flow_max, ## branch P limits (from bus) St.real**2 - flow_max ] ## branch P limits (to bus) else: ## apparent power limit, |S| h = r_[ Sf * conj(Sf) - flow_max, ## branch S limits (from bus) St * conj(St) - flow_max ].real ## branch S limits (to bus) else: h = zeros((0,1)) ##----- evaluate partials of constraints ----- ## index ranges iVa = arange(vv["i1"]["Va"], vv["iN"]["Va"]) iVm = arange(vv["i1"]["Vm"], vv["iN"]["Vm"]) iPg = arange(vv["i1"]["Pg"], vv["iN"]["Pg"]) iQg = arange(vv["i1"]["Qg"], vv["iN"]["Qg"]) iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T ## compute partials of injected bus powers dSbus_dVm, dSbus_dVa = dSbus_dV(Ybus, V) ## w.r.t. V ## Pbus w.r.t. Pg, Qbus w.r.t. Qg neg_Cg = sparse((-ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) ## construct Jacobian of equality constraints (power flow) and transpose it dg = lil_matrix((2 * nb, nxyz)) blank = sparse((nb, ng)) dg[:, iVaVmPgQg] = vstack([ ## P mismatch w.r.t Va, Vm, Pg, Qg hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]), ## Q mismatch w.r.t Va, Vm, Pg, Qg hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg]) ], "csr") dg = dg.T if nl2 > 0: ## compute partials of Flows w.r.t. V if ppopt['OPF_FLOW_LIM'] == 2: ## current dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \ dIbr_dV(branch[il, :], Yf, Yt, V) else: ## power dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \ dSbr_dV(branch[il, :], Yf, Yt, V) if ppopt['OPF_FLOW_LIM'] == 1: ## real part of flow (active power) dFf_dVa = dFf_dVa.real dFf_dVm = dFf_dVm.real dFt_dVa = dFt_dVa.real dFt_dVm = dFt_dVm.real Ff = Ff.real Ft = Ft.real ## squared magnitude of flow (of complex power or current, or real power) df_dVa, df_dVm, dt_dVa, dt_dVm = \ dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft) ## construct Jacobian of inequality constraints (branch limits) ## and transpose it. dh = lil_matrix((2 * nl2, nxyz)) dh[:, r_[iVa, iVm].T] = vstack([ hstack([df_dVa, df_dVm]), ## "from" flow limit hstack([dt_dVa, dt_dVm]) ## "to" flow limit ], "csr") dh = dh.T else: dh = None return h, g, dh, dg
f90083088e6de9668ed44cdc950aa81bf96e2450
3,645,327
def iou3d_kernel(gt_boxes, pred_boxes):
    """
    Core iou3d computation (with cuda)

    Args:
        gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates
        pred_boxes: [M, 7]

    Returns:
        iou3d: [N, M]
    """
    intersection_2d = rotate_iou_gpu_eval(gt_boxes[:, [0, 1, 3, 4, 6]],
                                          pred_boxes[:, [0, 1, 3, 4, 6]], criterion=2)
    gt_max_h = gt_boxes[:, [2]] + gt_boxes[:, [5]] * 0.5
    gt_min_h = gt_boxes[:, [2]] - gt_boxes[:, [5]] * 0.5
    pred_max_h = pred_boxes[:, [2]] + pred_boxes[:, [5]] * 0.5
    pred_min_h = pred_boxes[:, [2]] - pred_boxes[:, [5]] * 0.5
    max_of_min = np.maximum(gt_min_h, pred_min_h.T)
    min_of_max = np.minimum(gt_max_h, pred_max_h.T)
    inter_h = min_of_max - max_of_min
    inter_h[inter_h <= 0] = 0
    #inter_h[intersection_2d <= 0] = 0
    intersection_3d = intersection_2d * inter_h
    gt_vol = gt_boxes[:, [3]] * gt_boxes[:, [4]] * gt_boxes[:, [5]]
    pred_vol = pred_boxes[:, [3]] * pred_boxes[:, [4]] * pred_boxes[:, [5]]
    union_3d = gt_vol + pred_vol.T - intersection_3d
    #eps = 1e-6
    #union_3d[union_3d<eps] = eps
    iou3d = intersection_3d / union_3d
    return iou3d
368f457b7afe6e5653839d130b6d6b8a6ce1ab7c
3,645,328
def get_final_metrics(raw_metrics, summarized=False): """ Calculates final metrics from all categories. :param summarized: True if the result should contain only final metrics (precision recall, f1 and f0.5) False if the result should contain all the per category metrics too. :param raw_metrics: A dictionary of tp, fp and fn values for each category :return: a dictionary with the precision, recall, f1 and f0.5 metrics, as well as the input metrics data. """ tp = 0 fp = 0 fn = 0 num_values = 0 num_samples = 0 final_metrics = dict() for category in raw_metrics: category_tp = raw_metrics[category]['TP'] category_fp = raw_metrics[category]['FP'] category_fn = raw_metrics[category]['FN'] final_metrics[category] = {} if category_tp > 0: final_metrics[category]['precision'] = category_tp / (category_tp + category_fp) final_metrics[category]['recall'] = category_tp / (category_tp + category_fn) final_metrics[category]['f1'] = f_beta(final_metrics[category]['precision'], final_metrics[category]['recall'], 1 ) if 'num_values' in raw_metrics[category]: final_metrics[category]['num_values'] = raw_metrics[category]['num_values'] if 'num_samples' in raw_metrics[category]: final_metrics[category]['num_samples'] = raw_metrics[category]['num_samples'] tp += category_tp fp += category_fp fn += category_fn num_values += final_metrics[category]['num_values'] num_samples += final_metrics[category]['num_samples'] if (tp + fp) > 0: final_metrics['precision'] = tp / (tp + fp) else: final_metrics['precision'] = np.nan if (tp + fn) > 0: final_metrics['recall'] = tp / (tp + fn) else: final_metrics['recall'] = np.nan final_metrics['f1'] = f_beta(final_metrics['precision'], final_metrics['recall'], 1) final_metrics['f0.5'] = f_beta(final_metrics['precision'], final_metrics['recall'], 0.5) final_metrics['num_values'] = num_values final_metrics['num_samples'] = num_samples if summarized: return summarize_metrics(final_metrics) else: return final_metrics
4782342efe12765a4de7d4eb9ed2b458f7d56686
3,645,329
from pprint import pprint
from re import IGNORECASE, findall


def get_health_feed():
    """ Parse BBC news health feed and remove articles not related to COVID-19. """
    feed = parse("http://feeds.bbci.co.uk/news/health/rss.xml")

    # log parsed feed for debugging purposes
    logger.debug(pprint(feed.entries))
    logger.debug(f"Feed items before removal: {len(feed.entries)}.")

    # Remove all feed items not related to COVID-19
    for index, article in enumerate(feed.entries):
        if any(findall(r'Coronavirus|COVID|Covid|Covid-19', article.title, IGNORECASE)):
            continue
        else:
            logger.debug(f"Removing item at index: {index}.")
            feed.entries.pop(index)

    return feed
858eba4afd9ae6b47d865c0f7ba1b31d2d0f69a5
3,645,330
def get_data_meta_path(either_file_path: str) -> tuple:
    """Take either a meta or binary file path and return both as a tuple.

    Arguments:
        either_file_path {str} -- path of a meta/binary file

    Returns:
        [type] -- (binary_path, meta_path)
    """
    file_stripped = '.'.join(either_file_path.split('.')[:-1])
    return tuple([file_stripped + ext for ext in ['.bin', '.meta']])
0456186cd99d5899e2433ac9e44ba0424077bcc0
3,645,331
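Usage sketch for `get_data_meta_path` above; the file name is illustrative.

print(get_data_meta_path("run01/sample.meta"))  # -> ('run01/sample.bin', 'run01/sample.meta')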
import click


def group(name):
    """
    Allow to create a group with a default click context
    and a class for Click's ``didyoumean`` without having
    to repeat it for every group.
    """
    return click.group(
        name=name, context_settings=CLICK_CONTEXT_SETTINGS, cls=AliasedGroup)
5a36442760cdb86bb89d76bf88c3aa2f3d5dea5b
3,645,332
def get_files(target_files, config):
    """Retrieve files associated with the potential inputs.
    """
    out = []
    find_fn = _find_file(config)
    for fname in target_files.keys():
        remote_fname = find_fn(fname)
        if remote_fname:
            out.append(remote_fname)
    return out
577feb99d15eeec5e22d96dd9fce47a311d60cad
3,645,333
def cmd(func, *args, **kwargs):
    """Takes a function followed by its arguments"""
    def command(*a, **ka):
        return func(*args, **kwargs)
    return command
9ace378335461080b51dce4936c9a8e0965b3454
3,645,334
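A small usage sketch for `cmd` above: the wrapper freezes the arguments at definition time and ignores anything passed to the returned callable.

greet = cmd(print, "hello", "world")
greet()            # prints "hello world"
greet("ignored")   # still prints "hello world"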
def flow_accumulation(receiver_nodes, baselevel_nodes, node_cell_area=1.0,
                      runoff_rate=1.0, boundary_nodes=None):
    """Calculate drainage area and (steady) discharge.

    Calculates and returns the drainage area and (steady) discharge at each
    node, along with a downstream-to-upstream ordered list (array) of node IDs.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.components.flow_accum import flow_accumulation
    >>> r = np.array([2, 5, 2, 7, 5, 5, 6, 5, 7, 8])-1
    >>> b = np.array([4])
    >>> a, q, s = flow_accumulation(r, b)
    >>> a
    array([ 1., 3., 1., 1., 10., 4., 3., 2., 1., 1.])
    >>> q
    array([ 1., 3., 1., 1., 10., 4., 3., 2., 1., 1.])
    >>> s
    array([4, 1, 0, 2, 5, 6, 3, 8, 7, 9])
    """
    s = make_ordered_node_array(receiver_nodes, baselevel_nodes)
    # Note that this ordering of s DOES INCLUDE closed nodes. It really shouldn't!
    # But as we don't have a copy of the grid accessible here, we'll solve this
    # problem as part of route_flow_dn.

    a, q = find_drainage_area_and_discharge(s, receiver_nodes, node_cell_area,
                                            runoff_rate, boundary_nodes)

    return a, q, s
e3a7801ed4639ad8168491c4a1689c37adfe930f
3,645,335
def extract_ids(response_content):
    """Given a search result's content, returns a list of all ids.
    This method is meant to work with PubMed"""
    ids = str(response_content).split("<Id>")
    ids_str = "".join(ids)
    ids = ids_str.split("</Id>")
    ids.remove(ids[0])
    ids.remove(ids[len(ids) - 1])
    for i in range(len(ids)):
        ids[i] = int(ids[i][2:])
    return ids
69ad17a9a6bc3b56a11dceafb802fbf7eb1eac66
3,645,336
def gatorosc(candles: np.ndarray, sequential=False) -> GATOR:
    """
    Gator Oscillator by Bill M. Williams

    :param candles: np.ndarray
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    if not sequential and len(candles) > 240:
        candles = candles[-240:]

    jaw = shift(smma(candles, period=13, sequential=True), 8)
    teeth = shift(smma(candles, period=8, sequential=True), 5)
    lips = shift(smma(candles, period=5, sequential=True), 3)

    upper = np.abs(jaw - teeth)
    lower = -np.abs(teeth - lips)

    upper_change = talib.MOM(upper, timeperiod=1)
    lower_change = -talib.MOM(lower, timeperiod=1)

    if sequential:
        return GATOR(upper, lower, upper_change, lower_change)
    else:
        return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])
2890fa42836ea020ebb54427f7b3c8a773cf13c5
3,645,337
def program_item(prog_hash):
    """ GET,DELETE /programs/<prog_hash>: query programs

    :prog_hash: program checksum/identifier
    :returns: flask response
    """
    if request.method == 'GET':
        with client.client_access() as c:
            prog = c.user_programs.get(prog_hash)
        return respond_json(prog.properties) if prog else respond_error(404)
    else:
        raise NotImplementedError
7a27d4083facc02e71e08a9bffda217fadc5a22e
3,645,338
import json import logging def lambda_handler(event, context): """ Federate Token Exchange Lambda Function """ if not "body" in event: return helper.build_response( {"message": "You do not have permission to access this resource."}, 403 ) input_json = dict() input_json = json.loads(event["body"]) # verify the client_id and redirect_uri if not "client_id" in input_json or not "redirect_uri" in input_json: return helper.build_response( {"message": "You do not have permission to access this resource."}, 403 ) response_type = "code" if "response_type" in input_json: response_type = input_json["response_type"] # verify the client_id and redirect_uri if not "client_id" in input_json or not "redirect_uri" in input_json: return helper.build_response( {"message": "You do not have permission to access this resource."}, 403 ) client_id = input_json["client_id"] redirect_uri = input_json["redirect_uri"] _, msg = helper.verify_client_id_and_redirect_uri( user_pool_id=USER_POOL_ID, client_id=client_id, redirect_uri=redirect_uri ) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) federate_account = None platform = input_json["platform"].lower() platform_login_data = dict() platform_login_data["platform"] = platform # register the federate record in the user table if ( "id_token" in input_json or "access_token" in input_json or "platform_code" in input_json ): if "platform_code" in input_json: platform_code = input_json["platform_code"] secret_client = boto3.client("secretsmanager", region_name="ap-southeast-1") if platform == "linkedin": secret = secret_client.get_secret_value(SecretId=LINKEDIN_SECRET_ARN) secret_dict = json.loads(secret["SecretString"]) platform_client_id = secret_dict["client_id"] platform_client_secret = secret_dict["client_secret"] if "platform_redirect_uri" not in input_json: return helper.build_response( { "message": "You do not have permission to access this resource." 
}, 403, ) platform_redirect_uri = input_json["platform_redirect_uri"] resp, msg = federate.linkedin_code_to_access_token( linkedin_client_id=platform_client_id, linkedin_client_secret=platform_client_secret, linkedin_redirect_uri=platform_redirect_uri, code=platform_code, ) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) platform_login_data["access_token"] = resp["access_token"] elif platform == "facebook": secret = secret_client.get_secret_value(SecretId=FACEBOOK_SECRET_ARN) secret_dict = json.loads(secret["SecretString"]) platform_client_id = secret_dict["client_id"] platform_client_secret = secret_dict["client_secret"] resp, msg = federate.facebook_code_to_access_token( facebook_client_id=platform_client_id, facebook_client_secret=platform_client_secret, code=platform_code, ) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) platform_login_data["access_token"] = resp["access_token"] elif platform == "google": secret = secret_client.get_secret_value(SecretId=GOOGLE_SECRET_ARN) secret_dict = json.loads(secret["SecretString"]) platform_client_id = secret_dict["client_id"] platform_client_secret = secret_dict["client_secret"] resp, msg = federate.google_code_to_access_token( google_client_id=platform_client_id, google_client_secret=platform_client_secret, code=platform_code, ) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) platform_login_data["access_token"] = resp["access_token"] if "id_token" in input_json: platform_login_data["id_token"] = input_json["id_token"] if "access_token" in input_json: platform_login_data["access_token"] = input_json["access_token"] federate_account, msg = federate.verify_federate_and_register_or_get_user( user_table_name=USER_TABLE_NAME, platform_login_data=platform_login_data, mode="get", ) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) token_response = dict() token_response["platform"] = platform if "id_token" in platform_login_data: token_response["platform_id_token"] = platform_login_data["id_token"] if "access_token" in platform_login_data: token_response["platform_access_token"] = platform_login_data["access_token"] if not federate_account is None: # if 3rd party access_token validated correctly, check we generate our own token using CUSTOM_AUTH challenge password = "" resp, msg = helper.initiate_auth( USER_POOL_ID, federate_account["cognito_email"], password, client_id, auth_flow="CUSTOM_AUTH", ) # cognito error message check if msg != None: logger.info(msg) return helper.build_response({"message": msg}, 403) logger.info("CHALLENGE PASSED") if "AuthenticationResult" in resp: formatted_authentication_result = helper.format_authentication_result(resp) if response_type == "code": # get the authorization code auth_code, msg = helper.store_token_to_dynamodb_and_get_auth_code( auth_code_table_name=AUTH_CODE_TABLE_NAME, client_id=client_id, redirect_uri=redirect_uri, token_set=formatted_authentication_result, ) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) # return the authorization code return helper.build_response({"code": auth_code}, 200) elif response_type == "token": token_response["access_token"] = formatted_authentication_result[ "access_token" ] token_response["id_token"] = formatted_authentication_result["id_token"] token_response["refresh_token"] = formatted_authentication_result[ "refresh_token" ] token_response["expires_in"] = formatted_authentication_result[ 
"expires_in" ] token_response["token_type"] = formatted_authentication_result[ "token_type" ] else: return helper.build_response( {"message": "Unsupported response type."}, 403 ) logger.info(token_response) return helper.build_response(token_response, 200)
16456ebb905cdb2b1782a1017928574e4c90b9cd
3,645,339
from typing import List


def find_domain_field(fields: List[str]):
    """Find and return domain field value."""
    field_index = 0
    for field in fields:
        if field == "query:":
            field_value = fields[field_index + 1]
            return field_value
        field_index += 1
    return None
fac45f0bd7cead3ad1ec01307c6c623c8d39dbd4
3,645,340
def placeValueOf(num: int, place: int) -> int:
    """
    Get the value on the place specified.
    :param num: The num
    :param place: The place. 1 for unit place, 10 for tens place, 100 for hundreds place.
    :return: The value digit.
    """
    return lastDigitOf(num // place)
8b50ca8a79b267f40b2638b331879746e0bcad7f
3,645,341
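A usage sketch for `placeValueOf` above. `lastDigitOf` is not part of the snippet; a plausible helper is assumed below purely so the example runs.

def lastDigitOf(n: int) -> int:
    return n % 10  # assumed helper: last decimal digit

print(placeValueOf(3456, 100))  # -> 4 (hundreds place)
print(placeValueOf(3456, 10))   # -> 5 (tens place)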
def prepare_polygon_coords_for_bokeh(countries):
    """Prepares the country polygons for plotting with Bokeh.

    To plot series of polygons, Bokeh needs two lists of lists (one for x coordinates,
    and another for y coordinates). Each element in the outer list represents a single
    polygon, and each element in the inner lists represents the coordinate for a single
    point in given polygon.

    This function takes a GeoDataFrame with a given set of countries, and returns
    Bokeh-friendly lists of x coordinates and y coordinates for those countries.

    PARAMETERS:
    -----------
    countries: GeoDataFrame with a given set of countries.

    OUTPUTS:
    --------
    x_coords, y_coords: Bokeh-friendly lists of x and y coordinates for those countries.
    """
    # Simplify shapes (to resolution of 10000 meters), convert polygons to multipolygons.
    list_of_polygons = []
    for raw_poly in countries['geometry']:
        raw_poly = raw_poly.simplify(10000, preserve_topology=False)
        if isinstance(raw_poly, Polygon):
            raw_poly = MultiPolygon([raw_poly])
        for poly in list(raw_poly):
            list_of_polygons.append(poly)

    # Create lists of lists.
    x_coords = [list(poly.exterior.coords.xy[0]) for poly in list_of_polygons]
    y_coords = [list(poly.exterior.coords.xy[1]) for poly in list_of_polygons]

    return x_coords, y_coords
1d325e895cf8efdcaf69ae1ebcb369216e3378de
3,645,342
def get_incident_ids_as_options(incidents):
    """
    Collect the campaign incident ids from the context and return them as options for MultiSelect field

    :type incidents: ``list``
    :param incidents: the campaign incidents to collect ids from

    :rtype: ``dict``
    :return: dict with the ids as options for MultiSelect field e.g. {"hidden": False, "options": ids}
    """
    try:
        ids = [str(incident['id']) for incident in incidents]
        ids.sort(key=lambda incident_id: int(incident_id))
        ids.insert(0, ALL_OPTION)
        return {"hidden": False, "options": ids}
    except KeyError as e:
        raise DemistoException(NO_ID_IN_CONTEXT) from e
ea44808dfa7b5cb6aa43951062bf3a2401f0c588
3,645,343
from typing import List
import glob
import csv


def get_result(dir_path: str) -> List[float]:
    """Extract win/loss data from match logs (csv).

    Args:
        dir_path (str): Path to the directory containing the match logs to extract.

    Returns:
        List[float]: Win-rate data.
    """
    files = glob.glob(dir_path + "*.csv")
    result = []
    for file in files:
        csv_file = open(file, "r")
        csv_data = csv.reader(csv_file, delimiter=",", doublequote=True,
                              lineterminator="\r\n", quotechar='"', skipinitialspace=True)
        win = 0
        lose = 0
        for data in csv_data:
            if int(data[1]) >= int(data[2]):
                win += 1
            else:
                lose += 1
        result.append(win/(win+lose))
    return result
52f6e1d5e432ec1d56524654cba2ddae9c60426c
3,645,344
def get_internal_energies( compounds: dict, qrrho: bool = True, temperature: float = 298.15 ): """Obtain internal energies for compounds at a given temperature. Parameters ---------- compounds : dict-like A descriptor of the compounds. Mostly likely, this comes from a parsed input file. See `overreact.io.parse_model`. qrrho : bool, optional Apply the quasi-rigid rotor harmonic oscillator (QRRHO) approximation of M. Head-Gordon and others (see [*J. Phys. Chem. C* **2015**, 119, 4, 1840–1850](http://dx.doi.org/10.1021/jp509921r)) on top of the classical RRHO. temperature : array-like, optional Absolute temperature in Kelvin. Returns ------- array-like Examples -------- >>> import overreact as rx >>> from overreact import _constants as constants >>> model = rx.parse_model("data/ethane/B97-3c/model.k") >>> internal_energies = get_internal_energies(model.compounds) >>> (internal_energies - internal_energies.min()) / constants.kcal array([0. , 2.20053981]) """ compounds = rx.io._check_compounds(compounds) internal_energies = [] for name in compounds: logger.info(f"calculate internal energy: {name}") # TODO(schneiderfelipe): inertia might benefit from caching moments, _, _ = coords.inertia( compounds[name].atommasses, compounds[name].atomcoords ) internal_energy = rx.thermo.calc_internal_energy( energy=compounds[name].energy, degeneracy=compounds[name].mult, moments=moments, vibfreqs=compounds[name].vibfreqs, qrrho=qrrho, temperature=temperature, ) internal_energies.append(internal_energy) return np.array(internal_energies)
788955bed2cc643b5440168c5debde41986df570
3,645,345
from typing import Tuple
from typing import Dict
from typing import Iterable
from typing import List
from typing import Any
import os
import json


def load_folder(folder: str) -> Tuple[Dict[str, Iterable[List[str]]], Dict[str, Any]]:
    """
    Loads data from the folder output using neurips_crawler
        output/data_<year>/papers_data.jsons
        output/data_<year>/pdfs/<files>
    where
    - <year> is a 4 digits year associated to the year of the Neurips conference.
    - papers_data.json is a metadata file for each paper in this conference
    - <files> are the raw PDF file for this conference
    """
    year_data = {}
    with open(os.path.join(folder, 'papers_data.jsons'), 'r') as f:
        for line in f.readlines():
            paper_data = json.loads(line.strip())
            year_data[paper_data['pdf_name']] = paper_data

    files = {}
    for file in os.listdir(os.path.join(folder, 'pdfs')):
        files[file] = pdf_parser.get_text(os.path.join(folder, 'pdfs', file), local=True)

    return files, year_data
088279b5f3d66436e23ca6b2663cf6a79fdaa7c4
3,645,346
def get_local_info(hass):
    """Get HA's local location config."""
    latitude = hass.config.latitude
    longitude = hass.config.longitude
    timezone = str(hass.config.time_zone)
    elevation = hass.config.elevation

    return latitude, longitude, timezone, elevation
1fdefbad46c7cdb58abdc36f7d8799aa1e4af87c
3,645,347
def if_present_phrase(src_str_tokens, phrase_str_tokens):
    """
    :param src_str_tokens: a list of strings (words) of source text
    :param phrase_str_tokens: a list of strings (words) of a phrase
    :return:
    """
    match_pos_idx = -1
    match_flag = False  # ensure a defined value even if the source is shorter than the phrase
    for src_start_idx in range(len(src_str_tokens) - len(phrase_str_tokens) + 1):
        match_flag = True
        # iterate each word in target, if one word does not match, set match=False and break
        for seq_idx, seq_w in enumerate(phrase_str_tokens):
            src_w = src_str_tokens[src_start_idx + seq_idx]
            if src_w != seq_w:
                match_flag = False
                break
        if match_flag:
            match_pos_idx = src_start_idx
            break

    return match_flag, match_pos_idx
37297c78bb26c7cda28010e1f7567a19e2f875ee
3,645,348
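Usage sketch for `if_present_phrase` above (assumes the function is in scope).

found, start = if_present_phrase(["the", "cat", "sat", "down"], ["cat", "sat"])
print(found, start)  # -> True 1
found, start = if_present_phrase(["the", "cat"], ["dog"])
print(found, start)  # -> False -1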
def fit_2D_xanes_non_iter(img_xanes, eng, spectrum_ref, error_thresh=0.1): """ Solve equation of Ax=b, where: Inputs: ---------- A: reference spectrum (2-colume array: xray_energy vs. absorption_spectrum) X: fitted coefficient of each ref spectrum b: experimental 2D XANES data Outputs: ---------- fit_coef: the 'x' in the equation 'Ax=b': fitted coefficient of each ref spectrum cost: cost between fitted spectrum and raw data """ num_ref = len(spectrum_ref) spec_interp = {} comp = {} A = [] s = img_xanes.shape for i in range(num_ref): tmp = interp1d( spectrum_ref[f"ref{i}"][:, 0], spectrum_ref[f"ref{i}"][:, 1], kind="cubic" ) A.append(tmp(eng).reshape(1, len(eng))) spec_interp[f"ref{i}"] = tmp(eng).reshape(1, len(eng)) comp[f"A{i}"] = spec_interp[f"ref{i}"].reshape(len(eng), 1) comp[f"A{i}_t"] = comp[f"A{i}"].T # e.g., spectrum_ref contains: ref1, ref2, ref3 # e.g., comp contains: A1, A2, A3, A1_t, A2_t, A3_t # A1 = ref1.reshape(110, 1) # A1_t = A1.T A = np.squeeze(A).T M = np.zeros([num_ref + 1, num_ref + 1]) for i in range(num_ref): for j in range(num_ref): M[i, j] = np.dot(comp[f"A{i}_t"], comp[f"A{j}"]) M[i, num_ref] = 1 M[num_ref] = np.ones((1, num_ref + 1)) M[num_ref, -1] = 0 # e.g. # M = np.array([[float(np.dot(A1_t, A1)), float(np.dot(A1_t, A2)), float(np.dot(A1_t, A3)), 1.], # [float(np.dot(A2_t, A1)), float(np.dot(A2_t, A2)), float(np.dot(A2_t, A3)), 1.], # [float(np.dot(A3_t, A1)), float(np.dot(A3_t, A2)), float(np.dot(A3_t, A3)), 1.], # [1., 1., 1., 0.]]) M_inv = np.linalg.inv(M) b_tot = img_xanes.reshape(s[0], -1) B = np.ones([num_ref + 1, b_tot.shape[1]]) for i in range(num_ref): B[i] = np.dot(comp[f"A{i}_t"], b_tot) x = np.dot(M_inv, B) x = x[:-1] x[x < 0] = 0 x_sum = np.sum(x, axis=0, keepdims=True) x = x / x_sum cost = np.sum((np.dot(A, x) - b_tot) ** 2, axis=0) / s[0] cost = cost.reshape(s[1], s[2]) x = x.reshape(num_ref, s[1], s[2]) # cost = compute_xanes_fit_cost(img_xanes, x, spec_interp) mask = compute_xanes_fit_mask(cost, error_thresh) mask = mask.reshape(s[1], s[2]) mask_tile = np.tile(mask, (x.shape[0], 1, 1)) x = x * mask_tile cost = cost * mask return x, cost
2146223aae8bf5ac13f658134a09c5682219777d
3,645,349
def get_cmap(n_fg):
    """Generate a color map for visualizing foreground objects

    Args:
        n_fg (int): Number of foreground objects

    Returns:
        cmaps (numpy.ndarray): Colormap
    """
    cmap = cm.get_cmap('Set1')
    cmaps = []
    for i in range(n_fg):
        cmaps.append(np.asarray(cmap(i))[:3])
    cmaps = np.vstack(cmaps)
    return cmaps
010df9e117d724de398eeb919417a71795aad460
3,645,350
def GetBasinOutlines(DataDirectory, basins_fname):
    """
    This function takes in the raster of basins and gets a dict of basin polygons,
    where the key is the basin key and the value is a shapely polygon of the basin.

    IMPORTANT: In this case the "basin key" is usually the junction number:
    this function will use the raster values as keys and in general the basin
    rasters are output based on junction indices rather than keys

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): the basin raster

    Returns:
        list of shapely polygons with the basins

    Author: FJC
    """
    # read in the basins raster
    this_fname = basins_fname.split('.')
    print(basins_fname)
    OutputShapefile = this_fname[0]+'.shp'

    # polygonise the raster
    BasinDict = LSDMap_IO.PolygoniseRaster(DataDirectory, basins_fname, OutputShapefile)
    return BasinDict
0731451ff765318d63f36950be88dd5c73504bf0
3,645,351
def detect_park(frame, hsv): """ Expects: HSV image of any shape + current frame Returns: TBD """ #hsv = cv2.cvtColor(frame, cfg.COLOUR_CONVERT) # convert to HSV CS # filter mask = cv2.inRange(hsv, lower_green_park, upper_green_park) # operations mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=1) mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel,iterations=1) img = cv2.bitwise_and(frame,frame,mask = mask) # logic height, width = mask.shape[:2] contours, _ = cv2.findContours(mask[0:int(height/2), 0:width], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for cnt in contours: area = cv2.contourArea(cnt) # calculate area of the contour x,y,w,h = cv2.boundingRect(cnt) # create a rectangle around the contour #roi = frame[y:y+h, x:x+w] # select an ROI out of the frame # check if the ROI is in allowed area vr = valid_range(x,y,w,h,frame) if not vr: continue # calculate ratio of sides - anything not square is not worth checking sr = is_squarish(h, w) if not sr: continue # check the area size (too small ignore, too big ignore) if cfg.AREA_SIZE_PARK < area < cfg.MAX_AREA_SIZE: #and ( w / h < 1.0): if cfg.DEMO_MODE: cv2.rectangle(frame, (x,y), (x+w, y+h), (127,255,127), 2) cv2.putText(frame, "PARK", (x,y), cfg.FONT, 2, (127,255,127)) return "park" return None
5cd63590741ac005e7b05090ae77bca6623cf420
3,645,352
def normalize(mx):
    """Row-normalize sparse matrix"""
    mx = np.array(mx)
    rowsum = mx.sum(axis=1)
    r_inv = np.power(rowsum, -1.0).flatten()  # use -1.0 as asym matrix
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = np.diag(r_inv)
    a = np.dot(r_mat_inv, mx)
    #a = np.dot(a, r_mat_inv) #skip for asym matrix
    #return a #normalized matrix
    return mx
6351bc777731eed2119e59ee411d7338e55d2ced
3,645,353
def th_allclose(x, y):
    """
    Determine whether two torch tensors have same values
    Mimics np.allclose
    """
    return th.sum(th.abs(x-y)) < 1e-5
e788192dede11e9af8bef08b7aff39440e0fe318
3,645,354
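Quick check for `th_allclose` above; it assumes `import torch as th`, matching the alias used inside the function.

import torch as th
print(th_allclose(th.tensor([1.0, 2.0]), th.tensor([1.0, 2.0])))  # -> tensor(True)
print(th_allclose(th.tensor([1.0, 2.0]), th.tensor([1.0, 2.5])))  # -> tensor(False)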
import h5py


def _check_h5_installed(strict=True):
    """Aux function."""
    try:
        return h5py
    except ImportError:
        if strict is True:
            raise RuntimeError('For this functionality to work, the h5py '
                               'library is required.')
        else:
            return False
732300ff4171366c8a3328669068120e21411890
3,645,355
def calc_c_o(row):
    """
    C or O excess
    if (C/O>1):
        excess = log10 [(YC/YH) - (YO/YH)] + 12
    if C/O<1:
        excess = log10 [(YO/YH) - (YC/YH)] + 12
    where
        YC = X(C12)/12 + X(C13)/13
        YO = X(O16)/16 + X(O17)/17 + X(O18)/18
        YH = XH/1.00794
    """
    yh = row['H'] / 1.00794
    yc = row['C12'] / 12. + row['C13'] / 13.
    yo = row['O16'] / 16. + row['O17'] / 17. + row['O18'] / 18.
    if row['CO'] > 1:
        excess = np.log10((yc / yh) - (yo / yh)) + 12.
    else:
        excess = np.log10((yo / yh) - (yc / yh)) + 12.
    return excess
16677f983e17465a509f2b27ec1866d3e56f00da
3,645,356
import json


def create_job_from_file(job_file):
    """Creates a job from a JSON job specification.

    :param job_file: Path to job file.
    :type job_file: str
    :returns: Job object of specified type.
    """
    logger.info("Creating Job from {}.".format(job_file))
    with open(job_file) as f:
        params = json.loads(f.read())
    try:
        if not params['type'] in job_types:
            raise utils.JobDescriptionValueError('Job type {} is not valid.'.format(params['type']))
    except KeyError as e:
        raise utils.JobDescriptionKeyError(e.message)
    params['job_file'] = job_file
    return job_types[params['type']](params)
3e1e2eaa1892dafc310fcb48abd096a59cb9b5a0
3,645,357
def compile_insert_unless_conflict(
    stmt: irast.InsertStmt,
    typ: s_objtypes.ObjectType, *,
    ctx: context.ContextLevel,
) -> irast.OnConflictClause:
    """Compile an UNLESS CONFLICT clause with no ON

    This requires synthesizing a conditional based on all the exclusive
    constraints on the object.
    """
    pointers = _get_exclusive_ptr_constraints(typ, ctx=ctx)
    obj_constrs = typ.get_constraints(ctx.env.schema).objects(ctx.env.schema)

    select_ir, always_check, _ = compile_conflict_select(
        stmt, typ,
        constrs=pointers,
        obj_constrs=obj_constrs,
        parser_context=stmt.context, ctx=ctx)

    return irast.OnConflictClause(
        constraint=None, select_ir=select_ir, always_check=always_check,
        else_ir=None)
feaa0f0ea54ee51d78fe3b95c3ef20e6ea6bb4e2
3,645,358
import io


def plot_to_image(figure):
    """
    Converts the matplotlib plot specified by "figure" to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.
    """
    # Save the plot to a PNG in memory
    buf = io.BytesIO()
    figure.savefig(buf, format="png")
    buf.seek(0)

    # Convert PNG buffer to TF image
    trans = transforms.ToTensor()
    image = buf.getvalue()
    image = Image.open(io.BytesIO(image))
    image = trans(image)

    return image
14b9f223372f05f32fc096a7dafcbce273b33d0d
3,645,359
def sent2vec(model, words):
    """Convert text to a vector.

    Arguments:
        model {[type]} -- Doc2Vec model
        words {[type]} -- tokenized (word-segmented) text

    Returns:
        [type] -- vector array
    """
    vect_list = []
    for w in words:
        try:
            vect_list.append(model.wv[w])
        except:
            continue
    vect_list = np.array(vect_list)
    vect = vect_list.sum(axis=0)
    return vect / np.sqrt((vect ** 2).sum())
06569e2bdb13d31b1218ab9a3070affe626fd915
3,645,360
import requests
from requests.auth import HTTPDigestAuth


def postXML(server: HikVisionServer, path, xmldata=None):
    """
    This returns the response of the DVR to the following POST request

    Parameters:
        server (HikvisionServer): The basic info about the DVR
        path (str): The ISAPI path that will be executed
        xmldata (str): This should be formatted using `utils.dict2xml`
            This is the data that will be transmitted to the server.
            It is optional.
    """
    headers = {'Content-Type': 'application/xml'}
    responseRaw = requests.post(
        server.address() + path,
        data=xmldata,
        headers=headers,
        auth=HTTPDigestAuth(server.user, server.password))
    if responseRaw.status_code == 401:
        raise Exception("Wrong username or password")
    responseXML = responseRaw.text
    return responseXML
a5566e03b13b0938e84928dc09b6509e2dfd8a12
3,645,361
import requests def get_government_trading(gov_type: str, ticker: str = "") -> pd.DataFrame: """Returns the most recent transactions by members of government Parameters ---------- gov_type: str Type of government data between: 'congress', 'senate', 'house', 'contracts', 'quarter-contracts' and 'corporate-lobbying' ticker : str Ticker to get congress trading data from Returns ------- pd.DataFrame Most recent transactions by members of U.S. Congress """ if gov_type == "congress": if ticker: url = ( f"https://api.quiverquant.com/beta/historical/congresstrading/{ticker}" ) else: url = "https://api.quiverquant.com/beta/live/congresstrading" elif gov_type.lower() == "senate": if ticker: url = f"https://api.quiverquant.com/beta/historical/senatetrading/{ticker}" else: url = "https://api.quiverquant.com/beta/live/senatetrading" elif gov_type.lower() == "house": if ticker: url = f"https://api.quiverquant.com/beta/historical/housetrading/{ticker}" else: url = "https://api.quiverquant.com/beta/live/housetrading" elif gov_type.lower() == "contracts": if ticker: url = ( f"https://api.quiverquant.com/beta/historical/govcontractsall/{ticker}" ) else: url = "https://api.quiverquant.com/beta/live/govcontractsall" elif gov_type.lower() == "quarter-contracts": if ticker: url = f"https://api.quiverquant.com/beta/historical/govcontracts/{ticker}" else: url = "https://api.quiverquant.com/beta/live/govcontracts" elif gov_type.lower() == "corporate-lobbying": if ticker: url = f"https://api.quiverquant.com/beta/historical/lobbying/{ticker}" else: url = "https://api.quiverquant.com/beta/live/lobbying" else: return pd.DataFrame() headers = { "accept": "application/json", "X-CSRFToken": "TyTJwjuEC7VV7mOqZ622haRaaUr0x0Ng4nrwSRFKQs7vdoBcJlK9qjAS69ghzhFu", # pragma: allowlist secret "Authorization": f"Token {API_QUIVERQUANT_KEY}", } response = requests.get(url, headers=headers) if response.status_code == 200: if gov_type in ["congress", "senate", "house"]: return pd.DataFrame(response.json()).rename( columns={"Date": "TransactionDate", "Senator": "Representative"} ) return pd.DataFrame(response.json()) return pd.DataFrame()
ba3599d22825cd4a3ed3cb71f384561627067b71
3,645,362
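A usage sketch for get_government_trading, assuming a valid API_QUIVERQUANT_KEY is configured in the module:

df_senate = get_government_trading("senate")                      # latest Senate transactions
df_contracts = get_government_trading("contracts", ticker="LMT")  # historical contracts for LMT
print(df_senate.head())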
def pf_mobility(phi, gamma):
    """ Phase field mobility function. """
    # return gamma * (phi**2-1.)**2
    # func = 1.-phi**2
    # return 0.75 * gamma * 0.5 * (1. + df.sign(func)) * func
    return gamma
10045807bdb030c362d700d61789c0a490aad93b
3,645,363
def print_df_stats(df: pd.DataFrame, df_train: pd.DataFrame, df_val: pd.DataFrame,
                   df_test: pd.DataFrame, label_encoder, prediction):
    """
    Print some statistics of the split dataset.
    """
    try:
        labels = list(label_encoder.classes_)
    except AttributeError:
        labels = []

    headers = ["Images"]
    for label in labels:
        headers.append("-> " + str(label))

    def get_stats(df):
        lengths = [len(df)]
        for label in range(len(labels)):
            df_label = df[df[DF_DICT[prediction]] == label]
            lengths.append(
                str(len(df_label))
                + " ("
                + str(round((len(df_label) / len(df)), 2))
                + ")"
            )
        return lengths

    stats = []
    stats.append(["All"] + get_stats(df))
    stats.append(["Train"] + get_stats(df_train))
    stats.append(["Val"] + get_stats(df_val))
    stats.append(["Test"] + get_stats(df_test))

    print(tabulate(stats, headers=headers))
    print()
bb52799de86b069b4c480fd94c2eaf501617284f
3,645,364
import os import sys import tempfile import shutil import logging import subprocess import csv import json def hivtrace(id, input, reference, ambiguities, threshold, min_overlap, compare_to_lanl, fraction, strip_drams_flag=False, filter_edges="no", handle_contaminants="remove", skip_alignment=False, save_intermediate=True, prior=None ): """ PHASE 1) Pad sequence alignment to HXB2 length with bealign PHASE 2) Convert resulting bam file back to FASTA format PHASE 2b) Rename any duplicates in FASTA file PHASE 3) Strip DRAMs if requested PHASE 3b) Filtering contaminants before TN93 run if requested PHASE 4) TN93 analysis on the supplied FASTA file alone PHASE 5) Run hivclustercsv to return clustering information in JSON format PHASE 5b) Attribute annotations to results from (4) PHASE 6) Run tn93 against LANL if user elects to PHASE 6b) Concatenate results from pre-run LANL tn93, user tn93, and (5) analyses PHASE 6c) Flag any potential HXB2 sequences PHASE 7) Run hivclustercsv to return clustering information in json format """ results_json = {} # Declare reference file resource_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'rsrc') # These should be defined in the user's environment env_dir = os.path.dirname(sys.executable) PYTHON = sys.executable # Try python's system executable first, then the user's path. if (os.path.isfile(os.path.join(env_dir, 'bealign'))): BEALIGN = os.path.join(env_dir, 'bealign') else: BEALIGN = 'bealign' if (os.path.isfile(os.path.join(env_dir, 'bam2msa'))): BAM2MSA = os.path.join(env_dir, 'bam2msa') else: BAM2MSA = 'bam2msa' if (os.path.isfile(os.path.join(env_dir, 'hivnetworkcsv'))): HIVNETWORKCSV = os.path.join(env_dir, 'hivnetworkcsv') else: HIVNETWORKCSV = 'hivnetworkcsv' TN93DIST = 'tn93' # This will have to be another parameter LANL_FASTA = os.path.join(resource_dir, 'LANL.FASTA') LANL_TN93OUTPUT_CSV = os.path.join(resource_dir, 'LANL.TN93OUTPUT.csv') DEFAULT_DELIMITER = '|' # Check if LANL files exists. 
If not, then check if zip file exists, # otherwise throw error try: if not os.path.isfile(LANL_FASTA): lanl_zip = os.path.join(resource_dir, 'LANL.FASTA.gz') gunzip_file(lanl_zip, LANL_FASTA) if not os.path.isfile(LANL_TN93OUTPUT_CSV): lanl_tn93output_zip = os.path.join(resource_dir, 'LANL.TN93OUTPUT.csv.gz') gunzip_file(lanl_tn93output_zip, LANL_TN93OUTPUT_CSV) except e: # pragma: no cover print("Oops, missing a resource file") raise # Python Parameters SCORE_MATRIX = 'HIV_BETWEEN_F' OUTPUT_FORMAT = 'csv' SEQUENCE_ID_FORMAT = 'plain' # Intermediate filenames tmp_path = tempfile.mkdtemp(prefix='hivtrace-') basename = os.path.basename(input) BAM_FN = os.path.join(tmp_path, basename + '_output.bam') # Check if save output_fasta_fn OUTPUT_FASTA_FN = os.path.join(tmp_path, basename + '_output.fasta') if save_intermediate: OUTPUT_FASTA_FN = input + '_output.fasta' OUTPUT_TN93_FN = os.path.join(tmp_path, basename + '_user.tn93output.csv') OUTPUT_TN93_CONTAM_FN = os.path.join(tmp_path, basename + '_contam.tn93output.csv') DEST_TN93_FN = OUTPUT_TN93_FN if save_intermediate: DEST_TN93_FN = input + '_user.tn93output.csv' JSON_TN93_FN = os.path.join(tmp_path, basename + '_user.tn93output.json') JSON_TN93_CONTAM_FN = os.path.join(tmp_path, basename + '_contam.tn93output.json') OUTPUT_COMBINED_SEQUENCE_FILE = os.path.join( tmp_path, basename + "_combined_user_lanl.fasta") OUTPUT_CLUSTER_JSON = os.path.join(tmp_path, basename + '_user.trace.json') LANL_OUTPUT_CLUSTER_JSON = os.path.join(tmp_path, basename + '_lanl_user.trace.json') OUTPUT_USERTOLANL_TN93_FN = os.path.join( tmp_path, basename + '_usertolanl.tn93output.csv') USER_LANL_TN93OUTPUT = os.path.join(tmp_path, basename + '_userlanl.tn93output.csv') USER_FILTER_LIST = os.path.join(tmp_path, basename + '_user_filter.csv') CONTAMINANT_ID_LIST = os.path.join(tmp_path, basename + '_contaminants.csv') # File handler for output we don't care about DEVNULL = open(os.devnull, 'w') EXCLUSION_LIST = None # Check for incompatible statements if skip_alignment and compare_to_lanl: raise Exception( "You have passed arguments that are incompatible! You cannot compare to the public database if you elect to submit a pre-made alignment! Please consider the issue before trying again." 
) if skip_alignment: # Check for equal length in all sequences seqs = fasta_iter(input) seq_length = len(seqs.__next__()[1]) if (any(len(seq[1]) != seq_length for seq in seqs)): raise Exception("Not all input sequences have the same length!") # copy input file to output fasta file shutil.copyfile(input, OUTPUT_FASTA_FN) else: # PHASE 1 update_status(id, phases.ALIGNING, status.RUNNING) if handle_contaminants is None: handle_contaminants = 'no' bealign_process = [ BEALIGN, '-q', '-r', reference, '-m', SCORE_MATRIX, '-R', input, BAM_FN ] if handle_contaminants != 'no': bealign_process.insert(-3, '-K') logging.debug(' '.join(bealign_process)) subprocess.check_call(bealign_process, stdout=DEVNULL) update_status(id, phases.ALIGNING, status.COMPLETED) # PHASE 2 update_status(id, phases.BAM_FASTA_CONVERSION, status.RUNNING) bam_process = [BAM2MSA, BAM_FN, OUTPUT_FASTA_FN] logging.debug(' '.join(bam_process)) subprocess.check_call(bam_process, stdout=DEVNULL) update_status(id, phases.BAM_FASTA_CONVERSION, status.COMPLETED) if handle_contaminants != 'no' and handle_contaminants != 'separately': with (open(OUTPUT_FASTA_FN, 'r')) as msa: reference_name = next(SeqIO.parse(msa, 'fasta')).id logging.debug('Reference name set to %s' % reference_name) with open(CONTAMINANT_ID_LIST, 'w') as contaminants: print(reference_name, file=contaminants) # Ensure unique ids # Warn of duplicates by annotating with an attribute rename_duplicates(OUTPUT_FASTA_FN, DEFAULT_DELIMITER) attribute_map = ('SOURCE', 'SUBTYPE', 'COUNTRY', 'ACCESSION_NUMBER', 'YEAR_OF_SAMPLING') # PHASE 3 # Strip DRAMS if strip_drams_flag: OUTPUT_FASTA_FN_TMP = OUTPUT_FASTA_FN + ".spool" with open(str(OUTPUT_FASTA_FN_TMP), 'w') as output_file: for (seq_id, data) in sd.strip_drams(OUTPUT_FASTA_FN, strip_drams_flag): print(">%s\n%s" % (seq_id, data), file=output_file) shutil.move(OUTPUT_FASTA_FN_TMP, OUTPUT_FASTA_FN) # PHASE 3b Filter contaminants if handle_contaminants == 'separately': update_status(id, phases.FILTER_CONTAMINANTS, status.RUNNING) with open(JSON_TN93_CONTAM_FN, 'w') as tn93_contam_fh: tn93_contam_process = [ TN93DIST, '-q', '-o', OUTPUT_TN93_CONTAM_FN, '-t', '0.015', '-a', 'resolve', '-l', min_overlap, '-g', '1.0', '-s', reference, '-f', OUTPUT_FORMAT, OUTPUT_FASTA_FN ] logging.debug(' '.join(tn93_contam_process)) subprocess.check_call( tn93_contam_process, stdout=tn93_contam_fh, stderr=tn93_contam_fh) # shutil.copyfile(OUTPUT_TN93_FN, DEST_TN93_FN) update_status(id, phases.FILTER_CONTAMINANTS, status.COMPLETED) # Process output for contaminants and remove them from the file # Store the contaminants for reporting later with open(OUTPUT_TN93_CONTAM_FN, 'r') as tn93_contam_fh: tn93reader = csv.reader( tn93_contam_fh, delimiter=',', quotechar='|') tn93reader.__next__() contams = [row[0] for row in tn93reader] OUTPUT_FASTA_FN_TMP = OUTPUT_FASTA_FN + ".contam.tmp" # Remove contams from FASTA file with (open(OUTPUT_FASTA_FN, 'r')) as msa_fn: msa = SeqIO.parse(msa_fn, 'fasta') filtered_msa = filter(lambda x: x.id not in contams, msa) # Write to new TMP file with open(OUTPUT_FASTA_FN_TMP, "w") as output_handle: SeqIO.write(filtered_msa, output_handle, "fasta") shutil.move(OUTPUT_FASTA_FN_TMP, OUTPUT_FASTA_FN) # PHASE 4 update_status(id, phases.COMPUTE_TN93_DISTANCE, status.RUNNING) with open(JSON_TN93_FN, 'w') as tn93_fh: tn93_process = [ TN93DIST, '-q', '-0', '-o', OUTPUT_TN93_FN, '-t', threshold, '-a', ambiguities, '-l', min_overlap, '-g', fraction if ambiguities == 'resolve' else '1.0', '-f', OUTPUT_FORMAT, OUTPUT_FASTA_FN ] 
logging.debug(' '.join(tn93_process)) subprocess.check_call(tn93_process, stdout=tn93_fh, stderr=tn93_fh) if OUTPUT_TN93_FN != DEST_TN93_FN: shutil.copyfile(OUTPUT_TN93_FN, DEST_TN93_FN) update_status(id, phases.COMPUTE_TN93_DISTANCE, status.COMPLETED) # raise an exception if tn93 file is empty if is_tn93_file_empty(DEST_TN93_FN): raise Exception(' '.join(tn93_process) + "returned empty file") # send contents of tn93 to status page id_dict = id_to_attributes(OUTPUT_TN93_FN, attribute_map, DEFAULT_DELIMITER) if type(id_dict) is ValueError: update_status(id, "Error: " + id_dict.args[0]) raise id_dict # PHASE 5 update_status(id, phases.INFERRING_NETWORK, status.RUNNING) output_cluster_json_fh = open(OUTPUT_CLUSTER_JSON, 'w') hivnetworkcsv_process = [ HIVNETWORKCSV, '-i', OUTPUT_TN93_FN, '-t', threshold, '-f', SEQUENCE_ID_FORMAT, '-J', '-q' ] if filter_edges and filter_edges != 'no': hivnetworkcsv_process.extend( ['-n', filter_edges, '-s', OUTPUT_FASTA_FN]) if handle_contaminants == 'report' or handle_contaminants == 'remove': hivnetworkcsv_process.extend( ['-C', handle_contaminants, '-F', CONTAMINANT_ID_LIST]) if prior: hivnetworkcsv_process.extend( ['--prior', prior]) # hivclustercsv uses stderr for status updates complete_stderr = '' returncode = None logging.debug(' '.join(hivnetworkcsv_process)) with subprocess.Popen( hivnetworkcsv_process, stdout=output_cluster_json_fh, stderr=PIPE, bufsize=1, universal_newlines=True) as p: for line in p.stderr: complete_stderr += line update_status(id, phases.INFERRING_NETWORK, status.RUNNING, complete_stderr) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError( returncode, ' '.join(hivnetworkcsv_process), complete_stderr) update_status(id, phases.INFERRING_NETWORK, status.COMPLETED, complete_stderr) output_cluster_json_fh.close() # Read and print output_cluster_json results_json["trace_results"] = json.loads( open(OUTPUT_CLUSTER_JSON, 'r').read()) # Place singleton count in Network Summary # Place contaminant nodes in Network Summary if handle_contaminants == 'separately': results_json['trace_results']['Network Summary'][ 'contaminant_sequences'] = contams if not compare_to_lanl: return results_json if compare_to_lanl: # PHASE 6 update_status(id, phases.PUBLIC_COMPUTE_TN93_DISTANCE, status.RUNNING) lanl_tn93_process = '' if ambiguities != 'resolve': lanl_tn93_process = [ TN93DIST, '-q', '-o', OUTPUT_USERTOLANL_TN93_FN, '-t', threshold, '-a', ambiguities, '-f', OUTPUT_FORMAT, '-l', min_overlap, '-s', LANL_FASTA, OUTPUT_FASTA_FN ] else: lanl_tn93_process = [ TN93DIST, '-q', '-o', OUTPUT_USERTOLANL_TN93_FN, '-t', threshold, '-a', ambiguities, '-f', OUTPUT_FORMAT, '-g', fraction, '-l', min_overlap, '-s', LANL_FASTA, OUTPUT_FASTA_FN ] logging.debug(' '.join(lanl_tn93_process)) subprocess.check_call(lanl_tn93_process, stdout=DEVNULL) update_status(id, phases.PUBLIC_COMPUTE_TN93_DISTANCE, status.COMPLETED) # send contents of tn93 to status page # PHASE 6b # Perform concatenation # This is where reference annotation becomes an issue concatenate_data(USER_LANL_TN93OUTPUT, LANL_TN93OUTPUT_CSV, OUTPUT_USERTOLANL_TN93_FN, OUTPUT_TN93_FN) lanl_id_dict = id_to_attributes(OUTPUT_TN93_FN, attribute_map, DEFAULT_DELIMITER) # Create a list from TN93 csv for hivnetworkcsv filter create_filter_list(OUTPUT_TN93_FN, USER_FILTER_LIST) # PHASE 7 update_status(id, phases.PUBLIC_INFERRING_CONNECTIONS, status.RUNNING) lanl_output_cluster_json_fh = open(LANL_OUTPUT_CLUSTER_JSON, 'w') if filter_edges and filter_edges != 'no': with open(OUTPUT_COMBINED_SEQUENCE_FILE, 
'w') as combined_fasta: for f_path in (LANL_FASTA, OUTPUT_FASTA_FN): with open(f_path) as src_file: shutil.copyfileobj(src_file, combined_fasta) print("\n", file=combined_fasta) lanl_hivnetworkcsv_process = [ PYTHON, HIVNETWORKCSV, '-i', USER_LANL_TN93OUTPUT, '-t', threshold, '-f', SEQUENCE_ID_FORMAT, '-J', '-q', '-k', USER_FILTER_LIST, '-n', filter_edges, '-s', OUTPUT_COMBINED_SEQUENCE_FILE ] else: lanl_hivnetworkcsv_process = [ PYTHON, HIVNETWORKCSV, '-i', USER_LANL_TN93OUTPUT, '-t', threshold, '-f', SEQUENCE_ID_FORMAT, '-J', '-q', '-k', USER_FILTER_LIST ] if handle_contaminants == 'report' or handle_contaminants == 'remove': lanl_hivnetworkcsv_process.extend( ['-C', handle_contaminants, '-F', CONTAMINANT_ID_LIST]) logging.debug(' '.join(lanl_hivnetworkcsv_process)) # hivclustercsv uses stderr for status updates complete_stderr = '' with subprocess.Popen( lanl_hivnetworkcsv_process, stdout=lanl_output_cluster_json_fh, stderr=PIPE, bufsize=1, universal_newlines=True) as p: for line in p.stderr: complete_stderr += line update_status(id, phases.PUBLIC_INFERRING_CONNECTIONS, status.RUNNING, complete_stderr) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError( returncode, ' '.join(lanl_hivnetworkcsv_process), complete_stderr) lanl_output_cluster_json_fh.close() update_status(id, phases.PUBLIC_INFERRING_CONNECTIONS, status.COMPLETED) #Annotate LANL nodes with id json_info = open(LANL_OUTPUT_CLUSTER_JSON, 'r').read() if json_info: # Only include clusters that are connected to supplied nodes annotate_lanl(LANL_OUTPUT_CLUSTER_JSON, LANL_FASTA) lanl_trace_results = json.loads(json_info) results_json['lanl_trace_results'] = lanl_trace_results else: logging.debug('no lanl results!') DEVNULL.close() return results_json
d49bb2b67783e35e305cf59ed7ad923831e0d1d8
3,645,365
def parse_author_mail(author):
    """Split a string of the form ``author <author-mail>`` into (author, mail)."""
    pat = author_mail_re.search(author)
    return (pat.group(1), pat.group(2)) if pat else (author, None)
01aacee7202e701ac11177efe71984a7fb1e9a4f
3,645,366
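A usage sketch for parse_author_mail, assuming the module-level author_mail_re captures the name and address as groups 1 and 2, as the docstring implies:

print(parse_author_mail("Jane Doe <jane@example.com>"))  # ('Jane Doe', 'jane@example.com')
print(parse_author_mail("Jane Doe"))                     # ('Jane Doe', None)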
def tag(name, content='', nonclosing=False, **attrs):
    """
    Wraps content in a HTML tag with optional attributes.

    This function provides a Pythonic interface for writing HTML tags with a
    few bells and whistles. The basic usage looks like this::

        >>> tag('p', 'content', _class="note", _id="note1")
        '<p class="note" id="note1">content</p>'

    Any attribute names with any number of leading underscores (e.g.,
    '_class') will have the underscores stripped away.

    If content is an iterable, the tag will be generated once per each member.

        >>> tag('span', ['a', 'b', 'c'])
        '<span>a</span><span>b</span><span>c</span>'

    It does not sanitize the tag names, though, so it is possible to specify
    invalid tag names::

        >>> tag('not valid')
        '<not valid></not valid>'

    .. warning::
        Please ensure that ``name`` argument does not come from user-specified
        data, or, if it does, that it is properly sanitized (best way is to
        use a whitelist of allowed names).

    Because attributes are specified using keyword arguments, which are then
    treated as a dictionary, there is no guarantee of attribute order. If
    attribute order is important, don't use this function.

    This module contains a few partially applied aliases for this function.
    These mostly have hard-wired first argument (tag name), and are all
    uppercase:

    - ``A`` - alias for ``<a>`` tag
    - ``BUTTON`` - alias for ``<button>`` tag
    - ``HIDDEN`` - alias for ``<input>`` tag with ``type="hidden"`` attribute
    - ``INPUT`` - alias for ``<input>`` tag with ``nonclosing`` set to ``True``
    - ``LI`` - alias for ``<li>`` tag
    - ``OPTION`` - alias for ``<option>`` tag
    - ``P`` - alias for ``<p>`` tag
    - ``SELECT`` - alias for ``<select>`` tag
    - ``SPAN`` - alias for ``<span>`` tag
    - ``SUBMIT`` - alias for ``<button>`` tag with ``type="submit"`` attribute
    - ``TEXTAREA`` - alias for ``<textarea>`` tag
    - ``UL`` - alias for ``<ul>`` tag
    """
    open_tag = '<%s>' % name
    close_tag = '</%s>' % name
    # ``attr()`` and ``to_unicode()`` are module-level helpers from the same
    # module; ``attr(name, value)`` renders a single name="value" pair.
    attrs = ' '.join([attr(k.lstrip('_'), to_unicode(v))
                      for k, v in attrs.items()])
    if attrs:
        open_tag = '<%s %s>' % (name, attrs)
    if nonclosing:
        content = ''
        close_tag = ''
    if not isinstance(content, basestring):
        try:
            return ''.join(['%s%s%s' % (open_tag, to_unicode(c), close_tag)
                            for c in content])
        except TypeError:
            pass
    return '%s%s%s' % (open_tag, to_unicode(content), close_tag)
acf4575a2c95e105ddf4231c74116d4470cf87eb
3,645,367
import random import os import json def _reconcile_phenotype(meth, fba_model_id, phenotype_id, out_model_id): """Run Gapfilling on an FBA Model [16] :param fba_model_id: an FBA model id [16.1] :type fba_model_id: kbtypes.KBaseFBA.FBAModel :ui_name fba_model_id: FBA Model ID :param phenotype_id: a phenotype simulation ID [16.2] :type phenotype_id: kbtypes.KBasePhenotypes.PhenotypeSimulationSet :ui_name phenotype_id: Phenotype Simulation Dataset ID :param out_model_id: a name for the generated FBA Model (optional) [16.3] :type out_model_id: kbtypes.KBaseFBA.FBAModel :ui_name out_model_id: Output FBA Model Name :return: something :rtype: kbtypes.Unicode :output_widget: kbaseModelMetaNarrative """ if not out_model_id: out_model_id = "model_" + ''.join([chr(random.randrange(0, 26) + ord('A')) for _ in xrange(8)]) token = os.environ['KB_AUTH_TOKEN'] workspace = os.environ['KB_WORKSPACE_ID'] fbaClient = fbaModelServices(service.URLS.fba) wildtype_phenotype_reconciliation_params = { 'auth': token, 'model_workspace': workspace, 'model': fba_model_id, 'phenotypeSet_workspace': workspace, 'phenotypeSet': phenotype_id, 'workspace': workspace, 'out_model': out_model_id, } job_id = fbaClient.queue_wildtype_phenotype_reconciliation(wildtype_phenotype_reconciliation_params)['id'] return json.dumps({'ws_name': workspace, 'model_id': out_model_id, 'job_id': job_id})
438ad093ebac8fc09372dda36c6ac3187981524d
3,645,368
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
    """
    Label mesh topology entities using global ids.
    """
    coors = cmesh.get_centroids(edim)
    coors = _to2d(coors)
    dim = cmesh.dim

    ax = _get_axes(ax, dim)
    for ii, cc in enumerate(coors):
        ax.text(*cc.T, s=ii, color=color, fontsize=fontsize)

    return ax
a3e96c090b6f439bcf5991e2df306f5305758cef
3,645,369
import datetime  # imported as a module so that datetime.datetime.utcnow() resolves


def build_filename():
    """Build out the filename based on current UTC time."""
    now = datetime.datetime.utcnow()
    fname = now.strftime('rib.%Y%m%d.%H00.bz2')
    hour = int(now.strftime('%H'))
    if not hour % 2 == 0:
        # Round odd hours down to the previous even hour, zero-padded.
        if len(str(hour)) == 1:
            hour = "0%d" % (hour - 1)
        else:
            hour = hour - 1
        fname = now.strftime('rib.%Y%m%d.')
        fname = fname + str(hour) + '00.bz2'
    return fname
0f68b09410bf1d749bf3492e974be315d2fcaa0d
3,645,370
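A quick sketch of the filenames produced by build_filename above, for two hypothetical UTC times:

# At 2024-05-07 13:25 UTC this returns 'rib.20240507.1200.bz2';
# at 2024-05-07 08:10 UTC it returns 'rib.20240507.0800.bz2'.
print(build_filename())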
import torch
import torch.nn.functional as F  # needed for F.softmax below


def sample_sequence(model, length, context=None, temperature=1.0, top_k=10, sample=True,
                    device='cuda', use_constrained_decoding=False,
                    constrained_decoding_threshold=0.3,
                    person_to_category_to_salient_ngram_embed=(), word_embeds=(),
                    tokenizer=None):
    """
    :param model:
    :param length:
    :param context:
    :param temperature:
    :param top_k:
    :param sample:
    :param device:
    :param use_constrained_decoding:
    :param constrained_decoding_threshold:
    :param person_to_category_to_salient_ngram_embed:
    :param word_embeds:
    :param tokenizer:
    :return:
    """
    # Assume batch size of 1.
    context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0)
    orig_context_length = context.size()[-1]
    prev = context
    output = context
    past = None
    k_sample_history = torch.tensor([], device=device, dtype=torch.float)
    # List of (timestep, token)s tried. Could be moving forward, alternate, or backward in timestep.
    sampling_path = []
    backtrack = 0
    with torch.no_grad():
        while output.size()[-1] < orig_context_length + length:
            # when using `past`, the context for the next call should be only
            # the previous token: https://github.com/huggingface/transformers/issues/1749
            logits, past = model(prev, past=past)
            logits = logits[:, -1, :] / temperature
            logits = top_k_logits(logits, k=top_k)
            log_probs = F.softmax(logits, dim=-1)
            prev, output, k_sample_history, backtrack, past = sampling(
                output, log_probs, k_sample_history, use_constrained_decoding,
                constrained_decoding_threshold, sample, sampling_path, backtrack,
                person_to_category_to_salient_ngram_embed, word_embeds, past,
                tokenizer, device)
            if prev == tokenizer.eos_token_id:
                break
    return output, sampling_path
9d65d5b67163e4794628d5f508517e22bbada02c
3,645,371
def main(debug=False, args=None):
    """Start the app. We will see if we need this anyway."""
    log.info('>>>>> Starting development server at http://{}/api/ <<<<<'.format(
        flask_app.config['SERVER_NAME']))
    # flask_app.run(debug=settings.FLASK_DEBUG)
    # flask_app.run(debug=config_json["FLASK_DEBUG"])
    flask_app.run(debug=debug)
    return 0
3c7c3221b32871e5783bc4b421c9cbdd1d6108a1
3,645,372
from ostap.math.models import tf1 as _tf1


def tf1 ( fun , **kwargs ) :
    """Convert function object to TF1 """
    return _tf1 ( fun , **kwargs )
b8af1dd2f7332a9b4585a4d59e8f5299f95b730c
3,645,373
def normalize_requires(filename, **kwargs):
    """Return the contents of filename, with all [Require]s split out and
    ordered at the top.

    Preserve any leading whitespace/comments.
    """
    if filename[-2:] != '.v':
        filename += '.v'
    kwargs = fill_kwargs(kwargs)
    lib = lib_of_filename(filename, **kwargs)
    all_imports = run_recursively_get_imports(lib, **kwargs)
    v_name = filename_of_lib(lib, ext='.v', **kwargs)
    contents = get_file(v_name, **kwargs)
    header, contents = split_leading_comments_and_whitespace(contents)
    contents = strip_requires(contents)
    contents = ''.join('Require %s.\n' % i for i in all_imports[:-1]) + '\n' + contents.strip() + '\n'
    return header + contents
8973207559289308f98e7c3217a4b825eeb22c91
3,645,374
import warnings
import sys
from inspect import isbuiltin  # needed for the isbuiltin() check below


def deprecate_module_with_proxy(module_name, module_dict, deprecated_attributes=None):
    """
    Usage:

        deprecate_module_with_proxy(__name__, locals())  # at bottom of module
    """
    def _ModuleProxy(module, depr):
        """Return a wrapped object that warns about deprecated accesses"""
        # http://stackoverflow.com/a/922693/2127762
        class Wrapper(object):
            def __getattr__(self, attr):
                if depr is None or attr in depr:
                    warnings.warn("Property %s is deprecated" % attr)
                return getattr(module, attr)

            def __setattr__(self, attr, value):
                if depr is None or attr in depr:
                    warnings.warn("Property %s is deprecated" % attr)
                return setattr(module, attr, value)

        return Wrapper()

    deprecated_import(module_name)
    deprs = set()
    for key in deprecated_attributes or module_dict:
        if key.startswith('_'):
            continue
        if callable(module_dict[key]) and not isbuiltin(module_dict[key]):
            module_dict[key] = deprecated(module_dict[key])
        else:
            deprs.add(key)
    sys.modules[module_name] = _ModuleProxy(sys.modules[module_name], deprs or None)
3647770021a790a6ce1f04c6ef56967f23f03569
3,645,375
import json


def index():
    """
    Returns:
        render_template (flask method): contains data required to render visualizations
    """
    graphs = []

    # extract data needed for visuals
    # TODO: Below is an example - modify to extract data for your own visuals
    genre_counts = df.groupby('genre')['message'].count().reset_index().sort_values(
        'message', ascending=False)
    genre_names = list(genre_counts.genre)

    graph_one = []
    graph_one.append(
        Bar(
            x=genre_names,
            y=genre_counts.message.tolist())
    )
    layout_one = dict(title='Distribution of Message Genres',
                      xaxis=dict(title='Genre',),
                      yaxis=dict(title='Count'),
                      )
    graphs.append(dict(data=graph_one, layout=layout_one))

    most_common_categories = Y.sum().sort_values(ascending=False).head()
    graph_two = []
    graph_two.append(
        Bar(
            x=list(most_common_categories.index),
            y=list(most_common_categories.values)
        ))
    layout_two = dict(title='Most Common Categories in Training Data',
                      xaxis=dict(title='Category',),
                      yaxis=dict(title='Count'),
                      )
    graphs.append(dict(data=graph_two, layout=layout_two))

    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
e3c0aa931635eaf9626e43f9493542100a60b768
3,645,376
import numpy
from numpy import random  # numpy's random module provides random_sample()


def uniform_dec(num):
    """ Declination distribution: uniform in sin(dec), which leads to a
    uniform distribution across all declinations.

    Parameters
    ----------
    num : int
        The number of random declinations to produce.
    """
    return (numpy.pi / 2.) - numpy.arccos(2 * random.random_sample(num) - 1)
bc8724e5aa2e65e87f253d271e3130b9379d5cb5
3,645,377
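A quick sanity-check sketch for uniform_dec, using the numpy imports shown above:

decs = uniform_dec(10000)
assert decs.min() >= -numpy.pi / 2 and decs.max() <= numpy.pi / 2
print(numpy.mean(numpy.sin(decs)))  # roughly 0, since sin(dec) is uniform on [-1, 1]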
import os import logging def run_vcfeval(job, context, sample, vcf_tbi_id_pair, vcfeval_baseline_id, vcfeval_baseline_tbi_id, fasta_path, fasta_id, bed_id, out_name = None, score_field=None): """ Run RTG vcf_eval to compare VCFs. Return a results dict like: { "f1": f1 score as float, "summary": summary file ID, "archive": output archive ID, "snp": ROC .tsv.gz data file ID for SNPs, "non_snp": ROC .tsv.gz data file ID for non-SNP variants, "weighted": ROC .tsv.gz data file ID for a weighted combination of SNP and non-SNP variants } Some ROC data file IDs may not be present if they were not calculated. """ # make a local work directory work_dir = job.fileStore.getLocalTempDir() # download the vcf call_vcf_id, call_tbi_id = vcf_tbi_id_pair[0], vcf_tbi_id_pair[1] call_vcf_name = "calls.vcf.gz" job.fileStore.readGlobalFile(vcf_tbi_id_pair[0], os.path.join(work_dir, call_vcf_name)) job.fileStore.readGlobalFile(vcf_tbi_id_pair[1], os.path.join(work_dir, call_vcf_name + '.tbi')) # and the truth vcf vcfeval_baseline_name = 'truth.vcf.gz' job.fileStore.readGlobalFile(vcfeval_baseline_id, os.path.join(work_dir, vcfeval_baseline_name)) job.fileStore.readGlobalFile(vcfeval_baseline_tbi_id, os.path.join(work_dir, vcfeval_baseline_name + '.tbi')) # download the fasta (make sure to keep input extension) fasta_name = "fa_" + os.path.basename(fasta_path) job.fileStore.readGlobalFile(fasta_id, os.path.join(work_dir, fasta_name)) # download the bed regions bed_name = "bed_regions.bed" if bed_id else None if bed_id: job.fileStore.readGlobalFile(bed_id, os.path.join(work_dir, bed_name)) # use out_name if specified, otherwise sample if sample and not out_name: out_name = sample if out_name: out_tag = '{}_vcfeval_output'.format(out_name) else: out_tag = 'vcfeval_output' # output directory out_name = out_tag # indexed sequence sdf_name = fasta_name + ".sdf" # make an indexed sequence (todo: allow user to pass one in) context.runner.call(job, ['rtg', 'format', fasta_name, '-o', sdf_name], work_dir=work_dir) # run the vcf_eval command cmd = ['rtg', 'vcfeval', '--calls', call_vcf_name, '--baseline', vcfeval_baseline_name, '--template', sdf_name, '--output', out_name, '--threads', str(context.config.vcfeval_cores)] if bed_name is not None: cmd += ['--evaluation-regions', bed_name] if context.config.vcfeval_opts: cmd += context.config.vcfeval_opts # override score field from options with one from parameter if score_field: for opt in ['-f', '--vcf-score-field']: if opt in cmd: opt_idx = cmd.index(opt) del cmd[opt_idx] del cmd[opt_idx] cmd += ['--vcf-score-field', score_field] if sample: # Pass the sample name along, since it is needed if the truth VCF has multiple samples cmd += ['--sample', sample] try: context.runner.call(job, cmd, work_dir=work_dir) except: # Dump everything we need to replicate the alignment logging.error("VCF evaluation failed. Dumping files.") context.write_output_file(job, os.path.join(work_dir, call_vcf_name)) context.write_output_file(job, os.path.join(work_dir, vcfeval_baseline_name)) # TODO: Dumping the sdf folder doesn't seem to work right. 
But we can dump the fasta context.write_output_file(job, os.path.join(work_dir, fasta_name)) if bed_name is not None: context.write_output_file(job, os.path.join(work_dir, bed_name)) raise # copy results to outstore # vcfeval_output_summary.txt out_summary_id = context.write_output_file(job, os.path.join(work_dir, out_tag, 'summary.txt'), out_store_path = '{}_summary.txt'.format(out_tag)) # vcfeval_output.tar.gz -- whole shebang context.runner.call(job, ['tar', 'czf', out_tag + '.tar.gz', out_tag], work_dir = work_dir) out_archive_id = context.write_output_file(job, os.path.join(work_dir, out_tag + '.tar.gz')) # truth VCF context.write_output_file(job, os.path.join(work_dir, vcfeval_baseline_name)) context.write_output_file(job, os.path.join(work_dir, vcfeval_baseline_name + '.tbi')) # vcfeval_output_f1.txt (used currently by tests script) f1 = parse_f1(os.path.join(work_dir, os.path.basename(out_name), "summary.txt")) f1_path = os.path.join(work_dir, "f1.txt") with open(f1_path, "w") as f: f.write(str(f1)) context.write_output_file(job, f1_path, out_store_path = '{}_f1.txt'.format(out_tag)) # Start the output dict out_dict = { "f1": f1, "summary": out_summary_id, "archive": out_archive_id } # roc data (written to outstore to allow re-plotting) for roc_name in ['snp', 'non_snp', 'weighted']: roc_file = os.path.join(work_dir, out_tag, '{}_roc.tsv.gz'.format(roc_name)) if os.path.isfile(roc_file): # Save this one dest_file = os.path.join('roc', out_tag, '{}_roc.tsv.gz'.format(roc_name)) out_dict[roc_name] = context.write_output_file(job, roc_file, dest_file) return out_dict
029ff152a276e325e34a1522b6aee87ff1ddecd4
3,645,378
def helicsInputGetBytes(ipt: HelicsInput) -> bytes:
    """
    Get the raw data for the latest value of a subscription.

    **Parameters**

    - **`ipt`** - The input to get the data for.

    **Returns**: Raw string data.
    """
    if HELICS_VERSION == 2:
        f = loadSym("helicsInputGetRawValue")
    else:
        f = loadSym("helicsInputGetBytes")
    err = helicsErrorInitialize()
    maxDataLen = helicsInputGetByteCount(ipt) + 1024
    data = ffi.new("char[{maxDataLen}]".format(maxDataLen=maxDataLen))
    actualSize = ffi.new("int[1]")
    f(ipt.handle, data, maxDataLen, actualSize, err)
    if err.error_code != 0:
        raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
    else:
        return ffi.unpack(data, length=actualSize[0])
e7d14623490aa77e800d7f1b10c1313a1f1fbf8f
3,645,379
def named_char_class(char_class, min_count=0):
    """Return a predefined character class.

    The result of this function can be passed to :func:`generate_password` as
    one of the character classes to use in generating a password.

    :param char_class: Any of the character classes named in
        :const:`CHARACTER_CLASSES`
    :param min_count: The minimum number of members of this class to appear
        in a generated password
    """
    assert char_class in CHARACTER_CLASSES
    return CharClass(frozenset(_char_class_members[char_class]), min_count)
53f1b580eba6d5ef5ea38bd04606a9fbca2cb864
3,645,380
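A usage sketch for named_char_class; the class names 'lowercase' and 'digits' are assumptions about what CHARACTER_CLASSES contains, and generate_password's signature is not shown here:

lower = named_char_class('lowercase', min_count=2)   # at least two lowercase letters
digits = named_char_class('digits', min_count=1)     # at least one digit
# Both objects can then be passed to generate_password() as described in the docstring.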
import argparse def parse_args(): """Parse commandline arguments.""" parser = argparse.ArgumentParser() parser.add_argument('--minSdkVersion', default='', dest='min_sdk_version', help='specify minSdkVersion used by the build system') parser.add_argument('--targetSdkVersion', default='', dest='target_sdk_version', help='specify targetSdkVersion used by the build system') parser.add_argument('--raise-min-sdk-version', dest='raise_min_sdk_version', action='store_true', help='raise the minimum sdk version in the manifest if necessary') parser.add_argument('--library', dest='library', action='store_true', help='manifest is for a static library') parser.add_argument('--uses-library', dest='uses_libraries', action='append', help='specify additional <uses-library> tag to add. android:requred is set to true') parser.add_argument('--optional-uses-library', dest='optional_uses_libraries', action='append', help='specify additional <uses-library> tag to add. android:requred is set to false') parser.add_argument('--uses-non-sdk-api', dest='uses_non_sdk_api', action='store_true', help='manifest is for a package built against the platform') parser.add_argument('--logging-parent', dest='logging_parent', default='', help=('specify logging parent as an additional <meta-data> tag. ' 'This value is ignored if the logging_parent meta-data tag is present.')) parser.add_argument('--use-embedded-dex', dest='use_embedded_dex', action='store_true', help=('specify if the app wants to use embedded dex and avoid extracted,' 'locally compiled code. Must not conflict if already declared ' 'in the manifest.')) parser.add_argument('--extract-native-libs', dest='extract_native_libs', default=None, type=lambda x: (str(x).lower() == 'true'), help=('specify if the app wants to use embedded native libraries. Must not conflict ' 'if already declared in the manifest.')) parser.add_argument('--has-no-code', dest='has_no_code', action='store_true', help=('adds hasCode="false" attribute to application. Ignored if application elem ' 'already has a hasCode attribute.')) parser.add_argument('input', help='input AndroidManifest.xml file') parser.add_argument('output', help='output AndroidManifest.xml file') return parser.parse_args()
4ccbb4fa225abbe4eaa249a2dbc537d338559b62
3,645,381
from typing import Sequence

import numpy as np  # needed for np.linspace / np.meshgrid below
import torch


def make_grid(spatial_dim: Sequence[int]) -> torch.Tensor:
    """Make the grid of coordinates for the Fourier neural operator input.

    Args:
        spatial_dim: A sequence of spatial dimensions `(height, width)`.

    Returns:
        A torch.Tensor with the grid of coordinates of size `(1, height, width, 2)`.
    """
    grids = []
    grids.append(np.linspace(0, 1, spatial_dim[0]))
    grids.append(np.linspace(0, 1, spatial_dim[1]))
    grid = np.vstack([u.ravel() for u in np.meshgrid(*grids)]).T
    grid = grid.reshape(1, spatial_dim[0], spatial_dim[1], 2)
    grid = grid.astype(np.float32)
    return torch.tensor(grid)
bf9c858eb068e3f20db8e736883e8b1e74155763
3,645,382
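A quick shape check for make_grid above:

grid = make_grid((64, 32))
print(grid.shape)   # torch.Size([1, 64, 32, 2])
print(grid.dtype)   # torch.float32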
import logging


def _default_handlers(stream, logging_level, include_time):
    """Return a list of the default logging handlers to use.

    Args:
        stream: See the configure_logging() docstring.
        include_time: See the configure_logging() docstring.
    """

    # Create the filter.
    def should_log(record):
        """Return whether a logging.LogRecord should be logged."""
        if record.name.startswith('blinkpy.third_party'):
            return False
        return True

    logging_filter = logging.Filter()
    logging_filter.filter = should_log

    # Create the handler.
    handler = logging.StreamHandler(stream)
    if include_time:
        prefix = '%(asctime)s - '
    else:
        prefix = ''
    if logging_level == logging.DEBUG:
        formatter = logging.Formatter(prefix + '%(name)s: [%(levelname)s] %(message)s')
    else:
        formatter = logging.Formatter(prefix + '%(message)s')

    handler.setFormatter(formatter)
    handler.addFilter(logging_filter)

    return [handler]
5aacd076f80b2a7e1649dc1806feba0df883ccfa
3,645,383
import os


def find_spec2d_from_spec1d(spec1d_files):
    """
    Find the spec2d files corresponding to the given list of spec1d files.
    This looks for the spec2d files in the same directory as the spec1d files.
    It will exit with an error if a spec2d file cannot be found.

    Args:
        spec1d_files (list of str): List of spec1d files generated by PypeIt.

    Returns:
        list of str: List of the matching spec2d files.
    """
    spec2d_files = []
    for spec1d_file in spec1d_files:
        # Check for a corresponding 2d file
        (path, filename) = os.path.split(spec1d_file)
        spec2d_file = os.path.join(path, filename.replace('spec1d', 'spec2d', 1))

        if not os.path.exists(spec2d_file):
            msgs.error(f'Could not find matching spec2d file for {spec1d_file}')

        spec2d_files.append(spec2d_file)

    return spec2d_files
7cad3cbe3679a89e8eac19d14cde9aa499a9c67c
3,645,384
import pkg_resources from pyscaffold.utils import check_setuptools_version from pyscaffold.contrib.setuptools_scm import get_version from pyscaffold.contrib.setuptools_scm.hacks import parse_pkginfo from pyscaffold.contrib.setuptools_scm.git import parse as parse_git from pyscaffold.integration import local_version2str, version2str import os import sys def bootstrap_cfg(): """Allow PyScaffold to be used to package itself. Usually, running ``python setup.py egg_info --egg-base .`` first is a good idea. """ src_dir = os.path.join(__location__, 'src') egg_info_dir = os.path.join(__location__, 'PyScaffold.egg-info') has_entrypoints = os.path.isdir(egg_info_dir) sys.path.insert(0, src_dir) pkg_resources.working_set.add_entry(src_dir) check_setuptools_version() def parse(root): try: return parse_pkginfo(root) except IOError: return parse_git(root) config = dict( version_scheme=version2str, local_scheme=local_version2str, ) if has_entrypoints: return dict(use_pyscaffold=True) else: return dict( version=get_version( root=__location__, parse=parse, **config) )
33aee76a46493cbe735a64281b5d6641ab433746
3,645,385
from dateutil.relativedelta import relativedelta  # needed for the 'y', 'm', 'md' and 'ym' units


def datedif(ctx, start_date, end_date, unit):
    """
    Calculates the number of days, months, or years between two dates.
    """
    start_date = conversions.to_date(start_date, ctx)
    end_date = conversions.to_date(end_date, ctx)
    unit = conversions.to_string(unit, ctx).lower()

    if start_date > end_date:
        raise ValueError("Start date cannot be after end date")

    if unit == 'y':
        return relativedelta(end_date, start_date).years
    elif unit == 'm':
        delta = relativedelta(end_date, start_date)
        return 12 * delta.years + delta.months
    elif unit == 'd':
        return (end_date - start_date).days
    elif unit == 'md':
        return relativedelta(end_date, start_date).days
    elif unit == 'ym':
        return relativedelta(end_date, start_date).months
    elif unit == 'yd':
        return (end_date - start_date.replace(year=end_date.year)).days

    raise ValueError("Invalid unit value: %s" % unit)
4056af5cbf2f5ff0159a6514e8ee3d09d9f4051d
3,645,386
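A usage sketch for datedif; `ctx` is a hypothetical evaluation context, and the module-level conversions helpers are assumed to accept ISO date strings:

datedif(ctx, "2020-01-15", "2023-03-20", "y")   # -> 3  (full years)
datedif(ctx, "2020-01-15", "2023-03-20", "m")   # -> 38 (full months)
datedif(ctx, "2020-01-15", "2020-01-30", "d")   # -> 15 (days)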
def tan(data):
    """Compute elementwise tan of data.

    Parameters
    ----------
    data : relay.Expr
        The input data

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.tan(data)
5c11fa721debd0082514c62f8a8f3afa268ad502
3,645,387
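A hypothetical Relay-level usage, assuming a TVM build that exposes this wrapper as relay.tan:

from tvm import relay

x = relay.var("x", shape=(3,), dtype="float32")
y = relay.tan(x)               # elementwise tangent as a Relay expression
func = relay.Function([x], y)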
def get_battery_data(battery, user=None, start = None, end = None): """ Returns a DataFrame with battery data for a user. Parameters ---------- battery: DataFrame with battery data user: string, optional start: datetime, optional end: datetime, optional """ assert isinstance(battery, pd.core.frame.DataFrame), "data is not a pandas DataFrame" if(user!= None): assert isinstance(user, str),"user not given in string format" battery_data = battery[(battery['user']==user)] else: battery_data = battery if(start!=None): start = pd.to_datetime(start) else: start = battery_data.iloc[0]['datetime'] if(end!= None): end = pd.to_datetime(end) else: end = battery_data.iloc[len(battery_data)-1]['datetime'] battery_data = battery_data[(battery_data['datetime']>=start) & (battery_data['datetime']<=end)] battery_data['battery_level'] = pd.to_numeric(battery_data['battery_level']) #df['column'].fillna(pd.Timedelta(seconds=0)) #df.dropna() battery_data = battery_data.drop_duplicates(subset=['datetime','user','device'],keep='last') battery_data = battery_data.drop(['user','device','time','datetime'],axis=1) return battery_data
d45e40e89195d099b1c7a02fc033cd665b3b72f6
3,645,388
import re import sys def fix_brushes(brushes, thresh, vmf_in, snaplo, snaphi): """ Find and fix brushes with floating point plane coordinates. Returns a tuple containing the total number of brushes whose coordinates were rounded, a list of tuples which pairs suspicious brush IDs with the greatest deviation any one of their coordinates makes from the nearest multiple of snaplo, and a fixed version of vmf_in. Keyword arguments: brushes: list of brush strings to search thresh: threshold between snaplo and snaphi vmf_in: string containing input VMF contents snaplo: deviations less than thresh will be rounded to the nearest multiple of this value snaphi: deviations equal to or greater than thresh will be rounded to the nearest multiple of this value """ vmf_out = vmf_in rounded_count = 0 percent = len(brushes) / 100.0 suspects = [] for i, brush in enumerate(brushes): brush_id = int(re.search(r'"id"\s"(\d+)"', brush).group(1)) float_planes = [] for plane in re.findall(r'"plane"\s".*?"', brush, re.DOTALL): if '.' in plane: float_planes.append(plane) if not float_planes: continue max_dev = get_max_dev(float_planes, snaplo) if max_dev < thresh or snaphi is not None: brush_new = brush for plane in float_planes: plane_new = fix_plane(plane, thresh, snaplo, snaphi) brush_new = brush_new.replace(plane, plane_new) vmf_out = vmf_out.replace(brush, brush_new) rounded_count += 1 else: suspects.append((brush_id, max_dev)) sys.stdout.write('\r%s%% complete' % str(int(i / percent))) sys.stdout.flush() sys.stdout.write("\r \n") sys.stdout.flush() return (rounded_count, suspects, vmf_out)
de257913badb6de2ae782aa3b93e8cb3897fb669
3,645,389
import logging
import traceback


def mutate():
    """
    Handles the '/mutate' path and accepts CREATE and UPDATE requests.
    Sends its response back, which either denies or allows the request.
    """
    try:
        logging.debug(request.json)
        admission_request = AdmissionRequest(request.json)
        response = __admit(admission_request)
    except Exception as err:
        if isinstance(err, BaseConnaisseurException):
            err_log = str(err)
            msg = err.user_msg  # pylint: disable=no-member
        else:
            err_log = str(traceback.format_exc())
            msg = "unknown error. please check the logs."
        send_alerts(admission_request, False, msg)
        logging.error(err_log)
        return jsonify(
            get_admission_review(
                admission_request.uid,
                False,
                msg=msg,
                detection_mode=DETECTION_MODE,
            )
        )
    send_alerts(admission_request, True)
    return jsonify(response)
9b3a649eafe228127dfc72ddf8fb346248ccba86
3,645,390
from typing import List


def generate_options_for_resource_group(control_value=None, **kwargs) -> List:
    """Dynamically generate options for resource group form field based on the
    user's selection for Environment."""
    if control_value is None:
        return []

    # Get the environment
    env = Environment.objects.get(id=control_value)

    # Get the Resource Groups as defined on the Environment. The Resource Group is a
    # CustomField that is only updated on the Env when the user syncs this field on the
    # Environment specific parameters.
    resource_groups = env.custom_field_options.filter(field__name="resource_group_arm")
    return [rg.str_value for rg in resource_groups]
8271d6bf113f18890862835dfd5d0882a7b7490f
3,645,391
def plot_map(fvcom, tide_db_path, threshold=np.inf, legend=False, **kwargs): """ Plot the tide gauges which fall within the model domain (in space and time) defined by the given FileReader object. Parameters ---------- fvcom : PyFVCOM.read.FileReader FVCOM model data as a FileReader object. tide_db_path : str Path to the tidal database. threshold : float, optional Give a threshold distance (in spherical units) beyond which a gauge is considered too far away. legend : bool, optional Set to True to add a legend to the plot. Defaults to False. Any remaining keyword arguments are passed to PyFVCOM.plot.Plotter. Returns ------- plot : PyFVCOM.plot.Plotter The Plotter object instance for the map """ tide_db = TideDB(tide_db_path) gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True) gauges_in_domain = [] fvcom_nodes = [] for gi, gauge in enumerate(gauge_locations): river_index = fvcom.closest_node(gauge, threshold=threshold) if river_index: gauge_id, gauge_dist = tide_db.get_nearest_gauge_id(*gauge) times, data = tide_db.get_tidal_series(gauge_id, np.min(fvcom.time.datetime), np.max(fvcom.time.datetime)) if not np.any(data): continue gauges_in_domain.append(gi) fvcom_nodes.append(river_index) plot = Plotter(fvcom, **kwargs) fx, fy = plot.m(fvcom.grid.lon, fvcom.grid.lat) plot.plot_field(-fvcom.grid.h) plot.axes.plot(fx[fvcom_nodes], fy[fvcom_nodes], 'ro', markersize=3, zorder=202, label='Model') # Add the gauge locations. rx, ry = plot.m(gauge_locations[:, 0], gauge_locations[:, 1]) plot.axes.plot(rx, ry, 'wo', label='Gauges') for xx, yy, name in zip(rx, ry, gauge_names[gauges_in_domain]): plot.axes.text(xx, yy, name, fontsize=10, rotation=45, rotation_mode='anchor', zorder=203) if legend: plot.axes.legend(numpoints=1, scatterpoints=1, ncol=2, loc='upper center', fontsize=10) return plot
c73069c67ecda4429c86b6f887cc5fd5a109b10b
3,645,392
from operator import or_ def get_element_block( xml_string: str, first_name: str, second_name: str = None, include_initial: bool = True, include_final: bool = True ) -> str: """ warning: use great caution if attempting to apply this function, or anything like it, to tags that that may appear more than once in the label. this _general type of_ approach to XML parsing works reliably only in the special case where tag names (or sequences of tag names, etc.) are unique (or their number of occurrences are otherwise precisely known) """ if second_name is None: element_names = [first_name] else: element_names = [first_name, second_name] split = tuple(split_at( xml_string.splitlines(), are_in(element_names, or_), keep_separator=True )) chunk = split[2] if include_initial: chunk = split[1] + chunk if include_final: chunk = chunk + split[3] return "\n".join(chunk)
426142b5f1e96dc038640305eb918d065c9bdf20
3,645,393
def eval_eu_loss(ambiguity_values, dfs_ambiguity): """Calculate the expected utility loss that results from a setting that incorporates different levels of ambiguity. Args: ambiguity_values (dict): Dictionary with various levels of ambiguity to be implemented (key = name of scenario). dfs_ambiguity (list): List of pd.DataFrame objects that containt the of simulated models. Returns: df_EU (pd.DataFrame): Dataframe that summarizes that expected utility loss under the various ambiguity scenarios. """ EU, EU_Loss = {}, {} ambiguity_labels = get_dict_labels(ambiguity_values) # KW94 specific index_value_func = [ "Value_Function_A", "Value_Function_B", "Value_Function_Edu", "Value_Function_Home", ] # Calculate the Expected Utility and EU loss for each ambiguity value # Expected utility = value function at the initial period for df, ambiguity_label in zip(dfs_ambiguity, ambiguity_labels): EU[ambiguity_label] = [] EU_Loss[ambiguity_label] = [] # Retrieve the last identifier within looped dataframe for i in range(0, df.index[-1][0] + 1): EU[ambiguity_label].append(df[index_value_func].loc[(i, 0)].max()) EU[ambiguity_label] = np.mean(EU[ambiguity_label]) EU_Loss[ambiguity_label] = np.abs( (EU[ambiguity_label] - EU["absent"]) / EU["absent"] ) # Assemble data frames df_EU = pd.DataFrame.from_dict(EU, orient="index", columns=["EU"]) df_EU["EU_Loss"] = pd.Series(EU_Loss) return df_EU
00b658640b91de4dd48e99eac6437bebafb8e9b1
3,645,394
def reset(ip: str = None, username: str = None) -> int:
    """
    Reset records that match IP or username, and return the count of removed attempts.

    This utility method is meant to be used from the CLI or via Python API.
    """
    attempts = AccessAttempt.objects.all()
    if ip:
        attempts = attempts.filter(ip_address=ip)
    if username:
        attempts = attempts.filter(username=username)

    count, _ = attempts.delete()

    log.info('AXES: Reset %s access attempts from database.', count)

    return count
3e404ef4b32cc0e183e676e7d07137780beaf3f7
3,645,395
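A usage sketch for reset, assuming a configured Django project with the axes models available:

removed = reset(ip="203.0.113.7")   # clear attempts recorded for one address
removed = reset(username="alice")   # clear attempts recorded for one user
removed = reset()                   # clear all recorded attempts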
def try_patch_column(meta_column: MetaColumn) -> bool:
    """Try to patch the meta column from request.json.

    Generator assignment must be checked for errors. Disallow column type
    change when a generator is assigned and when the column is imported.
    An error is raised in that case.
    """
    if 'col_type' in request.json and request.json['col_type'] != meta_column.col_type:
        if meta_column.reflected_column_idf is not None:
            raise ColumnError('cannot change the type of an imported column', meta_column)
        if meta_column.generator_setting is not None:
            raise ColumnError('cannot change the type of a column with an assigned generator', meta_column)
    patch_all_from_json(meta_column, ['name', 'col_type', 'nullable'])
    generator_setting_id = request.json.get('generator_setting_id')
    if generator_setting_id is not None:
        facade = inject(GeneratorFacade)
        return facade.update_column_generator(meta_column, generator_setting_id)
    return True
0feb5598853b8a5b1cd060bd806f2fcc6afd69f6
3,645,396
import os

import btrfsutil


def get_subvs(parent):
    """
    :param parent:
    :return:
    """
    # ls_dirs = [os.path.join(parent, name) for name in os.listdir(parent)
    #            if os.path.isdir(os.path.join(parent, name))]
    # Join each entry with the parent so the subvolume check works regardless
    # of the current working directory.
    return [directory for directory in os.listdir(parent)
            if btrfsutil.is_subvolume(os.path.join(parent, directory))]
1baa63ce2b461299ef7e0bf0a6021aa4b988520e
3,645,397
def readout(x, mask, aggr='add'):
    """
    Args:
        x:  (B, N_max, F)
        mask:  (B, N_max)
    Returns:
        (B, F)
    """
    return aggregate(x=x, dim=1, aggr=aggr, mask=mask, keepdim=False)
74253ad0e7a9d23bd8c3d69097e8c1b8508c8b2f
3,645,398
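A shape sketch for readout; the project-level aggregate() helper is assumed to honor the mask when pooling over the node dimension:

import torch

x = torch.randn(2, 5, 8)                          # (B, N_max, F)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 0, 0, 0]], dtype=torch.bool)
pooled = readout(x, mask, aggr='add')             # (B, F)
print(pooled.shape)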
import sys import six def debug_ssh(function): """Decorator to generate extra debug info in case off SSH failure""" def wrapper(self, *args, **kwargs): try: return function(self, *args, **kwargs) except tempest.lib.exceptions.SSHTimeout: try: original_exception = sys.exc_info() caller = test_utils.find_test_caller() or "not found" if self.server: msg = 'Caller: %s. Timeout trying to ssh to server %s' LOG.debug(msg, caller, self.server) if self.log_console and self.servers_client: try: msg = 'Console log for server %s: %s' console_log = ( self.servers_client.get_console_output( self.server['id'])['output']) LOG.debug(msg, self.server['id'], console_log) except Exception: msg = 'Could not get console_log for server %s' LOG.debug(msg, self.server['id']) # re-raise the original ssh timeout exception six.reraise(*original_exception) finally: # Delete the traceback to avoid circular references _, _, trace = original_exception del trace return wrapper
bfd3caf911e5d7ecfac9111825ef594413e6bb5f
3,645,399