content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def get_attribute(node_id, runtime_property, deployment_id, rest_client):
    """Fetch a runtime property from the first matching node instance.

    :param node_id: The ID of a node template in a deployment.
    :type node_id: str
    :param runtime_property: The key of a runtime property.
    :type runtime_property: str
    :param deployment_id: A Cloudify Deployment ID.
    :type deployment_id: str
    :param rest_client: A Cloudify REST client.
    :type rest_client: cloudify_rest_client.client.CloudifyClient
    :return: The runtime property value (any JSON serializable type),
        or None when no instance of the deployment is found.
    """
    for instance in rest_client.node_instances.list(node_id=node_id):
        if instance.deployment_id == deployment_id:
            return instance.runtime_properties.get(runtime_property)
ccdcf619f6136f8889c44183594b58ab0a318745
644,430
def audit(log):
    """Mark a bound log object as an audit log.

    :param log: a bound log object
    :returns: the log object with ``audit_log=True`` bound onto it
    """
    return log.bind(audit_log=True)
54b25800392c49426000a4144401c51834a35848
39,604
def gtrr(registers, opcodes):
    """gtrr (greater-than register/register): returns 1 if register A is
    greater than register B, otherwise 0."""
    reg_a = registers[opcodes[1]]
    reg_b = registers[opcodes[2]]
    return 1 if reg_a > reg_b else 0
07534f469177086cd9d5f7c12701dcee45506e38
262,987
def find_2_integers_multiplies_to_number(numbers, goal):
    """Find two integers in *numbers* whose product equals *goal*.

    Time: O(n)  Space: O(n)

    :param numbers: iterable of integers
    :param goal: target product
    :return: tuple (a, b) with a * b == goal, or None if no pair exists

    Bug fix: the original tested ``goal // n in shown`` without checking
    that n actually divides goal, so e.g. numbers=[3, 3], goal=10
    wrongly returned (3, 3).  It also scanned the whole list after a
    match; we now return as soon as a valid pair is found, and guard
    against division by zero.
    """
    seen = set()
    for n in numbers:
        if n != 0 and goal % n == 0 and goal // n in seen:
            return (n, goal // n)
        seen.add(n)
    return None
c978d451606cbccbecdf1e32544416a9914460f0
198,190
def print_translation(args):
    """Convert the arguments of a Python print call to C++.

    Parameters
    ----------
    args : list of str
        List of arguments to add to the print statement

    Returns
    -------
    str
        The converted ``std::cout`` statement; the args are joined with
        C++ string concatenation (" + ") and terminated by std::endl.
    """
    joined_head = "".join(arg + " + " for arg in args[:-1])
    return "std::cout << " + joined_head + args[-1] + " << std::endl"
be2a3a19c8a28a405dcc8c4ed430a06481f5df20
231,907
def add_label(obj, label):
    """Add a color bar label (no-op when label is None).

    Args:
        obj (object): a ``matplotlib.colorbar.Colorbar`` object
        label (str): label for the color bar

    Returns:
        same as input
    """
    if label is None:
        return obj
    obj.set_label(label)
    return obj
e92022632bf68bee3851e1f4da55085b582a9069
200,840
def get_func_task_name(func):
    """Format the task function name via inspection (its ``__name__``)."""
    return func.__name__
acfeadbbd9ae2306e8d730178333454be93110c2
541,016
import torch


def matrix_to_cartesian(batch: torch.Tensor, keep_square: bool = False) -> torch.Tensor:
    """Convert homogeneous transformation matrices back to cartesian form.

    Args:
        batch: the batch of matrices to convert back
        keep_square: if False, keeps the translation column and returns
            NDIM x NDIM+1 matrices; if True, drops it and returns square
            NDIM x NDIM matrices. Defaults to False.

    Returns:
        torch.Tensor: the given matrices in cartesian coordinates
    """
    # Drop the homogeneous bottom row.
    cartesian = batch[:, :-1, ...]
    if not keep_square:
        return cartesian
    # Also drop the translation column to get a square matrix.
    return cartesian[..., :-1]
3147a7d04b01f36a42b385de45676fa1ddad4581
679,298
import ipaddress


def get_ip_networks_from_file(filename, strict=True):
    """Quickly generate a list of IP networks from a file with validation.

    :param str filename: The file to be parsed (one network per line)
    :param bool strict: If true, raises errors when invalid networks are
        encountered; otherwise invalid lines are skipped.
    :return: list
    """
    networks = []
    with open(filename) as handle:
        for line in handle:
            try:
                networks.append(ipaddress.ip_network(line.strip()))
            except ValueError:
                if strict:
                    raise
    return networks
8486a13a68d1b170e5510f695055be89664ecf19
592,047
def sort_url_list(cc_values, all_acqdates, all_urls):
    """Sort the url list by increasing cc_values and acqdate.

    :param cc_values: cloud-cover values, one per URL.
    :param all_acqdates: acquisition dates, one per URL.
    :param all_urls: gs:// URLs to sort and rewrite as HTTP URLs.
    :return: list of http://storage.googleapis.com/ URLs.

    NOTE(review): cc_values and all_acqdates are each sorted
    independently before being zipped with the (unsorted) URLs, which
    discards the row correspondence between the three input lists; the
    URLs end up ordered by the sorted (cc, date) pairs rather than by
    their own attributes.  Looks intentional only if the inputs arrive
    pre-aligned — TODO confirm with the caller.
    """
    cc_values = sorted(cc_values)
    all_acqdates = sorted(all_acqdates, reverse=True)
    # Order URLs by (cc, acqdate) tuples; the URL itself breaks ties.
    all_urls = [x for (y, z, x) in sorted(zip(cc_values, all_acqdates, all_urls))]
    urls = []
    for url in all_urls:
        # Rewrite gs:// bucket paths as public HTTP URLs.
        urls.append('http://storage.googleapis.com/' + url.replace('gs://', ''))
    return urls
ce8c4b4c45166a9e1bf28646cd0f0cb242ba1a24
456,900
import pathlib
import random
import shutil


def prepare_containerfile(anchor: str, dockerfile: str) -> str:
    """Ensure that a Containerfile, or a proxy copy of it, is in the
    build context directory.

    Args:
        anchor: Build context directory
        dockerfile: Path to Dockerfile/Containerfile

    Returns:
        path to Dockerfile/Containerfile in root of context directory
    """
    context_dir = pathlib.Path(anchor)
    source = pathlib.Path(dockerfile)
    # Already inside the context directory: just return its basename.
    if source.parent.samefile(context_dir):
        return source.name
    # Otherwise copy it into the context under a collision-resistant
    # random name (160 random bits, hex encoded).
    proxy = context_dir / f".containerfile.{random.getrandbits(160):x}"
    shutil.copy2(source, proxy, follow_symlinks=False)
    return proxy.name
77731c1b75c7ad899c0dcfb9dde5f6052f9ef150
470,138
def parse_arg(x):
    """
    Parse a string argument and attempt to turn numbers into actual
    number types.

    Parameters
    ----------
    x : str
        A string arg.

    Returns
    -------
    str, float, or int
        x type converted: int for digit-only strings, float for strings
        with a single '.', otherwise the string unchanged.

    Bug fix: the original crashed with ValueError on inputs such as
    '1.2.3' ('123'.isdigit() passed but float('1.2.3') failed); only
    strings with at most one '.' are now converted.
    """
    if x.count('.') <= 1 and x.replace('.', '').isdigit():
        return int(x) if x.isdigit() else float(x)
    return x
4ab80da7070d1740ac3d7428174e7e7531103964
575,966
def array2String(ar, delim):
    """Convert array values to a single string separated by *delim*."""
    return delim.join(str(item) for item in ar)
6d5fdce65efae4f388757392aba21211fc676563
373,169
def make_full_typename(typename):
    """Returns "pkg/msg/Type" for "pkg/Type"."""
    # Already qualified, or no package part at all: leave untouched.
    if "/msg/" in typename or "/" not in typename:
        return typename
    parts = typename.split("/")
    # Insert "msg" between the package (first segment) and the type
    # name (last segment).
    return "%s/msg/%s" % (parts[0], parts[-1])
cb8eccd6a168dcbd0a0241335f4ee0cf0d0f975d
435,332
import hashlib


def string_to_hex_color(input_string):
    """
    Takes any string and deterministically derives a pseudo-random
    color from it. Used to generate colors for e.g. families.

    :param input_string: base string to generate a color from
    :return: a semi random color in #fff format
    """
    digest = hashlib.sha1(str(input_string).encode('utf-8')).hexdigest()
    return '#' + digest[:3].upper()
3832c14b23702d24fcd9c4bf7f46f48e21aaed96
466,812
def get_parameters(line):
    """Get parameters output by iostat.

    Args:
        line: Process line, hopefully containing the parameters.

    Returns:
        Tuple: (param_type, param_list) where param_type is one of
        ('avg-cpu:', 'Device:') or None, and param_list is a list of
        parameter names with '/', '-', '%' replaced by '_' for
        transmission (empty when the line is not a header).
    """
    parts = line.split()
    if not parts or parts[0] not in ('avg-cpu:', 'Device:'):
        return None, []
    cleaned = []
    for token in parts[1:]:
        for ch in '/-%':
            token = token.replace(ch, '_')
        cleaned.append(token)
    return parts[0], cleaned
391924dda7944edb7c5559b4fb9d1f4018d5c358
465,336
def marker_xml(marker, marker_words, w_ids, attrib, value):
    """Build a UTF-8 XML byte string for a marker and its word references.

    For headers this produces e.g.
    ``<header level="1" text="..."> <ref id="..."/> ... </header>``
    and for Quran/Hadith quotes the same shape with a ``quote`` tag and
    a ``type`` attribute.
    """
    pieces = ['<{} {}="{}" text="{}">\n'.format(
        marker, attrib, value, ' '.join(marker_words))]
    pieces.extend(' <ref id="{}"/>\n'.format(w_id) for w_id in w_ids)
    pieces.append('</{}>\n'.format(marker))
    return ''.join(pieces).encode('utf-8')
b05b91d280621bb0acb4d54cfcf8b002646d0cc5
346,000
def create_canvas(width, height, enable_color=True):
    """Create a new char canvas.

    Parameters
    ----------
    width: width of the game view (int).
    height: height of the game view (int).
    enable_color: enable color in the game view (bool).

    Return
    ------
    canvas: 2D ascii canvas (dict).
    """
    # Each tile starts blank: a space with no foreground/background color.
    grid = {
        (x, y): {'color': None, 'back_color': None, 'char': ' '}
        for x in range(width)
        for y in range(height)
    }
    return {'size': (width, height), 'color': enable_color, 'grid': grid}
b0ba6648eb47533e939321d107f64e1323e9a81d
77,720
def file_get_contents(path: str) -> bytes:
    """Return the raw bytes of the file located at *path*."""
    with open(path, 'rb') as handle:
        return handle.read()
d8785080cc6e2a234ce44d8cc6961524f20de03c
640,929
def z2g(r_geoid, g0, z):
    """Calculate gravitational acceleration at an elevation.

    Derived from atmlab's equivalent function
    (https://www.sat.ltu.se/trac/rt/browser/atmlab/trunk/geophysics/pt2z.m):
    g = g0 * (r_geoid./(r_geoid+z)).^2

    :param r_geoid: surface radius at point [m]
    :param g0: surface gravitational acceleration at point [m/s^2]
    :param z: elevation [m]
    :returns: gravitational acceleration at point [m/s^2]
    """
    ratio = r_geoid / (r_geoid + z)
    return g0 * ratio ** 2
c04080156670e137f56ba4dfc872530bbada4d27
17,969
def expand_relative_path(module_name, rel_path):
    """Turn a relative module path into an absolute one.

    `module_name` is the absolute name of the reference module,
    `rel_path` is the path relative to this module; each leading '.'
    climbs one package level.
    """
    if not rel_path.startswith("."):
        return rel_path
    base = module_name.split(".")
    remainder = rel_path
    while remainder.startswith("."):
        remainder = remainder[1:]
        base = base[:-1]
    return ".".join(base) + "." + remainder
39e35fce82a020e7d91e07982515b707b34e0887
248,538
def validate(data):
    """Validates incoming data.

    Args:
        data(dict): the incoming data

    Returns:
        True if the data is valid

    Raises:
        ValueError: the data is not valid
    """
    if not isinstance(data, dict):
        raise ValueError("data should be dict")
    text = data.get("text")
    if not isinstance(text, str) or len(text) < 1:
        raise ValueError("text field is required and should not be empty")
    if "markdown" in data and not isinstance(data["markdown"], bool):
        raise ValueError("markdown field should be bool")
    if "attachments" in data:
        attachments = data["attachments"]
        if not isinstance(attachments, list):
            raise ValueError("attachments field should be list")
        for attachment in attachments:
            if "text" not in attachment and "title" not in attachment:
                raise ValueError("text or title is required in attachment")
    return True
ae8b7e74bd7607a7c8f5079014a0f5e3af5bc011
706,283
def __dowson_hamrock_parameters(r_eff, param_g, param_u, param_w):
    """
    Calculate the EHD-parameter for Dowson-Hamrock film thickness
    calculations. The name EHD-parameter is used within the tribology
    package to refer to what is calculated below; this parameter is not
    officially defined by Dowson-Hamrock.

    Parameters
    ----------
    r_eff: scalar
        The effective radius of the contact problem.
    param_g: scalar
        The elasticity parameter of the contact problem.
    param_u: scalar
        The velocity parameter of the contact problem.
    param_w: scalar
        The load parameter of the contact problem.

    Returns
    -------
    param_ehd: scalar
        The EHD parameter of the contact problem.
    """
    return r_eff * param_g ** 0.53 * param_u ** 0.67 * param_w ** -0.067
a028681878785570cd0aa18f5b2eb8c14f8b5d9b
361,708
def entity(reference):
    """Return a numeric (&#reference;) or symbolic: (&reference;)
    entity, depending on the reference's type.
    """
    try:
        # Integers format cleanly with ':d'; anything else raises
        # ValueError and falls through to the symbolic form.
        return f'&#{reference:d};'
    except ValueError:
        return f'&{reference};'
1b547a9506badd9fc3ddd575c3e67acebe6af539
31,312
def get_unlisted_setting(request):
    """
    Given a request object, return the resulting `unlisted` setting.

    - True only when the 'X-Unlisted' header is present and equals "true"
    - False when it is set to any other value or not set at all
    """
    headers = request.headers
    return 'X-Unlisted' in headers and headers.get('X-Unlisted') == 'true'
f98d095e8e897584dd1c59bbc7721e0f8c4705cb
118,795
def check_strand(strand):
    """Check the strand format.

    Return an error message if the format is not as expected ('+' or
    '-'); return None otherwise.
    """
    if strand not in ('-', '+'):
        return "Strand is not in the expected format (+ or -)"
9c2e720069ad8dcc8f867a37925f6e27e91dcb3f
5,575
def tc2s(tc: str, base: float = 25) -> float:
    """Convert an SMPTE timecode (HH:MM:SS:FF) to number of seconds.

    Args:
        tc (str): Source timecode (';' separators are tolerated)
        base (float): Frame rate (default: 25)

    Returns:
        float: Resulting value in seconds
    """
    hh, mm, ss, ff = (int(field) for field in tc.replace(';', ':').split(':'))
    return hh * 3600 + mm * 60 + ss + ff / float(base)
154345603ec2eee693472d9390169c13f955c3df
442,436
import re


def get_parameters_from_string(text):
    """Find all '%'-prefixed parameter names in *text*.

    For example, for the string
    'echo "--num_points %NUM_POINTS"; /bin/cat /payload/%IN>/payload/%OUT'
    it returns ['NUM_POINTS', 'IN', 'OUT'] (duplicates removed; order is
    not guaranteed because a set is used for de-duplication).
    """
    names = (match.lstrip('%') for match in re.findall(r"[%]\w+", text))
    return list(set(names))
6b6bd18b378be52ad188569fe44ab6af69b0b522
173,051
import struct


def pack_dint(n):
    """Pack a signed 32-bit integer into 4 bytes, little endian."""
    return struct.pack('<i', n)
88f7410e6b3fdb9a6e724d6ed64f4fef12030749
685,795
def iou(a, b):
    """
    Computes intersection over union, which is high if two AABBs are
    similar and highly overlapping.

    Boxes carry ``x``/``y`` (corner) and ``w``/``h`` attributes.

    Bug fix: the original did not clamp the intersection extents, so two
    disjoint boxes produced a spurious positive area (negative times
    negative); per-axis overlap is now floored at zero, giving IoU 0 for
    non-overlapping boxes.
    """
    a_xmax = a.x + a.w
    a_ymax = a.y + a.h
    b_xmax = b.x + b.w
    b_ymax = b.y + b.h
    xA = max(a.x, b.x)
    yA = max(a.y, b.y)
    xB = min(a_xmax, b_xmax)
    yB = min(a_ymax, b_ymax)
    # Clamp each axis overlap at zero so disjoint boxes contribute nothing.
    interArea = max(0.0, xB - xA) * max(0.0, yB - yA)
    boxAArea = a.w * a.h
    boxBArea = b.w * b.h
    return interArea / float(boxAArea + boxBArea - interArea)
facf7d72e7401a25c6562dabd041718c2bcb11cf
160,764
def encode(o):
    """Encode the str() form of *o* with UTF-8."""
    return str(o).encode('UTF-8')
9408900ddfb68f32b42f3d2f1aefb86329e9245c
521,761
def extract_relationtypes(rs3_xml_tree):
    """
    extracts the allowed RST relation names and relation types from
    an RS3 XML file.

    Parameters
    ----------
    rs3_xml_tree : lxml.etree._ElementTree
        lxml ElementTree representation of an RS3 XML file

    Returns
    -------
    relations : dict of (str, str)
        Returns a dictionary with RST relation names as keys (str)
        and relation types (either 'rst' or 'multinuc') as values (str).
    """
    relations = {}
    for rel in rs3_xml_tree.iter('rel'):
        # Only <rel> elements that declare a type are kept.
        if 'type' in rel.attrib:
            relations[rel.attrib['name']] = rel.attrib['type']
    return relations
eb78f5f543cba076b2defded3c8a122c55887cf8
323,803
def is_comment(source_str, file_ext):
    """Returns True if the line appears to start with a comment,
    False otherwise."""
    c_like = ('//', '/*')
    comment_prefixes = {
        '.c': c_like, '.cpp': c_like, '.cxx': c_like, '.h': c_like,
        '.m': c_like, '.java': c_like, '.rs': c_like,
        '.py': ('#',),
        '.asm': (';',),
    }
    prefixes = comment_prefixes.get(file_ext)
    return bool(prefixes) and source_str.startswith(prefixes)
b5e95ecaba675f04b127a0ea85f60eeeecdfdf5a
395,515
def build_mac_map(nova):
    """Build a Mac -> (IP, name, aliases) map for a list of Nova servers.

    Aliases include the server name, its hypervisor instance name, and —
    for 'overcloud-' prefixed names — the name with that prefix removed.
    Only addresses on the 'ctlplane' network are considered.
    """
    mac_map = {}
    for server in nova.servers.list():
        name = server.name
        aliases = [name, getattr(server, 'OS-EXT-SRV-ATTR:instance_name')]
        if name.startswith('overcloud-'):
            aliases.append(name[len('overcloud-'):])
        for address in server.addresses['ctlplane']:
            mac = address['OS-EXT-IPS-MAC:mac_addr']
            mac_map[mac] = (address['addr'], name, aliases)
    return mac_map
8153faf39beb2ba8f2ff8e8e6751f8ec4796307a
504,752
def round_kwarg_floats(kwarg_dict, decimals=3):
    """
    Round float values in a dictionary.

    Parameters
    ----------
    kwarg_dict : dict
    decimals : int, default 3
        Number of decimal places to round to.

    Returns
    -------
    dict
        New dictionary with floats rounded; non-float values are kept
        unchanged (bools and ints pass through).
    """
    return {
        key: round(value, decimals) if isinstance(value, float) else value
        for key, value in kwarg_dict.items()
    }
ba3bedd8d818578e772f80c1cf7d08a8788ef7be
377,348
from typing import Any
import importlib


def _import_dotted_name(name: str) -> Any:
    """Returns the Python object with the given dotted name.

    Args:
        name: The dotted name of a Python object, including the module name.

    Returns:
        The named value.

    Raises:
        ValueError: If `name` is not a dotted name.
        ModuleNotFoundError: If no dotted prefix of `name` can be imported.
        AttributeError: If the imported module does not contain a value with
            the indicated name.
    """
    name_pieces = name.split('.')
    if len(name_pieces) < 2:
        raise ValueError('Expected a dotted name including the module name.')
    # We don't know where the module ends and the name begins; so we need to
    # try different split points. Longer module names take precedence.
    for i in range(len(name_pieces) - 1, 0, -1):
        try:
            value = importlib.import_module('.'.join(name_pieces[:i]))
            for name_piece in name_pieces[i:]:
                value = getattr(value, name_piece)  # Can raise AttributeError.
            return value
        except ModuleNotFoundError:
            if i == 1:  # Final iteration through the loop.
                raise
    # The following line should be unreachable -- the "if i == 1: raise" above
    # should have raised an exception before we exited the loop.
    raise ModuleNotFoundError(f'No module named {name_pieces[0]!r}')
7b6b5c168c6025c10dfa58ca5cf2949229064cbc
422,987
import json


def parse_parameters(params):
    """
    Parse algorithm parameters from the config file.

    :param params: JSON encoded parameters.
    :type params: str
    :return: A dict of parameters with keys coerced to str.
    :rtype: dict(str: *)
    """
    return {str(key): value for key, value in json.loads(params).items()}
308258765701843c4b2e224df0272d94dabe6840
365,128
def _non_adjacent_filter(self, cmd, qubit_graph, flip=False):
    """A ProjectQ filter to identify when swaps are needed on a graph

    This flags any gates that act on two non-adjacent qubits with respect
    to the qubit_graph that has been given

    Args:
        self(Dummy): Dummy parameter to meet function specification.
        cmd(projectq.command): Command to be checked for decomposition into
            additional swap gates.
        qubit_graph(Graph): Graph object specifying connectivity of
            qubits. The values of the nodes of this graph are unique
            qubit ids.
        flip(Bool): Flip for switching if identifying a gate is in this
            class by true or false.  Designed to meet the specification
            of ProjectQ InstructionFilter and DecompositionRule with one
            function.

    Returns:
        bool: When flip is False, this returns True when a 2 qubit command
            acts on non-adjacent qubits or when it acts only on a single
            qubit. This is reversed when flip is used.
    """
    # No connectivity constraints given: everything passes.  The XOR with
    # `flip` inverts the answer so one function serves both filter roles.
    if qubit_graph is None:
        return True ^ flip
    # Collect control qubits plus every target qubit from all quregs.
    total_qubits = (cmd.control_qubits + [item for qureg in cmd.qubits for item in qureg])
    # Check for non-connected gate on 2 qubits
    if ((len(total_qubits) == 1) or (len(total_qubits) == 2 and qubit_graph.is_adjacent(qubit_graph.find_index(total_qubits[0].id), qubit_graph.find_index(total_qubits[1].id)))):
        return True ^ flip
    return False ^ flip
9d3a55341c2a1410c5c1864ce5fcd6ea177d4026
697,373
def get_total_open_threads(feedback_thread_analytics):
    """Returns the count of all open threads for the given
    FeedbackThreadAnalytics domain objects."""
    total = 0
    for analytics in feedback_thread_analytics:
        total += analytics.num_open_threads
    return total
118c5c23e28b1485277a7042537513fa769f735e
545,796
from pathlib import Path
from typing import List


def get_dependencies(dependency_directory: Path) -> List[str]:
    """
    Creates a list of dependencies based on the contents of the
    dependency directory.

    :param dependency_directory: where the dependencies uploaded by the
        user are stored
    :return: a list of the dependencies' paths (including the dependency
        directory name)
    """
    return [
        f"{Path(entry.parent.name) / entry.name}"
        for entry in dependency_directory.iterdir()
        if entry.is_file()
    ]
b7d2069c04cf98281d963fe36e4d5fd3e8a715ab
422,795
def _strip_prefix(cmd_line):
    """Strip an OS operating prefix from a command line.

    Windows shell prefixes also have surrounding double quotes removed;
    unrecognized command lines are returned unchanged.
    """
    # (prefix, strip surrounding double quotes from the remainder?)
    shell_prefixes = (
        ('cmd.exe /c ', True),
        ('cmd /c ', True),
        ('/bin/bash -c ', False),
        ('/bin/sh -c ', False),
    )
    for prefix, strip_quotes in shell_prefixes:
        if cmd_line.startswith(prefix):
            rest = cmd_line[len(prefix):]
            return rest.strip('"') if strip_quotes else rest
    return cmd_line
4ba26c654f578cef0ad471a3126c47611b2340db
444,709
import json


def parse_response(data):
    """Decode a JSON payload and return the resulting object."""
    return json.loads(data)
74d51dccbd4b162366d729bdb9dae96e70d172ab
505,595
def partialr(func, *args, **kwargs):
    """partialr: Partially applies last arguments.

    New keyworded arguments extend and override kwargs."""
    def applied(*call_args, **call_kwargs):
        merged_kwargs = dict(kwargs, **call_kwargs)
        return func(*(call_args + args), **merged_kwargs)
    return applied
bcb1433b1272bb0bb512602dd6647528882f11fc
592,053
def dict_normalize(in_dict, keys=None, prefix="", suffix="", default=None):
    """Change the keys of a dictionary.

    :param in_dict: input dictionary
    :param keys: key list; falls back to in_dict.keys() when falsy
        (Default: None)
    :param prefix: prefix for the keys (Default "")
    :param suffix: suffix for the keys (Default "")
    :param default: default value passed to get (Default: None)
    :return: normalized dictionary
    """
    selected = keys if keys else list(in_dict.keys())
    return {f"{prefix}{k}{suffix}": in_dict.get(k, default) for k in selected}
0b9fcdaba66dc84b3a7012422ec80c110955372e
395,182
def calc_temp_overlap(start_1, end_1, start_2, end_2):
    """Calculate the portion of the first time span that overlaps with
    the second.

    Parameters
    ----------
    start_1, end_1 : datetime
        Start and end of the first time span.
    start_2, end_2 : datetime
        Start and end of the second time span.

    Returns
    -------
    float
        Ratio of span 1's duration covered by the overlap (0 when the
        spans do not overlap or span 1 has zero duration).
    """
    # Disjoint spans: one ends before the other begins.
    if end_1 < start_2 or end_2 < start_1:
        return 0
    if (start_1 <= start_2) and (end_1 >= end_2):
        overlap = end_2 - start_2          # span 2 fully inside span 1
    elif (start_2 <= start_1) and (end_2 >= end_1):
        overlap = end_1 - start_1          # span 1 fully inside span 2
    elif (start_2 <= start_1) and (end_2 <= end_1):
        overlap = end_2 - start_1          # span 1 overlaps span 2 from right
    elif (start_1 <= start_2) and (end_1 <= end_2):
        overlap = end_1 - start_2          # span 1 overlaps span 2 from left
    else:
        raise Exception("wrong case")
    overlap_seconds = overlap.total_seconds()
    assert overlap_seconds >= 0, "the overlap can not be lower than 0"
    duration = (end_1 - start_1).total_seconds()
    if duration == 0:
        return 0
    return overlap_seconds / duration
2fa9882a90628056fba615c58f2ad8907c4d8315
651,837
def mark_as_changed_wrapper(parent_method):
    """Decorator that ensures _mark_as_changed method gets called after
    the wrapped method."""
    def wrapper(self, *args, **kwargs):
        # Can't use super() in the decorator.
        outcome = parent_method(self, *args, **kwargs)
        self._mark_as_changed()
        return outcome
    return wrapper
8b181532ea98885e7f7283db71a1247d975dd76a
89,980
def get_expression(text, begin, separator):
    """Find the end of a expression or statement

    An expression or statement ends at a new-line or at the separator,
    unless the new-line or separator is encountered inside a
    string-literal or inside matching bracket pairs.

    @param text The total text being parsed.
    @param begin The index at the start of the expression or statement.
    @param separator The character which ends an expression (not statement).
    @return (index beyond the terminator, the expression,
        The terminator or empty on end-of-text).
    """
    end_chars = separator + "\n\r\f"
    bracket_stack = 0   # net count of currently-open brackets
    in_string = None    # the quote char that opened the current literal
    end = len(text)
    i = begin
    while i < end:
        c = text[i]
        if in_string:
            if c == in_string:
                in_string = None   # matching quote closes the literal
            elif c == "\\":
                i += 1             # skip the escaped character
        elif c in "'\"":
            in_string = c          # entering a string literal
        elif c in "[({":
            bracket_stack += 1
        elif c in "])}":
            bracket_stack -= 1
        elif c in end_chars and not bracket_stack:
            break                  # terminator found outside strings/brackets
        i += 1
    expression = text[begin:i].strip()
    terminator = text[i:i + 1]
    return i + 1, expression, terminator
e7f04d67121f0d26732149654132b6e65fbb0b96
649,474
def _get_padding(alignment, current_size, next_element_size):
    """Calculate number of padding bytes required to get the next element
    in the correct alignment.

    The effective alignment is min(alignment, next_element_size); an
    alignment of 1 never needs padding.
    """
    if alignment == 1:
        return 0  # Always aligned
    effective = min(alignment, next_element_size)
    # Distance from current_size up to the next multiple of `effective`.
    return -current_size % effective
771a02a4f3363653e87f0549834784037c7cdc98
595,291
def categorize_disorder(scores_df, disfrac_df):
    """Classify proteins into disorder categories.

    Takes a merged disorder input and the derived aggregated disorder
    fractions and classifies proteins categorically according to
    Deiana et al. (http://dx.doi.org/10.1101/446351).  The criteria are:

        A10 = 10% disorder fraction
        A30 = 30% disorder fraction
        B   = 22 consecutive disordered amino acids (needs raw scores!)

    Categories: ORD (ordered), NDP (not disordered), PDR (partially
    disordered), FRAG (fragmentarily disordered), IDP (intrinsically
    disordered).

    Arguments:
        scores_df {pd.DataFrame} -- per-residue disorder scores; must
            have 'protein_ac' and 'score' columns.
        disfrac_df {pd.DataFrame} -- aggregated fractions; must have
            'protein_ac' and 'disorder_fraction' columns.  NOTE(review):
            assumed to hold one unique fraction per protein — the
            scalar comparison on .unique() below relies on it.

    Returns:
        cat_disfrac {pd.DataFrame} -- disfrac_df with an added
            'disorder_category' column (modified in place, not copied).
    """
    # First, conditions are checked for each protein
    condA10 = {}
    condA30 = {}
    condB = {}
    grouped_disfrac = disfrac_df.groupby(["protein_ac"])
    grouped_scores = scores_df.groupby(["protein_ac"])
    # Condition A: Disorder fraction thresholds (10% and 30%)
    for gp, df in grouped_disfrac:
        if df["disorder_fraction"].unique() < 0.1:
            condA10[gp] = False
            condA30[gp] = False
        elif df["disorder_fraction"].unique() < 0.3:
            condA10[gp] = True
            condA30[gp] = False
        elif df["disorder_fraction"].unique() >= 0.3:
            condA10[gp] = True
            condA30[gp] = True
        else:
            print("Disorder fraction value not found for {}".format(gp))
    # Condition B: Disordered stretches
    for gp, df in grouped_scores:
        # Define a rolling window of 22 amino acids
        wdw = df["score"].rolling(22)
        # Check how many stretches exist where all scores indicate disorder
        # (window minimum >= 0.5 means every residue in it is disordered)
        idrs = df[wdw.min() >= 0.5]
        if len(idrs) > 0:
            condB[gp] = True
        else:
            condB[gp] = False
    disorder_type_dict = {}
    # Categorize disorder fractions
    for protein in disfrac_df["protein_ac"]:
        A10 = condA10[protein]
        A30 = condA30[protein]
        B = condB[protein]
        if A10 is False:
            # 'ORD': Ordered protein
            disorder_type_dict[protein] = "ORD"
        elif A10 is True and A30 is False:
            if B is False:
                # 'NDP': Not disordered protein
                disorder_type_dict[protein] = "NDP"
            else:
                # 'PDR': Partially disordered protein
                disorder_type_dict[protein] = "PDR"
        elif A30 is True:
            if B is False:
                # 'FRAG': Fragmentarily disordered protein
                disorder_type_dict[protein] = "FRAG"
            else:
                # 'IDP': Intrinsically disordered protein
                disorder_type_dict[protein] = "IDP"
    # Copy disorder fraction and add classification column
    cat_disfrac = disfrac_df
    cat_disfrac["disorder_category"] = cat_disfrac["protein_ac"].map(disorder_type_dict)
    return cat_disfrac
83c42acbbf219409f06b446c8fd234dff8550bf5
374,892
def is_local_host(location):
    """
    :param location: Location string in the format ip[/slot[/port]].
    :returns: True if ip represents localhost or offline, else False.
    """
    lowered = location.lower()
    return any(token in lowered
               for token in ('localhost', '127.0.0.1', 'offline', 'null'))
f6f88ef64facb21b3a3b24fed9887e5576a5c25b
79,589
def getStatus(sig, det_status):
    """
    Determine a signal's detection status based on the status of its
    detectors.

    Parameters
    ----------
    sig : dict | (required)
        A signal record dict generated from a Knack.View instance
    det_status : dict | (required)
        A lookup dictionary generated from method groupBySignal()

    Returns
    -------
    value : string
        A detection status string of BROKEN, NO DETECTION, or OK
    """
    key = "${}".format(sig["SIGNAL_ID"])
    if key not in det_status:
        # no detectors at signal
        return "NO DETECTION"
    # any broken detector, status is BROKEN
    if "BROKEN" in det_status[key]["statuses"]:
        return "BROKEN"
    # detection must be OK
    return "OK"
f758c70e9a625e4b85bc0ec341ada38f0f2d4a18
262,309
def bw_2_rgb_lambda(img):
    """Simple helper converting a (PIL) image to RGB mode; images that
    are already RGB are returned unchanged."""
    return img if img.mode == "RGB" else img.convert(mode="RGB")
ea112e570d9a62562cd9c2f18fa80dde8466576d
533,883
from typing import List


def merge_sort(x: List) -> List:
    """Merge sort divides a list into two smaller lists, recursively
    sorts each half, and merges the two sorted halves into a single
    sorted list.

    It has an average time complexity of Θ(nlogn).  Time complexity for
    the worst case is O(nlogn).  Time complexity for the best case is
    Ω(nlogn).

    >>> merge_sort([4, 2, 3, 1, 0, 5])
    [0, 1, 2, 3, 4, 5]

    :param x: list to be sorted
    :return: new sorted list

    Performance fix: the original merged with ``list.pop(0)``, which is
    O(n) per call and made the merge step quadratic; merging by index
    keeps it linear.
    """
    if len(x) <= 1:
        return x
    mid_idx = len(x) // 2
    left = merge_sort(x[:mid_idx])
    right = merge_sort(x[mid_idx:])
    result = []
    i = j = 0
    # Merge the two sorted halves by advancing indices (stable: <= keeps
    # equal elements from the left half first).
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result
995e8d1d275a295b7ddc694bb7d2faaac5f2a17c
53,195
def pa_to_gscm(x):
    """Pascal to g/cm2.

    Parameters
    ----------
    x : float or array of floats
        Air pressure (in Pa)

    Returns
    -------
    output : float or array of floats
        Air pressure (in g/cm2)
    """
    # 98.0665 Pa corresponds to 1 g/cm2 (standard gravity conversion).
    return x / 98.0665
4c430c6bb7521ab4719ecf4f3fe61497f6fb54c9
275,471
def getUrl(sIpAddress):
    """Returns the full cgi URL of the target."""
    return 'http://{}/cgi-bin/xml-cgi'.format(sIpAddress)
d51499ef9828ead13f7a17a6a7ba0484cc3d3b91
677,220
def normalise_pagename(page_name, prefix):
    """
    Checks if the page_name starts with the prefix.
    Returns None if it does not, otherwise strips the prefix.
    A falsy prefix leaves the page name untouched.
    """
    if not prefix:
        return page_name
    if page_name.startswith(prefix):
        return page_name[len(prefix):]
    return None
c1e4b9038a8caec3d3751ff2f5ab029a9608ff93
277,317
def opt_step(
    fp_optimizer, tracker, full_batch_size, update_freq, math_mode, world_size
):
    """Performs one optimizer step.

    Args:
        fp_optimizer (:obj:`FP16Optimizer` | :obj:`FP32Optimizer`): The FP Optimizer
        tracker (:obj:`mlbench_core.utils.Tracker`, optional) The current tracker
        full_batch_size (int): The total batch size (over all batches since last update)
        update_freq (int): The update frequency between batches
        math_mode (str): The used math mode
        world_size (int): Distributed world size

    Returns:
        (bool): Whether the weights were updated or not (i.e. if no
        overflow detected)

    Raises:
        NotImplementedError: for an unknown math_mode.
    """
    if math_mode == "fp32":
        return fp_optimizer.step(tracker=tracker, denom=full_batch_size)
    if math_mode == "fp16":
        # This results in reducing tensor and dividing by
        # `loss_scale * full_batch_size`, but we divide by world size
        # before reduction to avoid overflow, and re-multiply after
        # reduction to rescale.
        denominator = world_size * update_freq
        scale = full_batch_size / denominator
        return fp_optimizer.step(
            tracker=tracker, denom=denominator, multiplier=scale
        )
    raise NotImplementedError("Unknown math mode {}".format(math_mode))
61c576784537b2aec45d651b720d4eaca58b3db4
497,764
def cohort_to_int(year, season, base=16):
    """cohort_to_int(year, season[, base])
    Converts cohort tuple to a unique sequential ID.

    Positional arguments:
        year (int) - 2-digit year
        season (int) - season ID
    Keyword arguments:
        base (int) - base year to treat as 0

    Returns:
        (int) - integer representing the number of seasons since the
            beginning of the base year (3 seasons per year)
    """
    return 3 * (year - base) + season
1f1981eb6c43ab6f77abf6d04ba3b92d9053953d
7,479
def _general_fuel_checker(mass: int) -> int:
    """Given the mass of a module, calculate the fuel requirement:
    floor(mass / 3) minus 2.

    Args:
        - mass (int): the mass of the module

    Returns:
        int: the fuel requirement
    """
    return mass // 3 - 2
df962587b44316a277cbac0883cc4f6a784a737d
53,560
def add_common_nvcc_variables(env):
    """
    Add underlying common "NVIDIA CUDA compiler" variables that are used by
    multiple builders.

    The SCons construction environment `env` is mutated in place; nothing is
    returned. The added entries are `_concat`-style substitution strings plus
    the filter callables they reference.
    """
    # nvcc needs '-I' prepended before each include path, regardless of platform
    env['_NVCC_CPPPATH'] = '${_concat("-I ", CPPPATH, "", __env__)}'
    # prepend -Xcompiler before each flag which needs it; some do not
    disallowed_flags = ['-std=c++03']
    # these flags don't need the -Xcompiler prefix because nvcc understands them
    # XXX might want to make these regular expressions instead of repeating similar flags
    need_no_prefix = ['-std=c++03', '-std=c++11', '-O0', '-O1', '-O2', '-O3']

    def flags_which_need_no_prefix(flags):
        # first filter out flags which nvcc doesn't allow
        flags = [flag for flag in flags if flag not in disallowed_flags]
        result = [flag for flag in flags if flag in need_no_prefix]
        return result

    def flags_which_need_prefix(flags):
        # first filter out flags which nvcc doesn't allow
        flags = [flag for flag in flags if flag not in disallowed_flags]
        result = [flag for flag in flags if flag not in need_no_prefix]
        return result

    env['_NVCC_BARE_FLAG_FILTER'] = flags_which_need_no_prefix
    env['_NVCC_PREFIXED_FLAG_FILTER'] = flags_which_need_prefix

    # CCFLAGS: options passed to C and C++ compilers
    env['_NVCC_BARE_CCFLAGS'] = '${_concat("", CCFLAGS, "", __env__, _NVCC_BARE_FLAG_FILTER)}'
    env['_NVCC_PREFIXED_CCFLAGS'] = '${_concat("-Xcompiler ", CCFLAGS, "", __env__, _NVCC_PREFIXED_FLAG_FILTER)}'
    env['_NVCC_CCFLAGS'] = '$_NVCC_BARE_CCFLAGS $_NVCC_PREFIXED_CCFLAGS'

    # CXXFLAGS: options passed to C++ compilers
    env['_NVCC_BARE_CXXFLAGS'] = '${_concat("", CXXFLAGS, "", __env__, _NVCC_BARE_FLAG_FILTER)}'
    env['_NVCC_PREFIXED_CXXFLAGS'] = '${_concat("-Xcompiler ", CXXFLAGS, "", __env__, _NVCC_PREFIXED_FLAG_FILTER)}'
    env['_NVCC_CXXFLAGS'] = '$_NVCC_BARE_CXXFLAGS $_NVCC_PREFIXED_CXXFLAGS'

    # CPPFLAGS: C preprocessor flags
    env['_NVCC_BARE_CPPFLAGS'] = '${_concat("", CPPFLAGS, "", __env__, _NVCC_BARE_FLAG_FILTER)}'
    env['_NVCC_PREFIXED_CPPFLAGS'] = '${_concat("-Xcompiler ", CPPFLAGS, "", __env__, _NVCC_PREFIXED_FLAG_FILTER)}'
    env['_NVCC_CPPFLAGS'] = '$_NVCC_BARE_CPPFLAGS $_NVCC_PREFIXED_CPPFLAGS'

    # this function takes a list of compilers, and it returns the list with the name of nvcc's default host compiler removed
    # this is used in the _concat() call below to use the -ccbin switch with nvcc only if env['CXX'] specifies a host compiler
    # different than its default
    def filter_default_host_compiler(words):
        if 'gcc' in words:
            return []
        elif 'g++' in words:
            return []
        return words

    env['_NVCC_DEFAULT_HOST_COMPILER_FILTER'] = filter_default_host_compiler

    # CCBIN: the name of the c++ compiler to use for host code
    env['_NVCC_CCBIN'] = '${_concat("-ccbin=", CXX, "", __env__, _NVCC_DEFAULT_HOST_COMPILER_FILTER)}'

    # assemble portion of the command line common to all nvcc commands
    env['_NVCC_COMMON_CMD'] = '$_NVCC_CCBIN $_NVCC_CPPFLAGS $_CPPDEFFLAGS $_NVCC_CPPPATH'
ba44a521a2778eee677f3b799b215a9874a3211b
327,978
def get_km_user_image_upload_path(km_user, imagename):
    """
    Get the path to upload the kmuser image to.

    Args:
        km_user: The km_user whose image is being uploaded.
        imagename (str): The original name of the image being uploaded.

    Returns:
        str: ``know-me/users/<user_id>/images/<imagename>``.
    """
    return f"know-me/users/{km_user.id}/images/{imagename}"
01b350177f9d605b508debc5c391964dffcaf5e1
644,382
def _list_str(lst,sep): """ Returns the list as a string with the given separator and no brackets. """ ret = '' for x in lst: ret += str(x)+sep return ret[:-1*len(sep)] # remove extra separator
2cc0b1f90997f0314abdd8ab769d0e55e9f090db
584,646
def pad(data: bytes, size: int, value: int = 0) -> bytes:
    """Pad *data* with *value* until its length is a multiple of *size*.

    Parameters:
        data: bytes
            Data to pad
        size: int
            Required size data length must be a multiple of
        value: int
            Byte value to append (masked to one byte). 0 by default.
    """
    filler = bytes([value & 0xff])
    remainder = len(data) % size
    if remainder:
        data = data + filler * (size - remainder)
    return data
3879930d6516daa751dcd310c93055b30c36b852
76,377
def split_array(lst: list, size: int):
    """Split a list into consecutive sublists of at most *size* elements.

    Examples:
        >>> split_array([1,2,3,4,5,6,7,8,9,10], 3)
        [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]

    References:
        https://youtu.be/pG3L2Ojh1UE?t=336
    """
    return [lst[start:start + size] for start in range(0, len(lst), size)]
bef48fdc17568d3d648570f263603c29708690e5
314,964
def ratio_col(df, df_cols):  # Tested [Y]
    """
    Compute the ratio between two columns, storing it in the numerator column.

    Note: the numerator column of *df* is overwritten in place.

    Args
        df (pd.DataFrame): Dataframe containing the columns to compute a ratio.
        df_cols (tuple): Names of the two columns, as (<numerator>, <denominator>).

    Return
        (pd.Series) The numerator column, now holding numerator / denominator.
    """
    numerator, denominator = df_cols
    df[numerator] = df[numerator].div(df[denominator].values, axis=0)
    return df[numerator]
a4bfc13a5e87604ddae865f6df3b9b123359be52
22,264
def _get_loadings(params, info, dimensions): """Create the array of factor loadings.""" return params[info["loadings"]].reshape(-1, dimensions["n_states"])
27ab3a77660681bfb1da69c94658c831dd9d4f9c
231,855
import collections


def flatten_dict(d: dict, sep: str = "_") -> dict:
    """
    Flatten nested dicts and lists into a single-level dict, joining keys
    (and list indices) with *sep*, e.g.
    ``{'equals': [{'nabla': ['2911']}]}`` becomes ``{'equals_0_nabla_0': '2911'}``.

    Adapted from https://medium.com/better-programming/how-to-flatten-a-dictionary-with-nested-lists-and-dictionaries-in-python-524fd236365

    Args:
        d: dict to be flattened
        sep: separator joining nested keys/indices

    Returns:
        A flat ``dict`` with compound keys.

    >>> flatten_dict({}, '_')
    {}
    """
    # no logging here on purpose: this function is called very often
    flat = collections.OrderedDict()

    def _walk(node, key=""):
        if isinstance(node, list):
            for index, item in enumerate(node):
                _walk(item, key + sep + str(index) if key else str(index))
        elif isinstance(node, dict):
            for child_key, child in node.items():
                _walk(child, key + sep + child_key if key else child_key)
        else:
            flat[key] = node

    _walk(d)
    return dict(flat)
d383d03aec55e7979e619592ed1db2aed1d5e11c
569,023
def parse_sas_token(sas_token):
    """Parse a SAS token into its components.

    The scheme prefix (text before the first space) is discarded; the
    remaining ``key=value`` pairs are split on ``&`` and keys lower-cased.

    :param sas_token: The SAS token.
    :type sas_token: str
    :rtype: dict[str, str]
    """
    token = sas_token.partition(' ')[2]
    parsed = {}
    for pair in token.split('&'):
        key, value = pair.split('=', 1)
        parsed[key.lower()] = value
    return parsed
957665a70fb10e9e7688a9f7ecb637bcf7f82ab6
57,682
import logging


def render_disassembly(dis, match_offset, match_len, context_lines=4):
    """
    Accepts a DecodeGenerator from distorm and returns a string that will be
    directly rendered in the ICE yara results page.

    dis: DecodeGenerator from distorm.Decode()
    match_offset: offset into the file where the match occured
    match_len: Length of yara match
    context_lines: How many lines of disassembly to return before and after
        the matching lines

    Fixes over the original: the first/last match indices are now compared
    with ``is None`` — the old truthiness checks misbehaved when the match
    started at instruction index 0 — and a match that runs to the end of the
    listing no longer duplicates lines via ``lines[None:]`` slicing.
    """
    lines = []
    first_line = None  # index of first bolded (matching) line
    last_line = None   # index of first line after the match
    for i, instr in enumerate(dis):
        asm = "0x{:08X} {:<20}{}".format(instr[0], instr[3], instr[2])
        if match_offset <= instr[0] < match_offset + match_len:
            lines.append("<b>{}</b>".format(asm))
            if first_line is None:
                first_line = i
        else:
            lines.append(asm)
            if first_line is not None and last_line is None:
                last_line = i
    # Fall back to sane bounds when there was no match / the match reached
    # the end of the listing.
    if first_line is None:
        first_line = 0
    if last_line is None:
        last_line = len(lines)
    lines = (
        lines[:first_line][-context_lines - 1:]
        + lines[first_line:last_line]
        + lines[last_line:][:context_lines]
    )
    # debug level: this is informational output, not an error condition
    logging.debug("Rendered disassembly: %s", "\n".join(lines))
    return "\n".join(lines)
ce719252bae1f5833e788922832cf11207f63deb
20,088
import torch
from typing import Callable


def split_distance(
    a: torch.Tensor,
    b: torch.Tensor,
    dist_fn_1: Callable,
    dist_fn_2: Callable,
    split_idx: int,
) -> torch.Tensor:
    """
    Split both `a` and `b` in two parts at `split_idx` along the last
    dimension, apply a different distance function to each part, min-max
    normalise each distance, and return their width-weighted sum. Useful for
    mixed datasets containing embeddings and continuous values.

    Parameters
    ----------
    a : torch.Tensor
        The first Tensor
    b : torch.Tensor
        The second Tensor
    dist_fn_1 : Callable
        The distance function for the first chunks
    dist_fn_2 : Callable
        The distance function for the second chunks
    split_idx : int
        The split point for `a` and `b`
    """
    def _minmax(t: torch.Tensor) -> torch.Tensor:
        # rescale to [0, 1] over the whole tensor
        return (t - t.min()) / (t.max() - t.min())

    tail = a.size(-1) - split_idx
    a_head, a_tail = torch.split(a, [split_idx, tail], dim=-1)
    b_head, b_tail = torch.split(b, [split_idx, b.size(-1) - split_idx], dim=-1)

    head_dist = _minmax(dist_fn_1(a_head, b_head))
    tail_dist = _minmax(dist_fn_2(a_tail, b_tail))

    total = a.shape[-1]
    return (head_dist * a_head.shape[-1] / total) + (tail_dist * a_tail.shape[-1] / total)
31f1ec9396b1bb76352e79d97d8b31ecd8eedf0f
608,222
def get_parameter_overview(variables, limit=40):
  """Returns a string with variables names, their shapes, count, and types.

  To get all trainable parameters pass in `tf.trainable_variables()`.

  Args:
    variables: List of `tf.Variable`(s).
    limit: If not `None`, the maximum number of variables to include.

  Returns:
    A string with a table like in the example.

  +----------------+---------------+------------+---------+
  | Name           | Shape         | Size       | Type    |
  +----------------+---------------+------------+---------+
  | FC_1/weights:0 | (63612, 1024) | 65,138,688 | float32 |
  | FC_1/biases:0  |       (1024,) |      1,024 | float32 |
  | FC_2/weights:0 |    (1024, 32) |     32,768 | float32 |
  | FC_2/biases:0  |         (32,) |         32 | float32 |
  +----------------+---------------+------------+---------+
  Total: 65,172,512
  """
  # Column widths: the widest cell in each column (header included).
  max_name_len = max([len(v.name) for v in variables] + [len("Name")])
  max_shape_len = max([len(str(v.get_shape())) for v in variables] + [len(
      "Shape")])
  max_size_len = max([len("{:,}".format(v.get_shape().num_elements()))
                      for v in variables] + [len("Size")])
  max_type_len = max([len(v.dtype.base_dtype.name) for v in variables] + [len(
      "Type")])

  var_line_format = "| {: <{}s} | {: >{}s} | {: >{}s} | {: <{}s} |"
  # Separator row reuses the same template with '-' fill and '+' corners.
  sep_line_format = var_line_format.replace(" ", "-").replace("|", "+")

  header = var_line_format.replace(">", "<").format("Name", max_name_len,
                                                    "Shape", max_shape_len,
                                                    "Size", max_size_len,
                                                    "Type", max_type_len)
  separator = sep_line_format.format("", max_name_len, "", max_shape_len, "",
                                     max_size_len, "", max_type_len)

  lines = [separator, header, separator]

  total_weights = sum(v.get_shape().num_elements() for v in variables)

  # Create lines for up to `limit` variables; the rest are elided as "[...]".
  for v in variables:
    if limit is not None and len(lines) >= limit:
      lines.append("[...]")
      break
    lines.append(var_line_format.format(
        v.name, max_name_len,
        str(v.get_shape()), max_shape_len,
        "{:,}".format(v.get_shape().num_elements()), max_size_len,
        v.dtype.base_dtype.name, max_type_len))

  lines.append(separator)
  lines.append("Total: {:,}".format(total_weights))
  return "\n".join(lines)
3be401b71a609f01031a411b2543b814ad537473
376,312
def get_class_labels(f_labels): """ Return name list (i.e., class labels). :param f_labels: file of labels to read in :return: class labels (i.e., contents of f_labels) """ with open(f_labels) as fid: labels = fid.readlines() def clean_labels(x): return x.rstrip() return list(map(clean_labels, labels))
d820271ce78291cf23c95277d56be1207de3e62c
70,469
from bs4 import BeautifulSoup


def clean(review):
    """
    Clean an input review: strip HTML markup and lower-case the text.
    """
    text = BeautifulSoup(review, "html.parser").get_text()
    return text.lower()
5150bb127d84e8b548a826ed4e352e3196baa4df
544,415
def keywordInStr(keywordList, string):
    """
    Return True if any keyword from keywordList occurs in string,
    otherwise False.
    """
    for keyword in keywordList:
        if keyword in string:
            return True
    return False
e79d0aaa8626df3bec9d632963b367292fb90b98
575,643
def count_increased_part2(values):
    """
    Count how many measurements increased in a three-measurement sliding window.

    Two adjacent windows share their two middle values, so window B's sum
    exceeds window A's exactly when the value entering B exceeds the value
    leaving A — compare those two values directly instead of summing each
    window (also drops the old commented-out debug prints).

    :param values: List of values
    :returns: Number of three-measurement windows whose sum exceeds the
        previous window's sum
    """
    return sum(1 for i in range(3, len(values)) if values[i] > values[i - 3])
22fd441f91d5e4c84cfe6c46ab37d5b2f212fa78
603,247
import random


def random_string(n):
    """Generate a random lowercase alphanumeric string of length n
    (empty string when n is negative)."""
    if n < 0:
        return ''
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    return ''.join(random.choice(alphabet) for _ in range(n))
7ffce5350560ab0405f06dbda2201367e2f26a54
438,254
def get_type(record):
    """Get ILL type from legacy request_type.

    Unknown request types map to "MIGRATED_UNKNOWN".
    """
    legacy_to_ill = {
        "article": "DIGITAL",
        "book": "PHYSICAL_COPY",
    }
    return legacy_to_ill.get(record["request_type"], "MIGRATED_UNKNOWN")
b61b73e88a467292505e9ec9d2ceb6948c623e3a
394,126
def _get_vod_paths(playlist): """Extract unique VOD paths for download from playlist.""" files = [] for segment in playlist.segments: if segment.uri not in files: files.append(segment.uri) return files
324da93b8e5c4e981ce3f856466eb8d8f80430c1
535,165
def sym_has_params(sym, input_names):
    """
    Judge if a model has parameters.

    Args:
        sym: A Symbol instance of the model.
        input_names: A list of input tensor names.

    Return:
        True if the model has parameters (arguments beyond the inputs),
        False otherwise.
    """
    params = set(sym.list_arguments()) - set(input_names)
    return len(params) > 0
d06d674e485950561631cb5659f455cf36fa47f8
89,327
def parent_path(xpath):
    """
    Remove the last element of an xpath, yielding the xpath of the parent
    element.

    :param xpath: An xpath with at least one '/'
    """
    parent, _sep, _leaf = xpath.rpartition('/')
    return parent
b435375b9d5e57c6668536ab819f40ae7e169b8e
5,179
def default_charge_set(i: int) -> set:
    """Set of defect charge states.

    -1 (1) is included for positive (negative) odd number.

    E.g., default_charge_set(3) = [-1, 0, 1, 2, 3]
          default_charge_set(-3) = [-3, -2, -1, 0, 1]
          default_charge_set(2) = [0, 1, 2]
          default_charge_set(-4) = [-4, -3, -2, -1, 0]

    Args:
        i (int): an integer

    Returns:
        Set of candidate charges
    """
    lo, hi = (0, i) if i >= 0 else (i, 0)
    charges = set(range(lo, hi + 1))
    if i % 2:  # odd: also include the opposite-sign unit charge
        charges.add(-1 if i >= 0 else 1)
    return charges
5b046d372b37cd0749b9c99824329e8a065a91ca
183,161
def convert_all(string: str) -> int:
    """
    Interpret the string as a binary number where R or B means 1 and
    L or F means 0, and return its integer value.
    """
    bits = ["1" if char in "RB" else "0" for char in string]
    return int("".join(bits), 2)
d9e28f266860bffa3f025ee7465d86deb0226ad4
496,155
def not_better_than_after(minimal, n_iter):
    """Return a stop criterion that returns True once more than `n_iter`
    iterations have passed while the loss is still at least `minimal`."""
    def criterion(info):
        past_deadline = info['n_iter'] > n_iter
        still_not_better = info['loss'] >= minimal
        return past_deadline and still_not_better
    return criterion
cccc50da383f53da61c35bf000d1ac7431ef3107
639,888
from typing import List
from typing import Counter


def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
    """Extracts n-grams with order `n` from a list of tokens.

    :param tokens: A list of tokens.
    :param n: The order of n-grams.
    :return: a Counter object with n-grams counts.
    """
    ngrams = (' '.join(tokens[start:start + n])
              for start in range(len(tokens) - n + 1))
    return Counter(ngrams)
6504e8b40f3cd256c34d2aa0e44660eded49a506
51,485
import random


def random_pair(n, min_dist=1, index1=None):
    """
    Return a random pair of integers in the range [0, n) with a minimum
    distance between them.

    Args:
        n (int): Determine the range size
        min_dist (int): The minimum distance between the random pair
        index1 (int, optional): If specified, this will determine the first integer

    Returns:
        (int, int): The random pair of integers.
    """
    # First index: the caller-supplied one, or uniform over [0, n).
    r1 = random.randint(0, n - 1) if index1 is None else index1
    # Extent of the exclusion zone around r1, clipped at the range borders.
    d_left = min(r1, min_dist)
    d_right = min(n - 1 - r1, min_dist)
    # Draw r2 from the indices left once r1 and its exclusion zone are
    # removed, then shift past the zone when the draw lands at/after it.
    r2 = random.randint(0, n - 2 - d_left - d_right)
    r2 = r2 + d_left + 1 + d_right if r2 >= (r1 - d_left) else r2
    return r1, r2
41e502803710d52c6eda9cd9c6d1288c899cfb27
326,133
def jsonpath_to_variable(p):
    """Convert a JSON path starting with $. into a valid expression variable
    by replacing '$' with 'JSON_' and '.' with '_'."""
    table = str.maketrans({'$': 'JSON_', '.': '_'})
    return p.translate(table)
ee09b0a6d0a24c414f446d7ef18edb3ef118fd1e
33,912
import re


def strip_html(base):
    """Strip html tags from a string using a simple regex
    (anything between '<' and '>')."""
    tag_pattern = re.compile(r"<[^>]+>")
    return tag_pattern.sub("", base)
876543757f37c85cd5eb9c70e3d9e8c339b91d86
537,666
import math


def expected_unanchored_L2_discrepancy(num_points, dimension):
    """Expected value for unanchored L2 discrepancy of random uniform points.

    Note that this is the square root of :math:`\\mathrm{E}(T^2)`, i.e.
    ``sqrt(1.0 / n * (6 ** -d) * (1 - 2 ** -d))``. For details see
    [Morokoff1994]_.
    """
    assert num_points > 0
    assert dimension > 0
    mean_square = (1.0 / num_points) * (6 ** -dimension) * (1 - 2 ** -dimension)
    return math.sqrt(mean_square)
22f3230a6eb2c5a7cecccd97171df3cf85085f86
594,287
def inference(image, model, epsilon, adv_method):
    """Do inference on an image batch and on its adversarial counterpart.

    Returns the clean prediction, the prediction on the perturbed batch,
    and the perturbed batch itself.
    """
    prediction, _ = model(image)
    target = prediction.argmax(-1)
    perturbation = adv_method(image, model, target, epsilon)
    adversarial_image = image + perturbation
    adversarial_prediction, _ = model(adversarial_image)
    return prediction, adversarial_prediction, adversarial_image
d7de4124e6b128372c2c2c606c12e2d221e393ff
289,851
def mock_context_one_device_match_vid(mock_context, mock_device_with_vid_factory,
                                      mock_interface_settings_match,
                                      valid_vendor_id):
    """
    Fixture that yields a mock USB context whose device list contains a
    single device matching on vendor id, class, subclass, and protocol.
    """
    matching_device = mock_device_with_vid_factory(valid_vendor_id)
    matching_device.iterSettings.return_value = [mock_interface_settings_match]
    mock_context.getDeviceList.return_value = [matching_device]
    return mock_context
798117e80216865770f4a3aeb1a191e91ade80c9
178,510
def makeFields(prefix, n):
    """Generate the list of field names ``prefix1`` .. ``prefix<n>``.

    The original comprehension reused ``n`` as its loop variable, shadowing
    the parameter; that happened to work (the iterable is evaluated first)
    but was an accident waiting to happen — use a distinct index name.
    """
    return [prefix + str(i) for i in range(1, n + 1)]
435571557ef556b99c4729500f372cc5c9180052
2,992
def get_vcf_samples(path): """ Fetch sample names from a VCF file """ with open(path, 'r') as vcf_file: for line in vcf_file: if line.startswith('##'): continue if line.startswith('#'): return line.strip().split('\t')[9:] raise ValueError(('Reached a line not starting with # before ' 'finding the header line, which should start with ' 'a single #'))
9ffc269a826963b76ce48c5ab78673861c8eca5b
556,860
def getApproximateValue(value):
    """
    Return an approximate representation of any numerical value.

    Handles lists of values (recursively), and does not modify the value if
    it is not numerical. Strings/bytes are returned unchanged: the original
    implementation's ``__iter__`` check exploded them into lists of
    characters, contradicting the documented contract.

    Args:
        value: Any object or value

    Returns:
        An approximate version of the value if it is numeric, or a list of
        approximated values for iterables, otherwise the unmodified value.
    """
    if isinstance(value, (int, float)):
        approxVal = round(value, 3)
        # normalise -0.0 / tiny residues to a clean integer zero
        return 0 if approxVal == 0 else approxVal
    elif isinstance(value, (str, bytes)):
        # iterable, but must be passed through untouched
        return value
    elif hasattr(value, '__iter__'):
        return [getApproximateValue(v) for v in value]
    else:
        return value
9a8e55c1484180476e71d282cfd93a4f036a3a9f
298,061
import operator


def dot(list1, list2):
    """Return the usual inner product of two lists."""
    return sum(a * b for a, b in zip(list1, list2))
074793340d64037efd72058b75309ef7ddc36287
390,563
def _get_warmup_factor_at_iter( method: str, iter: int, warmup_iters: int, warmup_factor: float ) -> float: """ Return the learning rate warmup factor at a specific iteration. See https://arxiv.org/abs/1706.02677 for more details. Args: method (str): warmup method; either "constant" or "linear". iter (int): iteration at which to calculate the warmup factor. warmup_iters (int): the number of warmup iterations. warmup_factor (float): the base warmup factor (the meaning changes according to the method used). Returns: float: the effective warmup factor at the given iteration. """ if iter >= warmup_iters: return 1.0 if method == "constant": return warmup_factor elif method == "linear": alpha = iter / warmup_iters return warmup_factor * (1 - alpha) + alpha else: raise ValueError("Unknown warmup method: {}".format(method))
9ac22af3d2291b9c4bf205e59d7c63ba4fa1f518
479,035
def __world_coordinate_system_from(header): """ From the given NRRD header, determine the respective assumed anatomical world coordinate system. Parameters ---------- header : dict A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example). Returns ------- str The three-character uppercase string determining the respective anatomical world coordinate system (such as "RAS" or "LPS"). Raises ------ IOError If the header is missing the "space" field or the "space" field's value does not determine an anatomical world coordinate system. """ try: system_str = header["space"] except KeyError as e: raise IOError("Need the header's \"space\" field to determine the image's anatomical coordinate system.") if len(system_str) == 3: # We are lucky: this is already the format that we need return system_str.upper() # We need to separate the string (such as "right-anterior-superior") at its dashes, then get the first character # of each component. We cannot handle 4D data nor data with scanner-based coordinates ("scanner-...") or # non-anatomical coordinates ("3D-...") system_components = system_str.split("-") if len(system_components) == 3 and not system_components[0].lower() in ["scanner", "3d"]: system_str = "".join(c[0].upper() for c in system_components) return system_str raise IOError("Cannot handle \"space\" value {}".format(system_str))
3c52572ef14c3deb1bc752a8bc5daa73abcb95d8
550,092
def _calculate_n_atoms_(elemental_array): """ Calculates the total number of atoms in the system. :param elemental_array: an array of dictionaries as generated by either _create_compositional_array_() or _consolidate_elemental_array_() :return: the total number of atoms (can be a partial number in the case of alloys or partial hydrates) """ return sum(a["occurances"] for a in elemental_array)
c30ee22db974e7573b4025289f437f517d3159e6
375,432
def service_request_error_response(error):
    """
    Return the error triple passed to error handlers on the service request
    effect: (type of error, actual error, traceback object). The traceback
    slot is always None.
    """
    exc_type = type(error)
    return (exc_type, error, None)
09e9ed0fffa49b7b1e1b122f9d91227552c708e6
456,379
def get_atlasprob(data, x, y, z):
    """Extract atlas probability values at a coordinate.

    Returns the probability vector for voxel (x, y, z), converted from
    percentages to [0, 1] fractions.

    Parameters
    ----------
    data: The nii data.
    x,y,z: The coordinate value of target voxel.
    """
    return data[x, y, z, :] / 100.0
5c996f5a2fafa5be7fc708c6a7a30acaf56bacd4
351,318
def EVLAAIPSName(project, session):
    """
    Derive AIPS Name.

    AIPS file name will be project+session with project truncated to fit in
    12 characters.

    * project = project name
    * session = session code
    """
    # drop the original's redundant double assignment (`Aname = Aname=...`)
    return (project.strip() + session)[:12]
    # end EVLAAIPSName
3d3c3825ea5adf563834b8e047cb0860d5ae30ef
302,255