content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_sender(msg):
    """Return the sender nick from an IRC-style message prefix.

    Takes everything before the first '!' and drops the leading ':'.
    """
    prefix = msg.split('!', 1)[0]
    return prefix[1:]
4629c9bbef09d175326ced751656cdf35eda8493
71,076
from typing import Any
from typing import Dict
from typing import Tuple


def build_single_text_output(**kwargs) -> Tuple[Dict[str, Any], str]:
    """Create output from the dictionary returned by the request plus the
    text attached to it.

    Args:
        **kwargs:
            handled_result (Dict): response received from Qualys API.

    Returns:
        Tuple of the response dictionary and its 'TEXT' field.
    """
    handled_result = kwargs['handled_result']
    return handled_result, handled_result['TEXT']
e2ef9dc7d930e5f4734a4f622af112f01fc259fc
71,078
import math


def pivot_value(row):
    """Return the first element of *row* greater than exp(-8); 0 if none.

    NOTE(review): the threshold is math.exp(-8) (~3.35e-4), not 1e-8 —
    presumably a numerical tolerance; confirm against callers.
    """
    threshold = math.exp(-8)
    return next((value for value in row if value > threshold), 0)
4b99feee6d6c68d88a04e2013a75255594e624b2
71,080
import sqlite3 def _sqlite_try_max_variable_number(num): """ Tests whether SQLite can handle num variables """ db = sqlite3.connect(':memory:') try: db.cursor().execute( "SELECT 1 IN (" + ",".join(["?"] * num) + ")", ([0] * num) ).fetchall() return num except BaseException: return -1 finally: db.close()
cdf89a67565908bcc683a5786a3e6750e99ff3d7
71,082
import math


def chute_velocity(mass, diameter, drag=0.75, gravity=9.8, air_density=1.22):
    """Terminal velocity (m/s) of a rocket descending under a parachute.

    mass: mass of rocket in kg
    diameter: diameter of the chute in meters
    drag: drag coefficient for chute
    gravity: gravitational acceleration in m/s^2
    air_density: air density in kg/m^3
    """
    numerator = 8 * mass * gravity
    denominator = math.pi * air_density * drag * diameter ** 2
    return math.sqrt(numerator / denominator)
d07ee18e3c24fb9879baacc8a2fbc3eeeea59b7a
71,089
def evaluate_model(model, scaled_test_images, test_labels):
    """Evaluate *model* on scaled_test_images / test_labels.

    Returns a tuple (test_loss, test_accuracy) from ``model.evaluate``.
    """
    loss, accuracy = model.evaluate(scaled_test_images, test_labels, verbose=2)
    return (loss, accuracy)
29eec6cc14c22708b30a4cc6fcbe5aedc7fe663d
71,092
def _color_strip(color): """ 去除字符串中的多余空格 Parameters ---------- color : str Returns ------- str 返回去除了空格的颜色字符串 """ return color.strip().replace(' ', '')
8b7f815c64d1a9bf57cbd76db260e64f450783de
71,093
def plastic(diffuse, specular, nonlinear, intior, extior):
    """Build a rough-plastic material dict.

    Args:
        diffuse (list): RGB diffuse reflectance values.
        specular (list): RGB specular reflectance values.
        nonlinear (bool): account for nonlinear color shifts.
        intior (float): interior index of refraction.
        extior (float): exterior index of refraction.

    Returns:
        dict: material description.
    """
    return {
        "type": "roughplastic",
        "diffuse_reflectance": {
            "type": "rgb",
            "value": diffuse,
        },
        # Bug fix: 'nonlinear' was hard-coded to False, silently ignoring
        # the argument.
        'nonlinear': nonlinear,
        'int_ior': intior,
        'ext_ior': extior,
        'specular_reflectance': {
            "type": "rgb",
            "value": specular,
        },
    }
5783a5feca30246f3d5f220ee39c7385ea8725bf
71,095
def cents_to_pitch(c, tonic):
    """Convert a cents offset above *tonic* to a pitch in Hz.

    :param c: Pitch value in cents above <tonic>
    :type c: float/int
    :param tonic: Tonic value in Hz
    :type tonic: float
    :return: Pitch value, <c> in Hz
    :rtype: float
    """
    ratio = 2 ** (c / 1200)
    return ratio * tonic
7b20cfe9edbb2b7db5a1690deec5498d81face05
71,098
def url_string(it):
    """Build the download URL for the Italian regional COVID-19 CSV for
    date string *it*."""
    base = ("https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/"
            "dati-regioni/dpc-covid19-ita-regioni-")
    return f"{base}{it}.csv"
f717cf841e757255e2d334f428f5ce44ad260edd
71,101
def get_distance(highway_now: list, car_index: int) -> int:
    """
    Get the distance between a car (at index car_index) and the next car
    >>> get_distance([6, -1, 6, -1, 6], 2)
    1
    >>> get_distance([2, -1, -1, -1, 3, 1, 0, 1, 3, 2], 0)
    3
    >>> get_distance([-1, -1, -1, -1, 2, -1, -1, -1, 3], -1)
    4
    """
    gap = 0
    for cell in highway_now[car_index + 1:]:
        if cell != -1:
            # Found the next occupied cell.
            return gap
        gap += 1
    # Reached the end of the highway: wrap around and keep counting
    # from the start (index -1 scans from position 0).
    return gap + get_distance(highway_now, -1)
4f37519564097bef96a2e923f414e098e57a3650
71,102
import json


def load_dict_from_json(file_: str, key_type: type = int):
    """Read a json file, converting its keys to `key_type`.

    Inverse to `save_dict_to_json`.

    Parameters
    ----------
    file_: Name of the file to read in.
    key_type: Type to convert the keys into.

    Returns
    -------
    dct: The json file contents with converted keys.
    """
    with open(file_, 'r') as handle:
        raw = json.load(handle)
    return {key_type(key): value for key, value in raw.items()}
691426486ca160d5fdcd47763308600456d33235
71,104
def goal_fitness_reached(generation_best_fitness, goal_fitness, minimize):
    """Return True once the generation's best fitness meets the goal.

    :param generation_best_fitness: (int) Current generation best fitness.
    :param goal_fitness: (int) Goal fitness.
    :param minimize: (bool) truthy when the goal is to minimize fitness.
    :return: (bool) True if the goal fitness is reached.
    """
    reached_down = generation_best_fitness <= goal_fitness
    reached_up = generation_best_fitness >= goal_fitness
    return reached_down if minimize else reached_up
8d9a9e42542369c948a811ad2c9b8f92262c796b
71,107
import math
import numbers
from warnings import warn


def clean_nan_values(inputs: dict) -> dict:
    """Recursively replace NaN/Inf numeric values with their string form,
    in place.

    AiiDA does not support serializing NaN/Inf as node attributes, so such
    values are converted to strings (e.g. 'nan', 'inf'); a warning is
    emitted per replaced key. Nested dicts are cleaned recursively.
    Returns the same dict for convenience.
    """
    for key, value in inputs.items():
        if isinstance(value, dict):
            clean_nan_values(value)
        # not isfinite == isnan or isinf
        if isinstance(value, numbers.Real) and not math.isfinite(value):
            warn('Key <{}> has value <{}> replaced by <{}>'.format(key, value, str(value)))
            inputs[key] = str(value)
    return inputs
dcf7cb15030c40057c7732e4fba792d852820659
71,109
def wait_for_interactive(self, timeout=None):
    """
    DEPRECATED! Use wait_until_interactive command instead.

    Logs a deprecation warning, delegates to wait_until_interactive and
    returns self for chaining.
    """
    message = ('"wait_for_interactive" command is deprecated, '
               'use "wait_until_interactive" instead!')
    self.logger.warning(message)
    self.wait_until_interactive(timeout=timeout)
    return self
e24f38b33a9c8f9a39263a1cbda673f80fa49bd7
71,113
def get_tagged_atoms_from_mol(mol):
    """Take an RDKit molecule and return its atom-map-tagged atoms together
    with their map numbers.

    Parameters
    ----------
    mol: rdkit.Chem.Mol
        RDKit molecule.

    Returns
    -------
    atoms: List[rdkit.Chem.Atom]
        List of tagged atoms.
    atom_tags: List[str]
        List of atom-mapping numbers (as strings).
    """
    tagged = [atom for atom in mol.GetAtoms() if atom.HasProp("molAtomMapNumber")]
    tags = [str(atom.GetProp("molAtomMapNumber")) for atom in tagged]
    return tagged, tags
b32d57a2feb7196907cdf70bf62371f58fe66c6a
71,117
import logging
import time

import requests


def request_url_json(url: str, max_retries: int = 1, retry_interval: int = 10) -> dict:
    """Get JSON object version of response to GET request to given URL.

    Handles exception ReadTimeout by retrying.

    Args:
        url: URL to make the GET request.
        max_retries: Number of timeout retries to be made before returning
            empty dict.
        retry_interval: Wait interval in seconds before retrying.

    Returns:
        JSON decoded response from the GET call.
        Empty dict is returned in case the call times out after max_retries.
    """
    logging.info('Requesting url: %s', url)
    try:
        req = requests.get(url)
        if req.status_code == requests.codes.ok:
            return req.json()
        # Non-OK responses surface the status code to the caller.
        logging.error('HTTP status code: %s', req.status_code)
        return {'http_err_code': req.status_code}
    except requests.exceptions.ReadTimeout:
        if max_retries > 0:
            # Bug fix: the sleep was hard-coded to 10s, silently ignoring
            # the retry_interval parameter.
            logging.warning('Timeout occurred, retrying after %ss.', retry_interval)
            time.sleep(retry_interval)
            return request_url_json(url, max_retries - 1, retry_interval)
        return {}
33f5e4eb61037ac36fc5a8d979f04e17b73fc197
71,122
def attr_setdefault(obj, name, value):
    """Like dict.setdefault, but for object attributes: set *name* to
    *value* only if the attribute is missing, then return its value."""
    try:
        return getattr(obj, name)
    except AttributeError:
        setattr(obj, name, value)
        return getattr(obj, name)
3bad983947ced3fd413b266ec30149818193d1e2
71,128
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely composed of
    letters in the hand. Otherwise, returns False.
    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if word not in wordList:
        return False
    # Work on a copy so the caller's hand is untouched.
    remaining = hand.copy()
    for letter in word:
        if remaining.get(letter, 0) <= 0:
            return False
        remaining[letter] -= 1
    return True
ccb8ae2bb69f3363db4a9212bd71d7600152fd04
71,132
def parse_result(is_emails_empty, is_phones_empty, dict_of_options):
    """
    Control the result of parsing an html document for emails and phones.

    :param bool is_emails_empty: True if the list of found emails is empty
    :param bool is_phones_empty: True if the list of found phones is empty
    :param dict dict_of_options: options used to find emails and phones;
        a truthy value marks an option as already tried
    :return: result string: "trying" while data is missing and untried
        options remain, "got_some" when data is missing but options are
        exhausted, "got_all" when both emails and phones were found
    :rtype: str
    """
    missing_data = is_emails_empty or is_phones_empty
    options_exhausted = all(dict_of_options.values())
    if not missing_data:
        return "got_all"
    # Basically - try every option if there are still options.
    return "got_some" if options_exhausted else "trying"
e828da7066317c52fa004dd3558179348eb3b31e
71,136
from typing import List
from pathlib import Path


def prepare_ffmpeg_list(files_list: List[Path]) -> str:
    """Prepare the contents of an ffmpeg concat-demuxer input list file.

    Each input path becomes one line of the form: file '<path>'

    Args:
        files_list: paths of the media files to concatenate, in order.

    Returns:
        The list-file contents as a single string.
    """
    # Bug fix: the loop previously emitted a literal placeholder on every
    # line instead of interpolating the filename.
    return ''.join(f"file '{filename}'\n" for filename in files_list)
2ab45751f27c87ed9b464f91df3b71dfe935a6f8
71,146
import shlex


def shell_join(array):
    """
    Return a shell-quoted version of the input array.

    :param array: input array of strings
    :return: the shell-quoted string, items separated by single spaces
    """
    # pipes.quote was undocumented, and the pipes module is deprecated and
    # removed in Python 3.13; shlex.quote is the supported equivalent.
    return " ".join(shlex.quote(item) for item in array)
8a964df48f09d4e038e0353f8c8941dfcfa720c6
71,147
def oddevens(start: int, end: int, /) -> tuple:
    """
    Return a tuple (odds, evens) of the numbers in [start, end].

    Parameters:
        start (int): The start of the range
        end (int): The end of the range
    """
    odds, evens = [], []
    for number in range(start, end + 1):
        (evens if number % 2 == 0 else odds).append(number)
    return (odds, evens)
4f0d467081da4a6d02691b91182b46226b990e1d
71,153
def parse_metrics(fileIn):
    """Parse a text file of '='-delimited key/value pairs into a dict.

    Note: for lines containing multiple '=', the key is the text before the
    first '=' and the value the text after the last one (original semantics
    preserved).

    Args:
        fileIn: path of the file to parse.

    Returns:
        dict mapping stripped keys to stripped values.
    """
    ret = {}
    # Use a context manager so the file is closed even if parsing fails.
    with open(fileIn, "r") as f:
        for line in f:
            parts = line.strip().split("=")
            ret[parts[0].strip()] = parts[-1].strip()
    return ret
29ac1c866560588f2ec27034d674c718b227ace2
71,158
import random


def prevalence_classify(training_set, test_set, label):
    """Assumes training_set & test_set are lists of examples.

    Uses a prevalence-based classifier to predict whether each example in
    test_set is of class *label*: it guesses "label" with probability equal
    to the label's prevalence in the training set.

    Returns number of true positives, false positives, true negatives,
    and false negatives.
    """
    matches = sum(1 for example in training_set if example.get_label() == label)
    prob_label = matches / len(training_set)
    true_pos = false_pos = true_neg = false_neg = 0
    for example in test_set:
        # One random draw per example, before reading its label
        # (preserves the RNG consumption order).
        guessed = random.random() < prob_label
        actual = example.get_label() == label
        if guessed and actual:
            true_pos += 1
        elif guessed:
            false_pos += 1
        elif not actual:
            true_neg += 1
        else:
            false_neg += 1
    return true_pos, false_pos, true_neg, false_neg
a3b7704d9cd126ea18572e90342cf2976e9cf84e
71,162
def build_prefix_table(pattern):
    """
    Build the KMP prefix (failure) table: entry i holds the length of the
    longest proper prefix of pattern[:i+1] that is also its suffix.

    Time complexity: O(n), n = Length of pattern
    Space complexity: O(n), n = Length of pattern
    """
    if not pattern:
        return []
    table = [0]
    length = 0  # length of the current longest prefix-suffix
    index = 1
    while index < len(pattern):
        if pattern[index] == pattern[length]:
            length += 1
            table.append(length)
            index += 1
        elif length:
            # Fall back to the next-shorter candidate prefix.
            length = table[length - 1]
        else:
            table.append(0)
            index += 1
    return table
a02317edf0a22d0226b9ded54af914213f7996a6
71,171
def remove(tuple1, tuple2):
    """Return tuple1 without all the elements present in tuple2.

    A None tuple2 means nothing is removed."""
    if tuple2 is None:
        return tuple1
    kept = (item for item in tuple1 if item not in tuple2)
    return tuple(kept)
3daa839a9d7d93c1251da1cfb875a8d761bd20c5
71,173
def _build_static_transition_impl(settings, attr): """ Transition that enables static builds with CGo and musl for Go binaries. """ return { "@io_bazel_rules_go//go/config:static": True, "//command_line_option:crosstool_top": "//build/toolchain/musl-host-gcc:musl_host_cc_suite", }
2fdc38b7eb77d1dbab453d89a85aed9f725857e7
71,174
def is_six_connected_neighbor(coord_first, coord_second):
    """
    Determine whether two voxels are six-connected neighbors, i.e. their
    Manhattan distance is exactly 1.

    Note: Assumes voxel coordinates are integers.

    :type coord_first: tuple[int]
    :param coord_first: first coordinate set
    :type coord_second: tuple[int]
    :param coord_second: second coordinate set
    :return: whether the second voxel is a six-connected neighbor of the first
    """
    (x1, y1, z1) = coord_first
    (x2, y2, z2) = coord_second
    manhattan = abs(x2 - x1) + abs(y2 - y1) + abs(z2 - z1)
    return manhattan == 1
08b677a85d17a5406a25add357330d7f5572ea7c
71,176
import fnmatch


def match_layer_names(names, patterns):
    """Match image layer names against a pattern.

    `patterns` is a str that can contain multiple whitespace-delimited
    patterns. Patterns can include "*" (match everything), "?" (single
    character), "[seq]" and "[!seq]".

    Parameters
    ----------
    names: Sequence of str, required
        The image layer names to be matched.
    patterns: str, required
        Whitespace delimited collection of patterns to match against
        layer names.

    Returns
    -------
    names: sequence of str
        Layer names that match `patterns`, in input order.
    """
    pattern_list = patterns.split()
    return [name for name in names
            if any(fnmatch.fnmatchcase(name, pattern) for pattern in pattern_list)]
4e6db782f8324ee0626bdbbda5f16becfde4a4b8
71,177
def fill_args(args):
    """Fill additional args for submission.

    Mutates *args* in place and returns it.
    """
    submission_defaults = {
        'agent_module': 'dstar_sgolam_walker',
        'checkpoint_path': None,
        'exp_config': 'configs/baselines/dstar_proto_sgolam.yaml',
        'num_episodes': 25,
    }
    for attr_name, attr_value in submission_defaults.items():
        setattr(args, attr_name, attr_value)
    return args
f244713ca048f21d086abb88ef0ed9d62e45132e
71,181
from typing import Union
from pathlib import Path


def normalize_path(path: Union[str, Path]) -> str:
    """
    Turn input file paths into a format that HTCondor can understand.

    All local file paths are turned into posix-style paths (even on
    Windows!); URL-like inputs (containing '://') pass through unchanged.
    """
    if isinstance(path, str):
        if '://' in path:
            # URL-like input file path: leave untouched.
            return path
        path = Path(path)
    return path.absolute().as_posix()
45a4daff9b886441207f24e70e47a007f63357df
71,182
def unroll_reflection(thread_count):
    """
    If a threadcount is symmetrical, as signified by the /, "unroll" it by
    appending the non-terminal stripes in reverse:

    >>> unroll_reflection('B/1 LB1 Y/1')
    'B/1 LB1 Y/1 LB1'

    NOOP if not symmetrical

    >>> unroll_reflection('B1 LB1 Y1')
    'B1 LB1 Y1'
    """
    if '/' not in thread_count:
        return thread_count
    blocks = thread_count.split(' ')
    mirrored = blocks[-2:0:-1]
    return ' '.join(blocks + mirrored)
613aba4fd8d64c4626c511e60d2620547cb877a3
71,183
def score_corrupt_char(char: str) -> int:
    """Return score for corrupt character, else 0"""
    if char == ")":
        return 3
    if char == "]":
        return 57
    if char == "}":
        return 1197
    if char == ">":
        return 25137
    return 0
84be2d44dc7872a07f5a1c239a7f8ca20df0c242
71,186
def hamming_weight(n, subseq=None):
    """Return the Hamming weights of all non-negative integers in
    [0, 2**n).

    Generated via the recurrence: the weights of [2**k, 2**(k+1)) are the
    weights of [0, 2**k) each plus one. If 'subseq' holds all the terms in
    [0, 2**(n-1)), the rest can be generated more efficiently (the passed
    list is extended in place, as before).
    """
    weights = subseq if subseq else [0]
    while len(weights) < 2 ** n:
        # Snapshot the current prefix, then append its terms + 1.
        weights.extend(w + 1 for w in list(weights))
    return weights
445d0bf32b783daf7cc0572985341b32b9bca5c8
71,188
def gender_similarity_scorer(gender_1: str, gender_2: str):
    """Compare two gender strings: 0 for a match, -1 penalty otherwise.

    Conservative assumption: if either gender is None or an empty string,
    it is considered a match against the comparator.
    """
    is_match = (
        gender_1 == gender_2
        or gender_1 in (None, "")
        or gender_2 in (None, "")
    )
    return 0 if is_match else -1
eb982ac5a6f18e66caf911254eaf1bf43496183d
71,189
from typing import Callable


def has_non(function: Callable) -> Callable:
    """
    Take a character-predicate *function* and return its logical "converse":
    a callable returning True if ANY character of the input string fails
    *function*.

    For example, given `is_greek` (True iff a string is entirely Greek),
    `has_non(is_greek)` returns a callable that is True if the string
    contains any non-Greek character.

    Note the produced function may be quite inefficient, but it is a handy
    way to whip out "converse" forms of validating functions.
    """
    def wrapped(string: str) -> bool:
        if not isinstance(string, str):
            raise ValueError("invalid input - not a string")
        for character in string:
            if not function(character):
                return True
        return False

    return wrapped
f95c140bb7d295b8557dc9d7ddb3e42b1bf74ebf
71,190
import csv


def openfile(filename, separator=";"):
    """
    Open a csv file and return its rows as a list of dicts.

    Args:
        filename: path of the CSV file (utf-8 encoded).
        separator: field delimiter. Bug fix: this parameter was previously
            accepted but ignored (delimiter was hard-coded to ";").

    Returns:
        list of dicts keyed by the fixed fieldnames.
    """
    fieldnames = ["word", "document", "coldoc", "page", "line", "wordnr", "type"]
    with open(filename, newline='', encoding="utf-8") as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=fieldnames,
                                delimiter=separator, quotechar='"')
        return list(reader)
fb551ce9d3e23519e8e59b03e2a326e012189caf
71,192
import torch


def sinkhorn(log_alpha, n_iters=20):  # torch version
    """Perform incomplete Sinkhorn normalization on log_alpha.

    By Sinkhorn & Knopp (1967), a well-behaved positive matrix can be made
    doubly stochastic by successive row and column normalization. For
    positivity the effective input is exp(log_alpha); for stability all
    iterations run in log-space, exponentiating only at return time.

    Args:
        log_alpha: a 2D tensor of shape [N, N] (or batched [B, N, N]).
        n_iters: number of Sinkhorn iterations (~20 suffices for N~100).

    Returns:
        A 3D tensor of close-to-doubly-stochastic matrices (2D inputs get
        batch size 1).
    """
    n = log_alpha.size()[1]
    log_alpha = log_alpha.view(-1, n, n)
    for _ in range(n_iters):
        # Row normalization (in log space).
        row_norm = torch.logsumexp(log_alpha, dim=2, keepdim=True)
        log_alpha = log_alpha - row_norm.view(-1, n, 1)
        # Column normalization (in log space).
        col_norm = torch.logsumexp(log_alpha, dim=1, keepdim=True)
        log_alpha = log_alpha - col_norm.view(-1, 1, n)
    return torch.exp(log_alpha)
d72f33f4415f0676221d069cddd08c795d458193
71,193
def ill_notifications_filter(record, action, **kwargs):
    """Filter notifications.

    Returns whether the notification should be sent for the given action;
    raises KeyError for unhandled actions.
    """
    sendable_actions = {
        "extension_accepted": False,
        "extension_declined": False,
        "extension_requested": True,
    }
    return sendable_actions[action]
642b6734c86b0a6f9a753a38b5cc4ff148a3e82a
71,194
def open_output(name, mode='a+'):
    """
    Open *name* for writing and return the file object.

    The special name '-' (conventionally stdout) returns False instead of
    a file object, leaving the caller to fall back to stdout.
    """
    if name != '-':
        return open(name, mode)
    return False
910c7f46581a8652d11a76c2b07d9430cb436037
71,196
def fibonacci(n: int) -> int:
    """Iterative implementation of the Fibonacci algorithm.

    fibonacci(0) = 0
    fibonacci(1) = 1
    fibonacci(k) = fibonacci(k-1) + fibonacci(k-2)

    :param n: non-negative integer index
    :return: the n-th Fibonacci number
    """
    first, sec = 0, 1
    if n == 0:
        return first
    elif n == 1:
        return sec
    # Bug fix: the loop previously ran range(2, n), stopping one step
    # short and returning fib(n-1) for every n >= 3; it must include n.
    for _ in range(2, n + 1):
        first, sec = sec, first + sec
    return sec
c8c7495fa10bd9599fdafe7f24388aa10c7c8e31
71,197
def get_col(model, attribute_name):
    """Introspect the SQLAlchemy ``model``; return the column object
    for ``attribute_name``. E.g.: ``get_col(User, 'email')``
    """
    mapper = model._sa_class_manager.mapper
    return mapper.columns[attribute_name]
a2e81d77e3f0b7c796b3cbc65b0b792049dcf8af
71,200
def gravity_to_plato(gravity: float) -> float:
    """
    Convert specific gravity to degrees Plato.
    """
    plato = 259.0 - (259.0 / gravity)
    return plato
6b14c26923eaf525b9b1fc14e3e2e22b9efa7e2d
71,205
import json


def _process_task_events(task_id, prev_events, tfr_client):
    """
    Process a globus task event list.

    This splits the events up into message events, which are events where
    the details field is a string, and json events, which is where the
    details field is a json object.

    Parameters
    ----------
    task_id :
        The globus task whose events are fetched.
    prev_events : `set`
        A set of already processed events (as tuples of item pairs).
    tfr_client : `globus_sdk.TransferClient`
        The transfer client to use to get the events.

    Returns
    -------
    prev_events : `set`
        The complete list of all events processed so far (old and new).
    json_events : `tuple` of `dict`
        All the new events with json bodies (details parsed into dicts).
    message_events : `tuple` of `dict`
        All the new events with message bodies.
    """
    # Convert all the events into a (key, value) tuple pair so they are
    # hashable and can live in a set.
    events = set(map(lambda x: tuple(x.data.items()), tfr_client.task_event_list(task_id, None)))
    # Drop all events we have seen before
    new_events = events.difference(prev_events)

    # Filter out the events which are json (start with {).
    # NOTE: events with no "details" key default to "" and count as
    # message events.
    json_events = set(filter(lambda x: dict(x).get("details", "").startswith("{"), new_events))
    # All the other events are message events
    message_events = tuple(map(dict, (new_events.difference(json_events))))

    def json_loader(x):
        """Modify the event so the json is a dict."""
        x['details'] = json.loads(x['details'])
        return x

    # If some of the events are json events, load the json.
    if json_events:
        json_events = tuple(map(dict, map(json_loader, map(dict, json_events))))
    else:
        json_events = tuple()

    return events, json_events, message_events
b6d676af2b05ca4781c1e64cca35af6fb66004d3
71,206
import json


def GetComponentsToBuild(path):
    """Parse components from config file.

    Args:
        path: (str) file path to config file.

    Returns:
        Object: a json object of config file content.
    """
    with open(path) as config_file:
        content = config_file.read()
    return json.loads(content)
3e7609803f9630146f3be3959c00aacc899a6aa0
71,208
def _get_image_extra_build_command_options(image_options, ns): """ Render extraBuildCommandOptions from chartpress.yaml that could be templates, using the provided namespace that contains keys with dynamic values such as LAST_COMMIT or TAG. Args: image_options (dict): The dictionary for a given image from chartpress.yaml. Strings in `image_options['extraBuildCommandOptions']` will be rendered and returned. ns (dict): the namespace used when rendering templated arguments """ options = image_options.get("extraBuildCommandOptions", []) return [str(option).format(**ns) for option in options]
21df31c05368b29deb1a919abc3a7f904adeb18d
71,210
def dict_key_editer(var):
    """
    Helper function for changing MIMIC-II specific measure names to align
    with MIMIC-III. MIMIC-II has no validation set, hence test losses
    produced during training are renamed to dev losses.
    """
    return "loss_dev" if var == "loss_test" else var
d0a8d902aa330563a93f579ef7c5487ec744aaf9
71,212
def get_table_number_of_rows(cursor, table_name):
    """
    Return the number of rows of a table.

    NOTE(review): *table_name* is interpolated into the SQL text (quoted),
    so it must come from a trusted source.
    """
    rows = cursor.execute(f"SELECT COUNT(*) FROM '{table_name}';").fetchall()
    # COUNT(*) always yields a single one-column row.
    assert len(rows) == 1
    assert len(rows[0]) == 1
    return rows[0][0]
310d64d0b25c8729a838a5c5335b80f3a0bb8e6a
71,219
import base64


def encode_bytes(_bytes: bytes) -> str:
    """
    Encode bytes to a base64 bytestring.

    Parameters
    ----------
    _bytes : ``bytes``

    Returns
    -------
    bytestr : ``str``
        base64 (``encodebytes``) representation, decoded as utf-8.
    """
    encoded = base64.encodebytes(_bytes)
    return encoded.decode("utf-8")
b5e4d0cf4b6461f2be016507eb715fe316ea1766
71,220
from pathlib import Path


def enterprise_installer() -> Path:
    """
    Return the path to an installer for DC/OS Enterprise master.
    """
    return Path('/tmp') / 'dcos_generate_config.ee.sh'
066a24155132ac69a8f5cd4a8ddbd75a2dc62135
71,227
from typing import List


def recall_at_k(ranks: List[int], k: int) -> float:
    """Compute the percentage of comparative sets where the positive image
    has a rank of <= k.

    Args:
        ranks: List of ranks of the positive example in each comparative set
        k: Threshold below which the rank counts as a true positive

    Returns:
        Percentage (rounded to one decimal) of comparative sets with
        rank <= k
    """
    hits = sum(1 for rank in ranks if rank <= k)
    return round(100.0 * hits / len(ranks), 1)
1b82fa66dccf1d9b3256c0146d4947e9b1b2b683
71,229
def compute_min_distance_mendelian(proband_allele, parent_alleles):
    """Compute the smallest distance between the proband STR expansion size
    and the parental STR expansion sizes.

    Args:
        proband_allele (int): the proband's allele length.
        parent_alleles (list of allele sizes): list of parental allele lengths.

    Return:
        int: the smallest distance (in base-pairs) between one of the
        parent_alleles and the proband_allele.
    """
    proband_size = int(proband_allele)
    return min(abs(proband_size - int(parent)) for parent in parent_alleles)
ff8004a0e4f539e98bdbe7e2f5df14838a599cbd
71,231
import re


def update_page_number(url, page=1):
    # type: (str, int) -> str
    """Updates or appends the 'page' parameter for a URL"""
    pattern = r"page=(\d+)"
    if re.search(pattern, url):
        return re.sub(pattern, "page={}".format(page), url)
    return "{}&page={}".format(url, page)
4967cde346bb68ec79a2abf27a5a4a82ca4d1ab6
71,233
def corners_to_wh(prediction_bboxes):
    """
    Convert boxes in place from corner format to width/height format:
    (x_left, y_left, x_right, y_right) --> (x_left, y_left, width, height)
    """
    prediction_bboxes[:, 2] -= prediction_bboxes[:, 0]
    prediction_bboxes[:, 3] -= prediction_bboxes[:, 1]
    return prediction_bboxes
a600074ed0c3f7aa6f6fa2251b3050662bdeb255
71,234
import math


def calculate_plane_size(aspect_ratio, fov, distance):
    """Calculates the width and height of a plane at the specified distance
    using the FOV of the frustrum and aspect ratio of the viewport.

    :param float aspect_ratio: The aspect ratio of the viewport.
    :param float fov: The vertical FOV of the frustrum, in degrees.
    :param float distance: The distance from the origin/camera of the plane
        to calculate.
    :rtype: A tuple of two floats: width and height: The width and height
        of the plane.
    """
    # http://www.songho.ca/opengl/gl_transform.html
    # http://nehe.gamedev.net/article/replacement_for_gluperspective/21002/
    # Bug fix: the original assigned math.radians(fov) to a variable named
    # 'tangent' without ever applying tan(). Per gluPerspective, the
    # frustum half-extent at 'distance' is distance * tan(fov / 2).
    half_height = distance * math.tan(math.radians(fov) / 2.0)
    half_width = half_height * aspect_ratio
    return half_width * 2.0, half_height * 2.0
569c70fecf840c3f7333f9c02514e396614ea9e6
71,239
def get_user_is_authenticated(user):
    """Check if the user is authenticated.

    Compatibility shim supporting both Django 1.x (method) and 2.x
    (property returning a proper boolean, so calling it would fail).
    """
    is_auth = user.is_authenticated
    return is_auth() if callable(is_auth) else is_auth
4f980060a0ffc31a88868200b1937c2669cb5560
71,240
def extract_comparator_expr(comparative_step):
    """Extract comparator and numeric expression of a comparative QDMR step.

    Parameters
    ----------
    comparative_step : str
        string of the QDMR comparative step

    Returns
    -------
    str
        string representation of the comparator expression
    """
    step = comparative_step
    # Phrases signalling an equality-style comparison.
    equality_phrase = ('same as' in step) or ('equal' in step) or \
        ('is' in step) or ('was' in step) or ('are' in step)
    if 'at least' in step:
        return '>='
    if 'at most' in step:
        # NOTE(review): '=<' (not '<=') is preserved from the original —
        # confirm it matches the downstream QDMR grammar.
        return '=<'
    if ('more' in step) or ('higher' in step) or ('larger' in step):
        return '>'
    if ('less' in step) or ('smaller' in step) or ('lower' in step):
        return '<'
    if ('not ' in step) and equality_phrase:
        return '!='
    if ('not ' not in step) and equality_phrase and ('any' not in step):
        return '='
    if 'contain' in step:
        return 'CONTAINS'
    return 'FILTER'
4eefa45ccc15b6f241fe216e5b58e0947765af0c
71,242
def get_unique_histogram_value(histogram):
    """If histogram has a unique value, return that value.

    Otherwise returns a string of the format "Not Unique. {count: <number
    of values>, sampleValues: <a representative sample of values>}". If no
    value is found, returns an empty string.

    Returning strings instead of raising in these failure cases is
    intentional: cluster telemetry / chrome trace processor results often
    contain all kinds of errors, and we don't want to choke on them while
    still surfacing their presence.
    """
    if 'running' not in histogram:
        return ''
    running_stats = histogram['running']
    # Per the histogram format: index 1 is the max, index 4 is the min.
    if running_stats[1] == running_stats[4]:
        return running_stats[4]
    return "Not Unique. count: {count}, sampleValues: {sampleValues}".format(
        count=running_stats[0],
        sampleValues=histogram.get('sampleValues', []))
c7db45fc09eb5d85009f8d9fbe4b31af168a5c88
71,248
def term_A(P0, e0):
    """Term A in the main equation.

    P0 is the atmospheric pressure at the site (in hPa).
    e0 is the water vapor pressure at the site (in hPa)."""
    pressure_part = 0.002357 * P0
    vapor_part = 0.000141 * e0
    return pressure_part + vapor_part
5cf80199ad432ec8b8b5a34d383c7bfe59a81bd2
71,251
from typing import List
import colorsys


def get_colors(n_colors: int, alpha: float = 1.0) -> List[str]:
    """Get a list of colors to use in plotting.

    Args:
        n_colors: How many colors to return.
        alpha: What opacity value to use (0 to 1).

    Returns:
        A list of rgba string colors.
    """
    if n_colors <= 10:
        # Fixed qualitative palette for small counts.
        base = ['1,115,178', '222,143,5', '2,158,115', '213,94,0',
                '204,120,188', '202,145,97', '251,175,228', '148,148,148',
                '236,225,51', '86,180,233']
        palette = [f'rgba({rgb},{alpha})' for rgb in base]
    else:
        # Evenly spaced hues for larger counts.
        hues = [(i + 0.01) / n_colors for i in range(n_colors)]
        hues = [hue - 1 if hue >= 1 else hue for hue in hues]
        triples = [colorsys.hls_to_rgb(hue, 0.6, 0.95) for hue in hues]
        palette = [f'rgba({int(256*r)},{int(256*g)},{int(256*b)},{alpha})'
                   for r, g, b in triples]
    return palette[:n_colors]
ee2c20b9132ec29336ce03c54c8d0d3b103c36c7
71,253
def get_region_labels_txt(regions_file):
    """Read the labels of custom ILAMB regions from a text file.

    The label is whatever precedes the first comma on each line.

    Parameters
    ----------
    regions_file : str
        A text file containing ILAMB custom region definitions.

    Returns
    ----------
    list
        A list of custom region labels, one per line of the file.
    """
    with open(regions_file, 'r') as fp:
        return [line.split(',')[0] for line in fp]
96b41c02fb4cd76862463a1bb2e0d0fd465dcfa4
71,255
def clear_aromatic_S(my_mol):
    """Replace every aromatic sulphur in the molecule with an aromatic
    oxygen carrying an unusual isotope (17O), so the substitution can be
    recognised later.

    Takes an RDKit molecule and mutates its atoms in place.
    Returns the updated molecule.
    """
    for atom in my_mol.GetAtoms():
        # Atomic number 16 == sulphur; only touch aromatic ones.
        if atom.GetAtomicNum() == 16 and atom.GetIsAromatic():
            atom.SetAtomicNum(8)   # oxygen
            atom.SetIsotope(17)    # tag with a rare isotope
    return my_mol
f8614c49800ae3c7c41ffacf1fa5463220757502
71,257
def train(features, labels, model, loss_fun, optimizer, n_epochs):
    """Run a basic gradient-descent training loop.

    Args:
        features (torch.Tensor): inputs, shape torch.Size([n_samples, 1])
        labels (torch.Tensor): targets, shape torch.Size([n_samples, 1])
        model (torch nn.Module): the neural network
        loss_fun (function): loss function
        optimizer (function): optimizer
        n_epochs (int): number of training iterations

    Returns:
        list: the training loss recorded at every epoch
    """
    history = []
    for _ in range(n_epochs):
        optimizer.zero_grad()            # reset accumulated gradients
        predictions = model(features)    # forward pass
        epoch_loss = loss_fun(predictions, labels)
        epoch_loss.backward()            # backward pass
        optimizer.step()                 # parameter update
        history.append(epoch_loss.item())
    return history
6e7ca301cf88cc5889e94cfa7f59bdf890d12089
71,260
def sign(number):
    """Return the sign of a number: 1 if positive, -1 if negative, 0 if zero."""
    if number > 0:
        return 1
    if number < 0:
        return -1
    return 0
f909d94e7f92fcf924221a95308d73cccdf18c17
71,269
def _parse_chap_header(headers): """Parses the X-CHAP header. CHAP headers are encoded like: X-CHAP:request:negz X-CHAP:challenge:butts X-CHAP:response:moo X-CHAP:token:zomgauthentication Each HTTP request or response should have a single X-CHAP header. Args: headers: A case insensitive dictionary of HTTP headers. Returns: A tuple like (chap_header_key, chap_header_value). """ return tuple([s.strip() for s in headers['X-CHAP'].split(':', 1)])
68e4bd2d5605f78bd910ecea3f610e7e0eaaa408
71,276
def calculate_utilization(billable_hours, target_hours):
    """Return utilization (billable hours / target hours) as a display string.

    Special cases: a None target means no hours were submitted, a zero
    target marks non-billable work, and zero/missing billable hours render
    as '0.00%'.
    """
    if target_hours is None:
        return 'No hours submitted.'
    if target_hours == 0:
        return "Non-billable"
    if not billable_hours:
        return '0.00%'
    percentage = billable_hours / target_hours * 100
    # '.3' keeps three significant digits, matching existing report output.
    return f'{percentage:.3}%'
3a411892e92e1ffeae48661ef5df5c89062f24c0
71,277
def virtup_identity_file(instance):
    """Return the ssh identity-file path configured on the virt_up instance.

    Reads instance.meta['user']['ssh_identity'].
    """
    user_meta = instance.meta['user']
    return user_meta['ssh_identity']
15bff5c5e840ce1b5811d551d0e616859e397eeb
71,288
from typing import List
from typing import Set


def get_jurisdiction_common_members(a: List[int], b: List[int]) -> Set[int]:
    """Return the set of unique members common to both arrays.

    The original computed the intersection twice and special-cased the empty
    result; ``set(a) & set(b)`` already yields an empty set when there is no
    overlap, so a single expression suffices.

    :param List[int] a: An array you want to compare against
    :param List[int] b: The other array you want to compare against
    :return Set[int]: elements present in both a and b (possibly empty)
    """
    return set(a) & set(b)
bc59ccffd8966cc53d75e517b5d5cfa331474dff
71,290
def char_histogram(string):
    """Return a dict mapping each character of *string* to the number of
    times it occurs.
    """
    counts = {}
    for ch in string:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
f660d5cb56898104a401280a5bfa38e1aaa5ccd1
71,297
def createWindDict(windFile):
    """Parse ``winds.txt`` to map station ids to their coordinates.

    Blank lines and lines starting with '#' are ignored; every other line is
    expected to be 'stationId,latitude,longitude'.

    Args:
        windFile (str): Path for ``winds.txt``.

    Returns:
        dict: station id -> [longitude, latitude] (note the lng/lat order).
    """
    windDict = {}
    with open(windFile, 'r') as f:
        for raw in f:
            record = raw.strip()
            # Skip comments and empty lines.
            if not record or record.startswith('#'):
                continue
            fields = record.split(',')
            # fields: [stationId, latitude, longitude]; stored as [lng, lat].
            windDict[fields[0]] = [fields[2], fields[1]]
    return windDict
cf136e36604d80688e26d2902ce4e4708d10b57a
71,299
def noNamespace(id: str) -> str:
    """Strip a single namespace prefix from an identifier.

    Example: 'm2m:cnt' -> 'cnt'. An id without exactly one ':' is returned
    up to its first ':' (or unchanged if there is none).
    """
    parts = id.split(':')
    if len(parts) == 2:
        return parts[1]
    return parts[0]
4cde322e722c528e422823aefd773e06afc3035a
71,303
import torch


def eval_metrics(preds: torch.Tensor, labels: torch.Tensor) -> dict:
    """Compute binary-classification metrics: precision, recall, f1, accuracy.

    *preds* and *labels* are 0/1 tensors of the same shape; all ratios fall
    back to 0.0 when their denominator is zero.
    """
    def count_pairs(pred_value, label_value):
        # Number of positions where preds/labels take the given pair of values.
        return ((preds == pred_value) & (labels == label_value)).cpu().sum().item()

    tp = count_pairs(1, 1)
    fp = count_pairs(1, 0)
    fn = count_pairs(0, 1)
    tn = count_pairs(0, 0)

    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    acc = (tp + tn) / (tp + fp + fn + tn) if tp + fp + fn + tn else 0.0
    return {"precision": precision, "recall": recall, "f1": f1, "acc": acc}
dc0c641dba387f77f5c2af67835953d2602fc10a
71,307
def flatten_object_hierarchy(obj):
    """Flatten an object hierarchy in preorder (parent before children).

    :param obj: a blender obj
    :return: a list containing the obj and all of its descendants.
    """
    collected = []
    stack = [obj]
    while stack:
        current = stack.pop()
        collected.append(current)
        # Push children reversed so the first child is visited first,
        # matching a recursive preorder traversal.
        stack.extend(reversed(current.children))
    return collected
9f0152da275f0a1ce100753eefc40dcf445223dd
71,309
def parse_categorised_lists(
    data, header_formatter, formatter, list_parser, sorted_keys=None
):
    """Render categorised data as one concatenated string.

    *data* maps each category to a list of dicts. For every category (in
    *sorted_keys* order, defaulting to the keys sorted descending) the output
    is the formatted header followed by the parsed list.
    """
    if sorted_keys is None:
        sorted_keys = sorted(data, reverse=True)
    pieces = []
    for key in sorted_keys:
        pieces.append(header_formatter(key))
        pieces.append(list_parser(data[key], formatter))
    return "".join(pieces)
808a52203f9db9df9823299acc68ea2d8daa79da
71,319
from typing import Dict


def add_to_words_subtree(words_subtree: Dict, rest_of_word: str) -> Dict:
    """Insert *rest_of_word* into the trie *words_subtree* (mutated in place).

    A '.' key with value None marks the end of a word. The original base
    case returned a fresh ``{'.': None}``, which silently discarded any
    existing children when a word that is a prefix of an already-inserted
    word was added (e.g. adding 'b' after 'be' dropped the 'e' branch);
    the end-of-word marker is now merged into the existing subtree.

    >>> add_to_words_subtree({}, 'b')
    {'b': {'.': None}}
    >>> add_to_words_subtree({}, 'bar')
    {'b': {'a': {'r': {'.': None}}}}
    >>> add_to_words_subtree({'r': {'.': None}}, 'n')
    {'r': {'.': None}, 'n': {'.': None}}
    >>> add_to_words_subtree({'r': {'.': None}}, 'ns')
    {'r': {'.': None}, 'n': {'s': {'.': None}}}
    >>> add_to_words_subtree({'b': {'a': {'r': {'.': None}}}}, 'be')
    {'b': {'a': {'r': {'.': None}}, 'e': {'.': None}}}
    >>> add_to_words_subtree({'b': {'a': {'r': {'.': None}}}}, 'am')
    {'b': {'a': {'r': {'.': None}}}, 'a': {'m': {'.': None}}}
    >>> add_to_words_subtree({'b': {'e': {'.': None}}}, 'b')
    {'b': {'e': {'.': None}, '.': None}}
    """
    if not rest_of_word:
        # Mark end-of-word without discarding existing children.
        words_subtree['.'] = None
        return words_subtree
    letter = rest_of_word[0]
    next_subtree = words_subtree.get(letter, {})
    words_subtree[letter] = add_to_words_subtree(next_subtree, rest_of_word[1:])
    return words_subtree
3c7d9bddcbc465d4c5c9de40df1f006ed093182e
71,328
def tong_pso(n):
    """Return the harmonic sum 1/1 + 1/2 + 1/3 + ... + 1/n."""
    return sum(1 / k for k in range(1, n + 1))
3dae333cf8b2d2a67bd1c16396fe9d8679bba96b
71,329
def find_disappeared_numbers(nums: list[int]) -> list[int]:
    """Return the integers in the range [1, n] that never appear in nums.

    Complexity: n = len(nums)
        Time: O(n)
        Space: O(1) extra — the input list is permuted in place.

    Args:
        nums: array of n integers where every ``nums[i]`` is in [1, n]

    Note: a missing number implies some other number is duplicated.

    Examples:
        >>> find_disappeared_numbers([4,3,2,7,8,2,3,1])
        [5, 6]
        >>> find_disappeared_numbers([1,1])
        [2]
    """
    # Cyclic sort: keep swapping nums[idx] toward its home slot (value - 1)
    # until the slot either holds the value already (duplicate) or idx itself
    # is the home. Each swap parks one value permanently, so this is O(n).
    for idx in range(len(nums)):
        while nums[idx] != nums[nums[idx] - 1]:
            home = nums[idx] - 1
            nums[idx], nums[home] = nums[home], nums[idx]

    # Any slot not holding its own index+1 marks a missing value.
    return [slot + 1 for slot, value in enumerate(nums) if value != slot + 1]
631727bf874e8434d27ff40427bcdd174b5a29f2
71,332
import json


def generate_report(issues, fuzzer_name, job_type):
    """Build a simple json report of performance analysis results.

    Args:
        issues: List of performance issues with corresponding scores and
            attributes.
        fuzzer_name: Fuzzer name.
        job_type: Job name.

    Returns:
        A 2-space-indented json string for the given performance issues.
    """
    return json.dumps(
        {
            'fuzzer_name': fuzzer_name,
            'job_type': job_type,
            'issues': issues,
        },
        indent=2)
a9c4f9ee0aabc8def17f5cb1790b36adfdf103b7
71,334
def compute_bytes_per_voxel(file_data_type):
    """Return the number of bytes needed to store one voxel of the given
    ElementType: 4 for 'VolumeDataType_Float', 2 for anything else.
    """
    if file_data_type == 'VolumeDataType_Float':
        return 4
    return 2
608997b6de06ac1dfda01db40064e79f89c17f32
71,343
def gmt(line):
    """Parse one GMT line into (id, name, gene symbols).

    The line's trailing newline character is stripped before splitting on
    tabs; everything after the second field is the gene list.
    """
    fields = line[:-1].split("\t")
    set_id, set_name = fields[0], fields[1]
    return set_id, set_name, fields[2:]
327203ef79be6b1447b8751b6785d6ffc252027a
71,344
def getTextFile(filep):
    """Read the text file at *filep* and return its lines as a list.

    Each line of the file becomes one list element and keeps its trailing
    newline character, e.g. a file containing::

        hello
        world

    yields ``['hello\\n', 'world\\n']``.

    The original opened the file without ever closing it (resource leak) and
    read it with a manual ``while 1``/``readline`` loop; ``with`` +
    ``readlines`` produces the identical list and guarantees the handle is
    closed.

    Args:
        filep: path of the file to read.

    Returns:
        list[str]: the file's lines, newlines included.
    """
    with open(filep) as file:
        return file.readlines()
22cce76cb9afcc82570a1d5130f88cd9a5333c1a
71,350
from typing import Iterable


def no_ctrl(args: Iterable[str]) -> bool:
    """Return True when every arg carries the "NoCtrl" marker, i.e. all are
    without control cost (vacuously True for an empty iterable)."""
    for arg in args:
        if "NoCtrl" not in arg:
            return False
    return True
58837d40682aab8f0c4f9a0c7f8712d51bc20d1d
71,351
import inspect def _indented_list(contents: str) -> list[str]: """ Convert a list string into individual (dedented) elements. For example, foo: desc bar: int more desc baz: desc indented returns [ "foo:\ndesc", "bar: int\nmore desc", "baz:\ndesc\n indented", ] """ # we expect this to be through cleandoc() already. assert not contents.startswith(" ") assert not contents.startswith("\n") assert "\t" not in contents ret: list[str] = [] for line in contents.splitlines(keepends=True): empty = not line.strip() indented = line.startswith(" ") if not (empty or indented): # new section ret.append(line) else: # append to current section ret[-1] += line return [inspect.cleandoc(x) for x in ret]
90f33d26eec4a2f025532c8db326b2c8ec6710ec
71,357
def format_column_names(df):
    """Standardize a pandas df's column names in place.

    Names are lowercased and have spaces, hyphens and dots replaced with
    underscores. Returns None; the DataFrame is mutated.
    """
    table = str.maketrans({' ': '_', '-': '_', '.': '_'})
    renamed = {col: col.lower().translate(table) for col in df.columns}
    df.rename(renamed, axis=1, inplace=True)
463398ff48699318ee4bf05e6fb823e05feb9ae0
71,362
def Area(rectangle):
    """Return the rectangle's area.

    Args:
        rectangle: sequence (x1, y1, x2, y2) of opposite corner coordinates.

    Returns:
        (x2 - x1) * (y2 - y1)
    """
    x1, y1 = rectangle[0], rectangle[1]
    x2, y2 = rectangle[2], rectangle[3]
    return (x2 - x1) * (y2 - y1)
6dd7b1c0fbff39ac26e7eb514ca810496b060c2d
71,365
import json


def toJson(jsonString):
    """Template filter: parse a JSON string (object or array) into Python
    data so it can be iterated in a template loop.

    Example usage: {{ value|toJson }}

    Return: the decoded json value.
    """
    decoded = json.loads(jsonString)
    return decoded
35900c2126cb7cae8d00f235deae38750488125b
71,366
def scale_mesh(mesh):
    """Scale-normalize the input mesh in place: center its vertices at the
    origin and divide by the largest absolute coordinate, so every coordinate
    ends up in [-1, 1].

    Returns the (mutated) mesh.
    """
    verts = mesh.verts_packed()
    num_verts = verts.shape[0]
    centroid = verts.mean(0)
    # Largest per-axis absolute deviation from the centroid.
    extent = max((verts - centroid).abs().max(0)[0])
    mesh.offset_verts_(-centroid.expand(num_verts, 3))
    mesh.scale_verts_(1.0 / float(extent))
    return mesh
a8b5119d9c1aba21158864d5c06ae51d31dd7765
71,367
import re


def get_root_of_unsplitable(path):
    """Scan path components innermost-first for the actual scene release name,
    skipping auxiliary folders such as cd1/sample/proof/subs and
    bdmv/disc1/video_ts. Returns None if no scene folder could be found.

    NOTE(review): the skip patterns anchor ^ / $ only on the outermost
    alternatives (so e.g. 'SampleXYZ' is also skipped) — preserved as-is;
    confirm whether that is intended before tightening.
    """
    scene_aux = re.compile(
        r'^(cd[1-9])|(samples?)|(proofs?)|((vob)?sub(title)?s?)$', re.IGNORECASE)
    disc_aux = re.compile(r'^(bdmv)|(disc\d*)|(video_ts)$', re.IGNORECASE)
    for component in reversed(path):
        if not component:
            continue
        if scene_aux.match(component) or disc_aux.match(component):
            continue
        return component
1b215905a29b308b3043e00c3ef307ff035ac8ad
71,370
import threading
import copy

# One shared lock for all deepcopy() calls. The original created a fresh
# Lock inside every call, so no two callers could ever contend on it and
# it provided zero synchronization.
_DEEPCOPY_LOCK = threading.Lock()


def deepcopy(obj) -> object:
    """
    Thread safe deep copy of an object.

    Copies are serialized on a shared module-level lock, so two threads
    never deep-copy concurrently. Note this only guards against races with
    writers that also acquire _DEEPCOPY_LOCK while mutating *obj*.

    :param obj: the object to deep copy
    :return: a deep copy of the object
    """
    with _DEEPCOPY_LOCK:
        return copy.deepcopy(obj)
9f4cad16518b9754cc43c442e0eecdc046ffd085
71,375
def decompress_elements(elements):
    """Flatten a ('UF-YYYY-MM', measurements) pair into a string tuple.

    Example:
        ('CE-2015-12', {'chuvas': [7.6], 'dengue': [29.0]})
        -> ('CE', '2015', '12', '7.6', '29.0')
    """
    key, measurements = elements
    uf, year, month = key.split('-')
    rain = str(measurements['chuvas'][0])
    cases = str(measurements['dengue'][0])
    return uf, year, month, rain, cases
57a3056828b0f4239519994cd48c33c8697c19a3
71,379
def grep(regex_name, regex, data):
    """Run a compiled regex over *data*.

    :param regex_name: name under which matches are reported
    :type regex_name: `str`
    :param regex: compiled pattern whose findall() is applied to data
    :param data: text to search
    :type data: `str`
    :return: {regex_name: matches} when anything matched, otherwise {}
    :rtype: `dict`
    """
    matches = regex.findall(data)
    if matches:
        return {regex_name: matches}
    return {}
dd3d16de3f723d870d1d04c13c4261680ba93f54
71,386
from typing import List from pathlib import Path import glob def _gather_source_files(directory_path: str) -> List[Path]: """Returns a list of all python files in the tree starting from the provided directory_path Args: directory_path: the root directory of a python project. Returns: A list of python file paths. """ return [Path(_) for _ in glob.glob(f"{directory_path}/**/*.py", recursive=True)]
ac06eb0805c6cd85c1ab5cb5ad515084c473f173
71,388
def _trimxy_ranges(x, y, ranges): """ " Parameters ---------- x : array-like x y : array-like y ranges : list, list[list] ranges in x as [xmin, xmax] to which y and y will be trimmed Returns ------- trimmed_xy : list, list[list] x and y trimmed to ranges """ if not isinstance(ranges[0], list): trim = (x > ranges[0]) & (x < ranges[1]) return [x[trim], y[trim]] trimmed_xy = [] for range in ranges: trim = (x > range[0]) & (x < range[1]) trimmed_xy.append([x[trim], y[trim]]) return trimmed_xy
b04968ccd8ed2a601dcadb37348d46cc3fd10d80
71,391
from pathlib import Path
from typing import Dict
import json


def load_json_object(json_path: Path) -> Dict:
    """Load a single JSON object from a file.

    Raises a TypeError if the JSON parsing is successful but the file
    contained something other than a JSON object (e.g. a JSON array).

    The original raised ``TypeError("Contents of ", json_path, " ...")``
    with three positional args, yielding a tuple-valued exception message;
    it now raises with one properly formatted string.

    Keyword arguments:
    json_path (Path): Path to file to load a json object from.
    """
    with json_path.open("r") as infile:
        json_dict = json.load(infile)
    if not isinstance(json_dict, dict):
        raise TypeError(f"Contents of {json_path} is not one JSON object")
    return json_dict
a42a4511d4e6a320a0d5f1d2de711287ccba0b39
71,394
def importNums(filename):
    """Read one numeric per line from *filename* and return them as floats.

    assumes filename is the name of a file in the filepath, formatted with a
    single numeric per line.

    Robustness fixes over the original: ``float(line[:-1])`` chopped the last
    digit off the final line when the file lacked a trailing newline, and a
    trailing blank line crashed on ``float('')``. ``float()`` ignores
    surrounding whitespace, so passing the raw line is equivalent for
    well-formed files, and blank lines are now skipped.

    Returns:
        list[float]: the numerics in filename, in file order.
    """
    num_list = []
    with open(filename) as file:
        for line in file:
            if line.strip():
                num_list.append(float(line))
    return num_list
43077b999c915b65f3195157a59927908e62cec1
71,399
def pos_in_rect(rect, pos):
    """Return True if *pos* (x, y) lies inside *rect* (x, y, width, height),
    borders included."""
    pos_x, pos_y = pos
    left, top, width, height = rect
    inside_x = left <= pos_x <= left + width
    inside_y = top <= pos_y <= top + height
    return inside_x and inside_y
108d214965f3a4172bd5bc4608ec9b2c48908c10
71,400
import inspect def _first_param_empty(signature): """Check whether the first argument to this call is empty, ie. no with a default value""" try: first = next((signature.parameters.items()).__iter__()) return first[1].default == inspect._empty except StopIteration: return True
052db9e7db8ceaf78af58a378f0ebd3f7994bab8
71,401
def define_additional_meta(nlamps=20):
    """
    Defines meta that tends to be instrument-specific and not used as widely
    in the code. See :func:`define_core_meta` for additional details.

    For meta used to define configurations, the rtol key specifies the
    relative tolerance for a match.

    Args:
        nlamps (:obj:`int`, optional):
            Number of calibration lamps for this instrument; one
            ``lampstatNN`` and one ``lampshstNN`` entry is added per lamp.

    Returns:
        :obj:`dict`: Describes the additional meta data used in pypeit.
    """
    meta = {
        'dichroic': dict(dtype=str, comment='Beam splitter'),
        'filter1': dict(dtype=str, comment='First filter in optical path'),
        'dispangle': dict(dtype=float, comment='Angle of the disperser', rtol=0.),
        'hatch': dict(dtype=str, comment='Position of instrument hatch'),
        'slitwid': dict(dtype=float, comment='Slit width, sometimes distinct from decker'),
        'detector': dict(dtype=str, comment='Name of detector'),
        'arm': dict(dtype=str, comment='Name of arm (e.g. NIR for X-Shooter)'),
        'datasec': dict(dtype=str, comment='Data section (windowing)'),
        'idname': dict(dtype=str, comment='Instrument supplied frametype (e.g. bias)'),
    }
    # Two separate loops keep the key insertion order of the original:
    # all lampstat entries first, then all lampshst entries.
    for lamp in range(1, nlamps + 1):
        meta[f'lampstat{lamp:02d}'] = dict(
            dtype=str, comment='Status of a given lamp (e.g off/on)')
    for lamp in range(1, nlamps + 1):
        meta[f'lampshst{lamp:02d}'] = dict(
            dtype=str, comment='Status of a lamp shutter (e.g closed/open)')
    return meta
4070acdec231ac144dbac79a657f0d8e120122e6
71,402
import requests


def get_soundcloud_data(url):
    """Scrape a SoundCloud track page for its important information.

    The page <title> is expected to look like 'Track by Artist | ...'.

    Returns:
        dict: with 'title' and 'artist' keys.
    """
    page = requests.get(url)
    title_text = page.text.split('<title>')[1].split('</title')[0]
    fields = title_text.split(' by ')
    # TODO: extract more fields from the page.
    return {
        'title': fields[0].strip(),
        'artist': fields[1].split('|')[0].strip(),
    }
8fff54dc0c3bdf0e40a282f6f2aab8efc2035e26
71,403