content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def compact(seq):
    """
    Removes ``None`` values from various sequence-based data structures.

    dict:
        Removes keys with a corresponding ``None`` value.

    list:
        Removes ``None`` values.

    Any other input type yields ``None`` (implicit, unchanged behaviour).

    >>> compact({'foo': 'bar', 'baz': None})
    {'foo': 'bar'}

    >>> compact([1, None, 2])
    [1, 2]
    """
    # The ``six`` dependency was dropped: dict.items() serves the same
    # purpose on both Python 2 and 3 for a full iteration.
    if isinstance(seq, dict):
        return {k: v for k, v in seq.items() if v is not None}
    elif isinstance(seq, list):
        return [k for k in seq if k is not None]
0c4d53a409e38ff49517087eca40d8f8f842e8b6
88,904
def remove(bowl_a, bowl_b):
    """Return bowl_b without the "ingredients" of bowl_a.

    bowl_b is modified in place; one occurrence is removed per matching
    ingredient in bowl_a.
    """
    for ingredient in bowl_a:
        # Drop a single matching occurrence; ignore absent ingredients.
        try:
            bowl_b.remove(ingredient)
        except ValueError:
            pass
    return bowl_b
5480c6680f9cac9fd033dedacc14da5bc58fc551
88,905
from typing import Optional from pathlib import Path from typing import Tuple def _load_root_datadir( datadir_txt: Optional[Path], ) -> Tuple[Optional[Path], Optional[Path]]: """ Locate the root and datadir, returning Paths if possible. Uses the location of the datadir.txt file to define the data directory and the 'root' directory. Parameters ---------- datadir_txt: The location of the datadir.txt file, if loaded. Returns ------- A tuple containing (rootdir, datadir). Each tuple component is None if the desired directory is not a accessible directory. """ if datadir_txt is None: return (None, None) rootdir = datadir_txt.parent datadir = Path(datadir_txt.read_text()) return (rootdir if rootdir.is_dir() else None, datadir if datadir.is_dir() else None)
af8d1b7dfee31f1cec398f2f9fc3f2b0f817ca78
88,909
def halfstring(inputstr):
    """Return the first half of a string.

    For odd lengths the extra character goes to the second half
    (floor division).
    """
    # BUG FIX: ``strlen / 2`` is a float in Python 3, which raises
    # TypeError when used as a slice index; use integer division.
    return inputstr[:len(inputstr) // 2]
43e6113ebed1e0545d296e4789ee3a274e2754ab
88,910
def check_suffix_exists(obj_name, suffix):
    """
    Checks whether given suffix is in given Maya node name or not.

    :param obj_name: str, name of the object to check suffix of
    :param suffix: str, suffix to check (underscores are ignored)
    :return: bool, whether given suffix already exists in given node
    """
    # BUG FIX: a suffix is the LAST underscore-separated token; the
    # original compared against the first token (i.e. the prefix).
    base_name_split = obj_name.split('_')
    return base_name_split[-1] == suffix.replace('_', '')
d74fd2acfe7f2ad3ae62daecb5339f91ed93185e
88,912
def remove_nulls(dictionary, deep=True):
    """
    Remove all ``None`` values from a dictionary.

    :param dictionary: the dict to clean
    :param deep: when True, nested dicts are cleaned recursively too
    :return: a new dict without ``None`` values
    """
    # ``isinstance`` replaces ``type(v) is dict`` so dict subclasses are
    # also recursed into (idiomatic type check).
    return {
        k: remove_nulls(v, deep) if deep and isinstance(v, dict) else v
        for k, v in dictionary.items()
        if v is not None
    }
9e3a96073cbbffc61dd854cc398518dec85e216e
88,915
def to_dict_query(self) -> list:
    """Serialize every record in this SQLAlchemy query to a dictionary.

    Returns:
        list: one ``row.to_dict()`` result per row in ``self.all()``.
    """
    records = self.all()
    return [record.to_dict() for record in records]
c9e31579a5d9ba90d6f2618a926cd64c622c18d7
88,917
import json


def read_config(config_path):
    """Reads config.

    :param config_path: path to configuration file
    :returns: dict with configuration data
    """
    # json.load streams the file directly (no read()+loads round trip);
    # an explicit encoding avoids locale-dependent decoding.
    with open(config_path, "r", encoding="utf-8") as fo:
        return json.load(fo)
b31eff389072de03e0bb5815e5cf1473f39d14c6
88,925
def positive_index(index, size):
    """
    Return a positive index from any index.

    If the index is positive, it is returned. If negative, size+index
    will be returned.

    Parameters
    ----------
    index : int
        The input index.
    size : int
        The size of the indexed dimension.

    Returns
    -------
    int
        A positive index corresponding to the input index.

    Raises
    ------
    ValueError
        If the given index is not valid for the given size.
    """
    if index < -size or index > size:
        raise ValueError(
            "Invalid index {} for size {}".format(index, size))
    return index + size if index < 0 else index
d2c21e8e819926fc1904a2176a633007b3ef7a36
88,926
def _has_valid_syntax(row): """ Check whether a given row in the CSV file has the required syntax, i.e. - lemma is an existing string object - example sentence is a string and contains at least one pair of lemma markers in the right order - score is one of the following strings: '0', '1', '2', '3', '4' :param row: a dictionary containing all values in a CSV row """ lemma = row['Lemma'] sentence = row['Example'] score = row['Score'] # check lemma if lemma is None or type(lemma) != str: return False # check sentence if sentence is None or type(sentence) != str or not sentence.__contains__('_&') or not sentence.__contains__('&_'): return False i = sentence.index('_&') j = sentence.index('&_') if i >= j: return False # check score valid_scores = ['0', '1', '2', '3', '4'] if score is None or type(score) != str or score not in valid_scores: return False return True
0ca5ddf476ca4aeb4e1387a340c8aec6d38d7950
88,927
from pathlib import Path


def test_images_dir() -> Path:
    """Return the filesystem path of the test VASP images.

    Layout: DiSPy-API (root) -> tests -> test_images; this module lives
    three directory levels below the repository root.
    """
    repo_root = Path(__file__).parent.parent.parent
    return repo_root / "tests" / "test_images"
c4c17e55d115356ce2fb0fbf44550e1050ad4584
88,930
def get_report_png_by_id(triage_instance, report_id):
    """Fetch and return the PNG file associated with the specified report_id"""
    response = triage_instance.request(
        f"reports/{report_id}.png", raw_response=True
    )
    return response.content
2970f7cdbc19150d000a37af805064e5783cff54
88,931
def _startswith_and_remainder(string, prefix): """Returns if the string starts with the prefix and the string without the prefix.""" if string.startswith(prefix): return True, string[len(prefix):] else: return False, ''
4a982ae5780f91f24972e3791e069a6ce9e09b6b
88,936
import torch


def softmax_sample(x):
    """Draw one one-hot sample from the categorical distribution defined
    by the logits ``x``."""
    categorical = torch.distributions.OneHotCategorical(logits=x)
    sample = categorical.sample()
    return sample
a5815a341f1f089266bdcf5fcc8afdf2e408f4b1
88,940
import unittest def build_suite(classes): """Creates a TestSuite for all unit test classes in the list. Assumes that each of the classes in the list has unit test methods which begin with 'test'. Calls unittest.makeSuite. Returns: A new unittest.TestSuite containing a test suite for all classes. """ suites = [unittest.makeSuite(a_class, 'test') for a_class in classes] return unittest.TestSuite(suites)
1410f181c318d0545b3d29d5cdbe36d74151ea6c
88,941
def normalize_connector_config(config):
    """Normalize connector config

    This is porting of TD CLI's ConnectorConfigNormalizer#normalized_config.
    see also:
    https://github.com/treasure-data/td/blob/15495f12d8645a7b3f6804098f8f8aca72de90b9/lib/td/connector_config_normalizer.rb#L7-L30

    Args:
        config (dict): A config to be normalized

    Returns:
        dict: Normalized configuration

    Examples:
        Only with ``in`` key in a config.

        >>> config = {"in": {"type": "s3"}}
        >>> normalize_connector_config(config)
        {'in': {'type': 's3'}, 'out': {}, 'exec': {}, 'filters': []}

        With ``in``, ``out``, ``exec``, and ``filters`` in a config.

        >>> config = {
        ...     "in": {"type": "s3"},
        ...     "out": {"mode": "append"},
        ...     "exec": {"guess_plugins": ["json"]},
        ...     "filters": [{"type": "speedometer"}],
        ... }
        >>> normalize_connector_config(config)
        {'in': {'type': 's3'}, 'out': {'mode': 'append'}, 'exec': {'guess_plugins': ['json']}, 'filters': [{'type': 'speedometer'}]}
    """
    if "in" in config:
        return {
            "in": config["in"],
            "out": config.get("out", {}),
            "exec": config.get("exec", {}),
            "filters": config.get("filters", []),
        }
    elif "config" in config:
        if len(config) != 1:
            # BUG FIX: error message read "isn't support" and was missing
            # a space before "key." due to broken string concatenation.
            raise ValueError(
                "Setting sibling keys with 'config' key isn't supported. "
                "Set within the 'config' key, or put all the settings "
                "without 'config' key."
            )
        return normalize_connector_config(config["config"])
    else:
        # A bare config is treated as the "in" section.
        return {"in": config, "out": {}, "exec": {}, "filters": []}
a835d36a36a54b7978a8c1e7fcbef1ab19c6b7c1
88,943
def partition(arr, start, end, pivot_index):
    """
    Partition ``arr[start:end + 1]`` around the element at ``pivot_index``:
    elements smaller than the pivot end up before it, the rest after.
    The pivot lands at its final sorted position, whose index is returned.
    """
    # Move the chosen pivot to the front of the range.
    arr[start], arr[pivot_index] = arr[pivot_index], arr[start]
    boundary = start
    for j in range(start + 1, end + 1):
        if arr[j] < arr[start]:
            boundary += 1
            arr[boundary], arr[j] = arr[j], arr[boundary]
    # Put the pivot back at the boundary between the two partitions.
    arr[boundary], arr[start] = arr[start], arr[boundary]
    return boundary
c4247ad74b8eccdaec0c5f1637561659e822431d
88,945
import io


def read_placeholder(file_desc: io.BufferedReader, size=3) -> bytes:
    """
    Read ``size`` bytes from an open binary file.

    :param file_desc: file descriptor opened for binary reading
    :param size: number of bytes to read (default 3)
    :return: the bytes read (may be shorter at EOF)
    """
    data = file_desc.read(size)
    return data
c4ecf2c56df87f9d839a41030312814e40552583
88,946
from typing import Dict


def in_obiectum_planum(
        rem: Dict,
        pydictsep: str = '.',
        pylistsep: str = ' ') -> dict:
    """in_obiectum_planum Flatten a nested python object

    Trivia:
    - obiectum, https://en.wiktionary.org/wiki/obiectum#Latin
    - recursiōnem, https://en.wiktionary.org/wiki/recursio#Latin
    - praefīxum, https://en.wiktionary.org/wiki/praefixus#Latin
    - plānum, https://en.wiktionary.org/wiki/planus

    Args:
        rem (dict): The object to flatten
        pydictsep (pydictsep, optional): The separator for python dict keys
        pylistsep (pydictsep, optional): The separator for python list values

    Returns:
        [dict]: A flattened python dictionary

    Exemplōrum gratiā (et Python doctest, id est, testum automata):

    >>> testum1 = {'a0': {'a1': {'a2': 'va'}}, 'b0': [1, 2, 3]}

    >>> in_obiectum_planum(testum1)
    {'a0.a1.a2': 'va', 'b0': '1 2 3'}

    >>> in_obiectum_planum(testum1)
    {'a0.a1.a2': 'va', 'b0': '1 2 3'}

    >>> in_obiectum_planum(testum1, pylistsep=',')
    {'a0.a1.a2': 'va', 'b0': '1,2,3'}

    >>> in_obiectum_planum(testum1, pydictsep='->')
    {'a0->a1->a2': 'va', 'b0': '1 2 3'}

    # This is not designed to flat arrays, str, None, int, ..., only dict
    >>> in_obiectum_planum([1, 2, 3, 4])
    Traceback (most recent call last):
    ...
    TypeError: in_obiectum_planum non dict<class 'list'>
    """
    # resultatum ("result") accumulates the flattened key/value pairs.
    resultatum = {}

    # Only dicts can be flattened; anything else is a caller error.
    if not isinstance(rem, dict):
        raise TypeError('in_obiectum_planum non dict' + str(type(rem)))

    def recursionem(rrem, praefixum: str = ''):
        # recursionem ("recursion"): walk the nested structure, carrying
        # the dotted key prefix built so far.
        praefixum_ad_hoc = '' if praefixum == '' else praefixum + pydictsep
        if isinstance(rrem, dict):
            for clavem in rrem:
                recursionem(rrem[clavem], praefixum_ad_hoc + clavem)
        elif isinstance(rrem, list):
            # Lists become a single joined string, not per-index keys.
            resultatum[praefixum] = pylistsep.join(map(str, rrem))
        else:
            # Scalar leaf: stored as-is under the accumulated prefix.
            resultatum[praefixum] = rrem

    recursionem(rem)
    return resultatum
cd4b5aa46c38719207e6efbcf67c3c4a63286848
88,949
def find_delimiter_loc(delimiter, str):
    """Return a list of delimiter locations in the str.

    :param delimiter: single-character delimiter to look for
    :param str: string to scan (note: parameter shadows the builtin)
    :return: list of 0-based positions where the delimiter occurs
    """
    # BUG FIX: the original compared against a hard-coded "%" and
    # ignored the ``delimiter`` argument entirely.
    return [pos for pos, ch in enumerate(str) if ch == delimiter]
d813ef6a667372578e6cc3878e9f30ba72b2766b
88,951
def convertCountry(zippedList, name):
    """Converts a country name to the corresponding country code.

    e.g. Russia --> RU

    :param zippedList: list of (code, name) pairs, e.g. [('RU', 'Russia')]
    :param name: country name to look up
    :return: the matching country code
    :raises ValueError: if the name is not present in the list
    """
    # The leftover debug ``print`` was removed; the lookup is unchanged.
    codeIndex = [pair[1] for pair in zippedList].index(name)
    return zippedList[codeIndex][0]
82b27509dfeeb4f067a25f92abe3f1ea7592d31b
88,954
def read_file(path):
    """read files from path

    Args:
        path (str): full path of the file

    Returns:
        list(str): list of lines
    """
    # The context manager already closes the file; the explicit
    # f.close() inside the ``with`` block was redundant and removed.
    with open(path, "r", encoding="utf8") as f:
        return f.readlines()
ca839bf9927cd397148f92e4ad2c65b422eb9207
88,955
def select_login_fields(fields):
    """
    Select the field with the highest probability for each login-related
    class.

    :param dict fields: Nested dictionary containing label probabilities
        for each form element.
    :returns: (username field, password field, captcha field)
    :rtype: tuple
    """
    # best[class] = (field_name, probability); strict > keeps the first
    # field seen on probability ties, matching the original scan order.
    best = {
        'username': (None, 0),
        'password': (None, 0),
        'captcha': (None, 0),
    }
    for field_name, labels in fields.items():
        for label, prob in labels.items():
            if label in ("username", "username or email"):
                bucket = 'username'
            elif label == "password":
                bucket = 'password'
            elif label == "captcha":
                bucket = 'captcha'
            else:
                continue
            if prob > best[bucket][1]:
                best[bucket] = (field_name, prob)
    return best['username'][0], best['password'][0], best['captcha'][0]
9bd97028a10736fc34958546ec7362128824673a
88,956
import torch as th


def _topk_torch(keys, k, descending, x):
    """Internal function to take graph-wise top-k node/edge features
    according to the rank given by keys, this function is PyTorch only.

    Parameters
    ----------
    keys : Tensor
        The key for ranking.
    k : int
        The :math:`k` in "top-:math:`k`".
    descending : bool
        Indicates whether to return the feature corresponding to largest
        or smallest elements.
    x : Tensor
        The padded feature with shape (batch, max_len, *)

    Returns
    -------
    sorted_feat : Tensor
        A tensor with shape :math:`(batch, k, *)`.
    sorted_idx : Tensor
        A tensor with shape :math:`(batch, k)`.
    """
    batch_size, max_len = x.shape[0], x.shape[1]
    # Per-batch indices of the k best keys; largest=descending selects
    # either the k largest or k smallest keys.
    topk_indices = keys.topk(k, -1, largest=descending)[1]  # (batch_size, k)
    # Flatten batch and length dims so rows can be gathered with a
    # single fancy-index below.
    x = x.view((batch_size * max_len), -1)
    # Offset of each batch element's first row in the flattened tensor.
    shift = th.arange(0, batch_size, device=x.device).view(batch_size, 1) * max_len
    topk_indices_ = topk_indices + shift
    x = x[topk_indices_].view(batch_size, k, -1)
    # Infinite entries (padding) are zeroed out in the returned features.
    return th.masked_fill(x, th.isinf(x), 0), topk_indices
f23a681dbb22d422d659edcd4ccbb4b202473d68
88,965
import re


def ConvertStringToFilename(name):
    """Converts an unicode string to a filesystem safe filename.

    For maximum compatibility we escape all chars which are not
    alphanumeric (in the unicode sense) as %XX; trailing forward slashes
    are stripped afterwards.

    Args:
      name: a unicode string that is part of a subject.

    Returns:
      A safe filename with escaped special chars.
    """
    def _escape(match):
        return "%%%02X" % ord(match.group(0))

    escaped = re.sub(r"\W", _escape, name, flags=re.UNICODE)
    return escaped.rstrip("/")
2faaaa99178bcd8d202d97612912a4694fb8af3b
88,966
import json


def json_to_dict(json_str):
    """Convert a JSON string to a dictionary if not already a dictionary.

    :param json_str: JSON string to convert.
    :type json_str: str
    :return: JSON string as a dictionary; falsy inputs are returned
        unchanged.
    :rtype: dict or None
    """
    # Falsy values ('' / None / {}) pass through untouched.
    if not json_str:
        return json_str
    if isinstance(json_str, dict):
        return json_str
    return json.loads(json_str)
91ee0fd4063b8aa4a2d665f2ebe8d2b81bac8206
88,967
import hashlib
import base64


def get_sha256_checksum(byte_string):
    """
    :param byte_string: byte-string to create checksum from
    :return: b32-encoded sha256-checksum as string, '=' padding stripped
    """
    hex_digest = hashlib.sha256(byte_string).hexdigest()
    encoded = base64.b32encode(hex_digest.encode("utf8"))
    return encoded.decode("utf8").strip("=")
9251442fb23abcf61ceb40b4f711d15e53eec79c
88,968
def escape_quotes(string):
    """Escape double quotes in string."""
    # split/join is equivalent to replace('"', '\\"').
    return '\\"'.join(string.split('"'))
e6a2fc3cbc4dc93b03c6496e3e54fc3e3ba01145
88,971
def index_of_first_zero_bit_moving_right_to_left(ii):
    """
    Return the index of the first zero bit of the integer ii when moving
    right to left (least-significant bit first).

    Returns an index in [0, ..., nbits(ii) - 1].
    """
    position = 0
    # Shift right until the lowest bit is 0, counting how far we moved.
    while ii & 1:
        ii >>= 1
        position += 1
    return position
c0e89fa280cf050d1cbb945e3e52cbacae17cf3e
88,973
def get_act_sent_len(head_indexes_2d):
    """Get actual sentence lengths: the number of non-zero head indexes
    in each sentence."""
    return [sum(1 for idx in head_list if idx != 0)
            for head_list in head_indexes_2d]
8a44f2564a59759666817b5bc0b26e471a32d3af
88,974
def is_versioned(obj):
    """
    Returns whether or not given object is versioned.

    :param obj: SQLAlchemy declarative model object.
    """
    if not hasattr(obj, '__versioned__'):
        return False
    config = obj.__versioned__
    # Missing 'versioning' key means versioning is enabled by default.
    return config.get('versioning', True)
f08e6db40283b46d6882429c045afe8af725c73e
88,976
def key2mongo(tup):
    """Return a string given an (int, string) plate-well key.

    Parameters
    ----------
    tup : (int, string) tuple
        A (plate, well) identifier.

    Returns
    -------
    mongo_id : string
        A string, suitable for representing a mongodb _id field.

    Examples
    --------
    >>> tup = (2490688, 'C04')
    >>> key2mongo(tup)
    '2490688-C04'
    """
    return f'{tup[0]}-{tup[1]}'
3f1f4145a596c2da0fa32f32c7f7e7e2c627319e
88,981
def parse_console_interface(console_interface):
    """parse console interface

    Format: line_number.baud.flow_control_option
    Example: 27.9600.0

    Returns:
        (line_number, baud, flow_control_option) — all strings.
    """
    parts = console_interface.split('.')
    return parts[0], parts[1], parts[2]
21f26fa4d1100357dbc802891e0c1685653a7cb0
88,985
import torch


def tensor_encode_id(img_id):
    """
    Encodes a FathomNet image id like '00a6db92-5277-4772-b019-5b89c6af57c3'
    as a tensor of shape torch.Size([4]) of four integers in the range
    [0, 2^32 - 1].
    """
    hex_str = img_id.replace('-', '')
    seg_len = len(hex_str) // 4
    # Split the hex string into four equal segments and parse each.
    segments = [hex_str[i * seg_len:(i + 1) * seg_len] for i in range(4)]
    return torch.tensor([int(segment, 16) for segment in segments])
8df76c9ca0a59020254ca3946dce4834ef82208b
88,986
def rotate(list, places):
    """Shift the elements in a list.

    A positive place moves the list to the left, a negative place to the
    right; displaced elements wrap around.
    """
    tail = list[places:]
    head = list[:places]
    return tail + head
d7c06f233d0f0946274f555b45b8ab0cb08fe62d
88,989
import json


def search(es_object, index, search_params):
    """
    Run a search against an Elasticsearch index.

    :param es_object: Elasticsearch client exposing ``.search()``
    :param index: name of the index to query
    :param search_params: dict with the query (serialized to JSON)
    :return: the raw search response
    """
    body = json.dumps(search_params)
    return es_object.search(index=index, body=body)
cd315f4b32ff364fb19e4bfaae70404e6eff19c6
88,997
from typing import Any


def smallid(n: Any, seglen: int = 3, sep: str = "...") -> str:
    """Reduce a value's string form to the first and last ``seglen``
    characters, on either side of ``sep``.

    smallid(1234567890, 3, "...") -> "123...890"
    """
    text = str(n)
    head = text[:seglen]
    tail = text[-seglen:]
    return head + sep + tail
ffde5c4ab10d8ba0786453e076d76ff31cc0c7a4
88,999
def get_workdir_data(project_doc, asset_doc, task_name, host_name):
    """Prepare data for workdir template filling from entered information.

    Args:
        project_doc (dict): Mongo document of project from MongoDB.
        asset_doc (dict): Mongo document of asset from MongoDB.
        task_name (str): Task name for which workdir data are prepared.
        host_name (str): Host used for the workdir. Required because the
            workdir template may contain an `{app}` key.

    Returns:
        dict: Data prepared for filling workdir template.
    """
    parents = asset_doc["data"]["parents"]
    return {
        "project": {
            "name": project_doc["name"],
            "code": project_doc["data"].get("code"),
        },
        "task": task_name,
        "asset": asset_doc["name"],
        "app": host_name,
        "hierarchy": "/".join(parents),
    }
ff87d9f2b0f0639143024947a7096068b7d46e17
89,002
def excel_column(column: int) -> str:
    """Convert column position (indexed from 0) to Excel column name.

    Args:
        column (int): Column index (position from 0).

    Returns:
        str: Column label as used by Excel, e.g. 0 -> 'A', 26 -> 'AA'.
    """
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    name = ''
    remaining = column + 1
    # Bijective base-26: each step peels off one letter, right to left.
    while remaining:
        remaining, digit = divmod(remaining - 1, 26)
        name = letters[digit] + name
    return name
e49a3735af8ead3519d3019ce292a56ba697dd72
89,003
import csv


def get_csv_collection(filename, **kwargs):
    """
    Loads a record collection from a CSV file.

    The CSV file MUST include columns for Artist, Title, and Year.

    Arguments:
        filename (str) - CSV filename
        **kwargs (dict) - Optional kwargs:
            skip=0, don't load the first <skip> rows

    Returns:
        list - List of record dicts
    """
    skip = kwargs.get('skip', 0)
    collection = []
    with open(filename, newline='') as collection_file:
        reader = csv.DictReader(collection_file)
        for row_number, record in enumerate(reader, start=1):
            # Data rows 1..skip are dropped.
            if row_number <= skip:
                continue
            collection.append({
                'artist': record['Artist'],
                'release_title': record['Title'],
                'year': record['Year'],
                'type': 'release',
                'country': 'US',
            })
    return collection
72761b39b200bb94e9759ffc63a1cf52beb7c264
89,009
from pathlib import Path


def get_ref_file() -> str:
    """Read the ref file from under the current working directory.

    NOTE(review): the original docstring claimed the result was cached,
    but no caching is implemented — every call re-reads the file.

    Returns:
        str: ref file contents, or '' when the file does not exist.
    """
    # Path is relative to the process CWD, not this module's location.
    ref_file_path = Path.cwd() / '.palm/model_template/ref_files/ref_file.sql'
    if not ref_file_path.exists():
        return ''
    contents = ref_file_path.read_text()
    return contents
2493ecdff96ccba568d201cbd003a43009391f94
89,013
def sum_of_lists(lists):
    """Aggregates a list of equal-length numeric lists by element-wise
    summing.

    Args:
        lists: list of numeric lists, all of the same length.

    Returns:
        A list where each element is the sum across all input lists; the
        element datatype of the first list is preserved. An empty input
        yields [].
    """
    # Robustness fix: an empty input no longer raises IndexError.
    if not lists:
        return []
    if len(lists) == 1:
        return lists[0]

    # Preserve datatype of the first list's elements.
    size = len(lists[0])
    init_val = type(lists[0][0])(0.0)
    total = [init_val] * size
    for ll in lists:
        for i in range(size):
            total[i] += ll[i]
    return total
118383372d0179ff62844899ad96186387d38728
89,015
def get_digits(n, reverse=False):
    """Returns the digits of n as a list, from the lowest power of 10 to
    the highest. If `reverse` is `True`, returns them from the highest
    to the lowest.

    The sign is ignored (digits of abs(n) are returned); n == 0 yields
    [] (unchanged from the original behaviour).
    """
    # BUG FIX: for negative n the original loop never terminated,
    # because -1 // 10 == -1 in Python; work on the absolute value.
    n = abs(n)
    digits = []
    while n != 0:
        digits.append(n % 10)
        n //= 10
    if reverse:
        digits.reverse()
    return digits
89d2deee27590f87e9fbbdd439dde83ac10b743c
89,016
def includes_subpaths(path):
    """Checks if a given path includes subpaths or not.

    It includes them if it ends in a '+' symbol.

    Robustness fix: an empty path no longer raises IndexError — it
    simply does not include subpaths.
    """
    return path.endswith('+')
384de03653449badc5323ceb89180ad0c24e798c
89,017
def response(speech_message):
    """Wrap a speech message in a minimal version-1.0 JSON response."""
    payload = {
        'version': '1.0',
        'response': speech_message,
    }
    return payload
3fcae7e158d1c1249b01c0c1b5c3095c7a603b0f
89,018
def str_to_int_array(string, base=16):
    """
    Converts a whitespace-delimited string of numbers into a list of
    integers parsed with the given base.

    Example: "13 a3 3c" => [0x13, 0xa3, 0x3c]
    :return: [int]
    """
    values = []
    for token in string.split():
        values.append(int(token, base))
    return values
febd897cb3ccd149dd539c8674c529d104382189
89,020
import difflib


def _diff(a, b):
    """
    diff of strings; returns a generator, 3 lines of context by default,
    - or + for changes
    """
    # Thin wrapper: default fromfile/tofile labels and context size are
    # left to difflib.unified_diff.
    return difflib.unified_diff(a, b)
14d2821e214ed9b1e3203ecdfe3ded7b8e9018d7
89,022
from typing import List
import plistlib


def load_plist(plist_path: str) -> List[dict]:
    """
    Open a plist file.

    :param plist_path: The path to plist file.
    :return: The parsed plist content; in our particular case a list of
        dicts.
    """
    with open(plist_path, 'rb') as fp:
        return plistlib.load(fp)
d03e7f61ff1c3433e1337e25bdc606ebb6f13633
89,024
def rescaleInput(input):
    """scales input's elements down to range 0.1 to 1.0."""
    scaled = 0.99 * input / 255.0
    return scaled + .1
06f097e2f8dafaaffda395ef590a1ac5cb7b78dd
89,025
def snakecase_to_camelcase(value):
    """Convert a snakecase string to a camelcase string.

    Args:
        value: The input snakecase string (e.g. 'grasp_env').

    Returns:
        The converted camelcase string (e.g. 'GraspEnv').
    """
    return ''.join(map(str.title, value.split('_')))
11dd5e33b864100fb18e8f8f8f9f541d886664ad
89,027
def select_migrations(current, target, migration_ids):
    """
    Select direction and migrations to run, given current and target
    migrations, from a list of migration ids.

    Returns:
        ('forward', ids) when target > current,
        ('reverse', ids in reversed order) when target < current,
        (None, []) when already at the target.
    """
    if target == current:
        return None, []
    if target > current:
        selected = [m for m in migration_ids if current < m <= target]
        return 'forward', selected
    selected = [m for m in reversed(migration_ids) if target < m <= current]
    return 'reverse', selected
6fcbfb2c881aa0d69290474348d3a76ec696892d
89,029
import math


def torad(deg):
    """Convert an angle in degrees to radians."""
    return deg * (math.pi / 180.0)
655fdb5377f93eae1ecc8736f341b3683d41493a
89,030
from typing import Union


def del_dict_keys(data: dict, keys: Union[tuple, list]) -> dict:
    """Deletes multiple keys from dictionary.

    Args:
        data: A dictionary.
        keys: Keys to be deleted; keys not present are ignored.

    Returns:
        A copy of the dictionary without the deleted keys.
    """
    result = data.copy()
    for key in keys:
        # pop with a default silently skips missing keys.
        result.pop(key, None)
    return result
04af560e521e554d19c6d179d224e32d027fd070
89,038
def is_port(port):
    """returns true if the port is within the valid IPv4 range (0-65535)"""
    return 0 <= port <= 65535
c45115d2ecbd92a2a52dfe6ca3e7af570c619cda
89,043
import re
import html


def html_parse(string):
    """Simple html parser for MAL.

    Strips <br> tags and single-letter BBCode tags like [i]/[/i], then
    unescapes HTML entities.
    """
    # FIX: use a raw string — '\[' and '\w' in a normal string literal
    # are invalid escape sequences (SyntaxWarning on recent Pythons).
    pattern = r'<br[ ]?/?>|\[/?\w\]'
    string = re.sub(pattern, '', string)
    return html.unescape(string)
dc2f1c24ae35da6e417d2256ea4ff6c7efa65da3
89,044
import sqlite3


def open_sqlite(input_db):
    """Open a SQLite database

    Args:
        input_db: Path to a SQLite database to open

    Returns:
        A connection to a SQLite database
    """
    print("Provided Database: {}".format(input_db))
    connection = sqlite3.connect(input_db)
    return connection
7830ec140b4c03efd769f8e025f0a2a85b74303f
89,050
def confidence_bound(gpmodel, X_full, **kwargs):
    """
    Confidence bound acquisition function (a modification of upper
    confidence bound): :math:`\\alpha \\mu + \\beta \\sigma`.

    Args:
        gpmodel (gpim reconstructor object): Surrogate function that
            allows computing mean and standard deviation
        X_full (ndarray): Full grid indices
        **alpha (float): coefficient of the mean term (default 0)
        **beta (float): coefficient of the stddev term (default 1)

    Returns:
        Acquisition function values and GP prediction (mean + stddev)
    """
    alpha = kwargs.get("alpha", 0)
    beta = kwargs.get("beta", 1)
    mean, sd = gpmodel.predict(X_full, verbose=0)
    return alpha * mean + beta * sd, (mean, sd)
032a063f76ba2aa1c2b5928140041f6c8a00ca28
89,053
def _get_filter_items(filter_previously_rated, previously_liked, filter_items): """Determine which items to filter out.""" if filter_previously_rated: previously_liked = set(previously_liked) if filter_items is not None: filter_items = set(filter_items) previously_liked = previously_liked.union(filter_items) return previously_liked # type: set else: if filter_items is not None: return set(filter_items) else: return set()
8cd6094e2458d7839a830b4c4ea2893dda80bb9b
89,054
def image_displacement_to_defocus(dz, fno, wavelength=None):
    """Compute the wavefront defocus from image shift, expressed in the
    same units as the shift.

    Parameters
    ----------
    dz : `float` or `numpy.ndarray`
        displacement of the image
    fno : `float`
        f/# of the lens or system
    wavelength : `float`, optional
        wavelength of light; if None the return has the same units as
        dz, else waves

    Returns
    -------
    `float`
        wavefront defocus, waves if wavelength != None, else same units
        as dz
    """
    if wavelength is None:
        return dz / (8 * fno ** 2)
    return dz / (8 * fno ** 2 * wavelength)
af4552b24941a74bcd8abc1ab2f9562f5bd05ba6
89,055
import zipfile


def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    with zipfile.ZipFile(filename) as archive:
        first_member = archive.namelist()[0]
        # .split() on bytes yields a list of byte-string "words".
        return archive.read(first_member).split()
08d8844025e5bb10a66301e35b8bbe4f37b886c9
89,057
def scale_image(image, cube_size):
    """
    Scale the input image to a whole number of cubes with minimal
    stretch.

    :param image: PIL Image object.
    :param cube_size: Cube size from configuration, int.
    :return: A scaled PIL Image whose width and height are multiples of
        cube_size.
    """
    width, height = image.size
    # Snap each dimension down to the nearest multiple of cube_size.
    snapped_w = (width // cube_size) * cube_size
    snapped_h = (height // cube_size) * cube_size
    return image.resize((snapped_w, snapped_h))
49a7c79e0d1b8aa177c1ed8dc92e28ed2df31022
89,063
import copy


def cut(motion, frame_start, frame_end):
    """
    Return a motion object with poses from [frame_start, frame_end)
    only. The operation is not done in-place.

    Args:
        motion: Motion sequence to be cut
        frame_start, frame_end: Frame number range that defines the
            boundary of motion to be cut. The pose at frame_start is
            included; the pose at frame_end is excluded.
    """
    clipped = copy.deepcopy(motion)
    clipped.name = f"{motion.name}_{frame_start}_{frame_end}"
    clipped.poses = motion.poses[frame_start:frame_end]
    return clipped
e29db1e86c53aa342464dbf61e0206ac6a34adda
89,066
from pathlib import Path


def filter_extensions(files, extensions):
    """Filter files by extensions (given without the leading dot)."""
    wanted = {f".{ext}" for ext in extensions}
    kept = []
    for f in files:
        if Path(f).suffix in wanted:
            kept.append(f)
    return kept
2acd9f79efdef3a0855c78598dbd906e5eee2b21
89,070
def _parse(tag): """ Used by the BeautifulSoup parser to get the wanted tags """ # Return if it's the title of the PLL Alg if tag.has_attr('id'): return '_Permutation' in tag['id'] # Return if it's an actual alg which are a tags with the following classes elif tag.name == 'a' and tag.has_attr('class'): return tag['class'] == ['external', 'text'] # Otherwise don't return anything return False
39969962e19220e887c6bf7a5e99ffcf2e79353c
89,073
from typing import List
import re


def extract_product_handles(text: str, limit: int) -> List[str]:
    """Extract the first <limit> unique product handles.

    Parameters:
        text (str): Text to search product handles
        limit (int): Maximum number of handles to return

    Returns:
        List[str]: List of found product handles
    """
    pattern = r"\/collections\/all\/products\/([a-zA-Z0-9_-]+)\""
    matches = re.findall(pattern, text)
    if not matches:
        return []
    # dict.fromkeys de-duplicates while preserving first-seen order.
    unique = list(dict.fromkeys(matches))
    return unique[:limit]
ae8f0787c56cebc78bfcd803326832fdc4ff28b2
89,074
def make_slist(l, t_sizes):
    """
    Create a list of tuples of given sizes from a list.

    Parameters
    ----------
    l : list or ndarray
        List or array to pack into shaped list.
    t_sizes : list of ints
        List of tuple sizes.

    Returns
    -------
    slist : list of tuples
        List of tuples of lengths given by t_sizes.
    """
    shaped = []
    offset = 0
    for size in t_sizes:
        shaped.append(l[offset:offset + size])
        offset += size
    return shaped
b87ce7a430751d9b46e94609736bb9e25a180b27
89,077
def stripAndRemoveNewlines(text):
    """Removes empty newlines and trailing per-line whitespace, then
    strips leading/trailing whitespace of the whole result."""
    kept = [line.rstrip() for line in text.splitlines() if line.strip()]
    return "\n".join(kept).strip()
636cac9b11ea4d39a58bab028ae49dff75d71c66
89,080
def unpack_Any(any_msg, msg_class):
    """Unpacks any_msg (a protobuf ``Any``) into an instance of
    msg_class.

    Returns None if msg_class is None.
    """
    if msg_class is None:
        return None
    message = msg_class()
    any_msg.Unpack(message)
    return message
451bb846475045179e27405b156fc65a0a3e03f8
89,081
def is_absolute_http_url(url):
    # type: (str) -> bool
    """Tell if a string looks like an absolute HTTP URL."""
    # The whole check stays inside try so non-string / unhashable inputs
    # fall through to False, exactly as before.
    try:
        return url[:6] in {'http:/', 'https:'}
    except (TypeError, IndexError, ValueError):
        return False
8c89850c8f0eba64db1a3fd44bb9cebaecc5f973
89,082
def is_nova_server(resource):
    """checks resource is a nova server"""
    if not isinstance(resource, dict):
        return False
    return (
        "type" in resource
        and "properties" in resource
        and resource.get("type") == "OS::Nova::Server"
    )
c412455c6a22f3ac1b1eb074caeec70d54bf0fbd
89,083
def _band_shortname(long_name): """Get short band name, e.g. `Near Infrared (NIR)` becomes `nir` and `Red` becomes `red`. """ if '(' in long_name: start = long_name.find('(') + 1 end = long_name.find(')') short_name = long_name[start:end] else: short_name = long_name.replace(' ', '_').replace('-', '_') return short_name.lower()
87762f305cc622b2fb743a8fb9d5c1af41ac67f1
89,084
import random


def random_colors(amount: int) -> list:
    """Create ``amount`` random hex color strings (e.g. '#A1B2C3') in a
    list."""
    colors = []
    for _ in range(amount):
        digits = ''.join(random.choice('ABCDEF0123456789') for _ in range(6))
        colors.append('#' + digits)
    return colors
9f18124a8e9c2d7d9dc8a7775374962c2c0c5eb8
89,089
def demo_app_link_id(name):
    """Returns the value of the id of the dcc.Link related to the demo
    app."""
    slug = name.replace("_", "-")
    return f"app-link-id-{slug}"
d2acdb4b24b6e14d922543e1dc2e003ffc3dca62
89,093
from bs4 import BeautifulSoup


def parse_html(request_res):
    """Parses the HTML of a page.

    :param request_res: an HTTP response object (only ``.text`` is
        accessed — presumably a ``requests`` response; confirm at callers).
    :return: a BeautifulSoup tree built with the stdlib 'html.parser'.
    """
    soup = BeautifulSoup(request_res.text, 'html.parser')
    return soup
00333bf4d672805f6e7ce609584796823cb65d8a
89,094
def conflict_update_condition(columns):
    """
    Create the (condition) portion of the
    "ON CONFLICT (condition) DO UPDATE (actions)" statement.

    :param columns: column list, or an already-joined column string
    :raises TypeError: when ``columns`` is empty or missing
    """
    if not (columns and len(columns) > 0):
        raise TypeError("Columns are required.")
    condition = columns if isinstance(columns, str) else ', '.join(columns)
    return f"({condition})"
7d3f8a876f57548d908f8aef1279abe6b9fd0c81
89,096
def RemoveTrailingSlashes(path):
    """Removes trailing forward slashes from the path.

    We expect unix style path separators (/). Gyp passes unix style
    separators even in windows so this is okay.

    Args:
      path: The path to strip trailing slashes from.
    Returns:
      The path with trailing slashes removed.
    """
    # A lone '/' is the filesystem root and must survive untouched
    # ('/' != '' in a path).
    return path if path == '/' else path.rstrip('/')
906fcb42f5b351d168a37bc9529580cbe3406d0e
89,099
def ftime(time_):
    """Format a datetime as 'MM/DD/YY HH:MM'."""
    fmt = "%m/%d/%y %H:%M"
    return time_.strftime(fmt)
e5e63e4bc855b0a37b9059d80a7612d0c9fb4f55
89,103
def find_dict_with_keyvalue_in_json(json_dict, key_in_subdict, value_to_find):
    """
    Searches a json_dict for the key key_in_subdict that matches value_to_find

    :param json_dict: dict
    :param key_in_subdict: str - the name of the key in the subdict to find
    :param value_to_find: str - the value of the key in the subdict to find
    :return: The subdict to find
    :raises KeyError: if no subdict has the requested key/value pair
    """
    for data_group in json_dict:
        if data_group.get(key_in_subdict) == value_to_find:
            return data_group
    # Include the missing value in the error so callers can see what
    # failed to match (the original raised a bare, argument-less KeyError).
    raise KeyError(value_to_find)
2eb55e7d65e98ee80f56681d5f8c2e3e8227d979
89,107
def fit_sklearn_model(ts, model, test_size, val_size):
    """
    Fit ``model`` on the training portion of a windowed time series.

    Parameters:
        ts (pandas.DataFrame): time series values created by
            src.time_series_functions.create_windowing
        model (Sklearn Model): base model to predict ts
        test_size (int): size of test set
        val_size (int): size of validation set (if you do not use a
            validation set, val_size can be set as 0)

    Returns:
        Sklearn Model: trained model
    """
    train_size = len(ts) - test_size - val_size
    # Use explicit positional slicing: plain [0:n] on a Series/frame is
    # ambiguous between label- and position-based selection for
    # non-default indexes.
    y_train = ts['actual'].iloc[:train_size]
    x_train = ts.drop(columns=['actual']).iloc[:train_size]
    return model.fit(x_train, y_train)
b2f3d25566eb506a8dc7fec3cbfdc9c4cf57eff9
89,109
import functools


def coroutine(func):
    """
    Co-Routine decorator that wraps a generator function and primes the
    resulting coroutine by advancing it to its first ``yield``.

    :param func: a generator function
    :returns: a wrapper that builds the generator, primes it with an
        initial ``send(None)``, and returns the ready-to-use generator
    """
    @functools.wraps(func)  # preserve name/docstring of the wrapped func
    def start(*args, **kwargs):
        target = func(*args, **kwargs)
        # Advance to the first yield so callers can send() immediately.
        target.send(None)
        return target
    return start
fca47a9e884cbfe81523c5ea0e597f5363dc960e
89,114
def buildBoard(x, y):
    """
    Create a board of 1 values.

    :param x: horizontal length (number of rows)
    :param y: vertical length (number of columns)
    :return: list of x lists, each containing y ones
    """
    # Comprehension builds one fresh row list per iteration, so rows are
    # independent (mutating one row cannot affect another).
    return [[1] * y for _ in range(x)]
0440e5bd0d89280f821f1ce62de7a62102aab1fb
89,117
def get_batch_series(source):
    """Extracts sim_batch and series values from full source string

    e.g.: sim10_1 would return (10, 1)

    Generalized: parses around the '_' separator instead of assuming a
    two-digit batch number, so e.g. sim5_2 returns (5, 2) as well.
    """
    stripped = source.strip('sim')
    # Original hard-coded stripped[:2] / stripped[3:], which only works
    # for exactly two-digit batches; split on the separator instead.
    batch_part, _, series_part = stripped.partition('_')
    return int(batch_part), int(series_part)
32606ab28ad78623542d61399d242c46b92e8021
89,119
def get_qumodes_operated_upon(op):
    """
    Helper function that returns list of integers, which are the qumode
    indexes that are operated by op.
    """
    indexes = []
    for register in op.reg:
        indexes.append(register.ind)
    return indexes
a10deeddc78e38a7b1d0a90ed069762303e1f636
89,125
from typing import List


def prettify_alignment(a: List, b: List) -> str:
    """
    Makes two alignments prettier to print next to each other

    :param a: Alignment (Google side)
    :param b: Alignment (Transcript side)
    :return: Prettified alignment
    """
    rows = []
    for left, right in zip(a, b):
        left = "-" if left is None else left
        right = "-" if right is None else right
        # Left column padded to a fixed width of 30 characters.
        rows.append(left.ljust(30) + right)
    return "\n" + "\n".join(rows)
0945e1b692577dea9559dc602beaf9c29c69fe11
89,126
def fact_ad(x, n):
    """
    Returns x(x-1)(x-2)...(x-n+1), the product of n terms, factorial-like operation

    Parameters:
        x, n: two scalars

    Returns:
        x(x-1)(x-2)...(x-n+1): scalar

    Example:
    >>> fact_ad(5,4)
    120
    """
    result = 1
    term = x
    # Multiply n successive terms, decrementing from x down to x-n+1.
    for _ in range(n):
        result *= term
        term -= 1
    return result
7d9288c614b250f3835d4a33407443975007fea0
89,129
import torch


def evaluate_performance_batch(model, batch, criterion, device='cuda'):
    """Evaluate performance for a single batch"""
    with torch.no_grad():
        images, labels = (t.to(device) for t in batch)
        predictions = model.forward(images)
        # Class prediction is the index of the max logit per row.
        predict = torch.max(predictions, 1).indices
        correct = (predict == labels).sum().item()
        total = len(labels)
    return correct, total
3ad843d3d7c21b0a3b065c746dbb68b359733efd
89,131
def select_dict(coll, key, value):
    """
    Given an iterable of dictionaries, return the dictionaries
    where the values at a given key match the given value. If the
    value is an iterable of objects, the function will consider any
    to be a match.

    This is especially useful when calling REST APIs which return
    arrays of JSON objects. When such a response is converted to a
    Python list of dictionaries, it may be easily filtered using this
    function.

    :param iter coll: An iterable containing dictionaries
    :param obj key: A key to search in each dictionary
    :param value: A value or iterable of values to match
    :type value: obj or iter
    :returns: A list of dictionaries matching the query
    :rtype: list

    :Example:

    ::

        >>> dicts = [
        ...     {'hi': 'bye'},
        ...     {10: 2, 30: 4},
        ...     {'hi': 'hello', 'bye': 'goodbye'},
        ... ]
        >>> select_dict(dicts, 'hi', 'bye')
        [{'hi': 'bye'}]
        >>> select_dict(dicts, 'hi', ('bye', 'hello'))
        [{'hi': 'bye'}, {'hi': 'hello', 'bye': 'goodbye'}]
    """
    # Strings are iterable in Python 3 (they define __iter__), so the
    # original getattr check wrongly treated a string value as a
    # collection of characters, letting single-character dict values
    # match via substring membership. Treat str as a single value.
    if isinstance(value, str) or not hasattr(value, '__iter__'):
        candidates = [value]
    else:
        candidates = value
    return [v for v in coll if key in v and v[key] in candidates]
c5911af7db8d1432235b758eb4d7712ee1ec0f6d
89,138
def computeSetOfParetoPointsTuplesFromListOfParetoPoints(ListOfParetoPointsAsDicts):
    """
    Given a list of pareto points encoded as a dictionary, we compute a set
    of the same pareto points (aka remove duplicates) encoded as tuples
    ordered by the order of the keys (returned by keys() method of first
    item).
    """
    if not ListOfParetoPointsAsDicts:
        return set()
    # Key order is taken from the first point; all points are assumed to
    # share the same keys.
    keyset = ListOfParetoPointsAsDicts[0].keys()
    return {
        tuple(point[key] for key in keyset)
        for point in ListOfParetoPointsAsDicts
    }
19d10e486cdfe97abe61f19ea3792d2c07e9e4d0
89,139
import random
import time
from typing import Any

import requests


def request_with_retry(
    method: str,
    url: str,
    max_retries: int = 3,
    backoff_factor: float = 0.5,
    **kwargs: Any,
) -> requests.models.Response:
    """
    Wrapper function for `requests.request` that retries timeout,
    connection, and server-side http errors with exponential backoff
    and jitter.

    The original implementation documented retries but performed none;
    this version actually retries up to ``max_retries`` times.

    :param method: HTTP method name
    :param url: target URL
    :param max_retries: number of retries after the initial attempt
    :param backoff_factor: base delay in seconds; attempt i sleeps about
        ``backoff_factor * 2**i`` plus uniform jitter
    :param kwargs: forwarded verbatim to `requests.request`
    :returns: the successful response
    :raises requests.HTTPError: if the final attempt returns a 5xx status
    :raises requests.Timeout: if the final attempt times out
    :raises requests.ConnectionError: if the final attempt cannot connect

    For more info:
    https://docs.python-requests.org/en/latest/api/#requests.request
    """
    for attempt in range(max_retries + 1):
        last_attempt = attempt == max_retries
        try:
            response = requests.request(method, url, **kwargs)
        except (requests.Timeout, requests.ConnectionError):
            if last_attempt:
                raise
        else:
            if not 500 <= response.status_code < 600:
                return response
            if last_attempt:
                # Preserve original behavior: surface 5xx as HTTPError.
                response.raise_for_status()
        # Exponential backoff with jitter before the next attempt.
        time.sleep(backoff_factor * (2 ** attempt)
                   + random.uniform(0, backoff_factor))
0d595517604467f88f167e49e5bf92e561863887
89,140
def api_repo_url(org_name):
    """
    With the supplied organization name, constructs a GitHub API URL

    :param org_name: GitHub organization name
    :return: URL to GitHub API to query org's repos
    """
    return f'https://api.github.com/orgs/{org_name}/repos'
e602303d680bef850e7053ff589a3a76382d4833
89,142
import re


def normalize(text: str):
    """
    Strips out formatting, line breaks, and repeated spaces to normalize
    the string for comparison. This ensures minor formatting changes are
    not tagged as meaningful changes
    """
    lowered = text.lower().replace("\n", " ")
    # Drop markdown-style emphasis and quote characters.
    without_marks = re.sub(r'[`*\'"]', "", lowered)
    # Collapse any run of whitespace into a single space.
    return re.sub(r"\s+", " ", without_marks)
7470a88aa8990853b27322834f65a2ef6d77e64b
89,144
def pipeline_splitting_rule(val_size = 0.2, test_size = 0.2, random_state = 13):
    """Setup percentage of train, validate, and test of each pipeline's
    dataset in Pipeline Cluster Traversal Experiments.

    Parameters
    ----------
    val_size : float, default = None
        Value within [0~1]. Percentage of validate data.
    test_size : float, default = None
        Value within [0~1]. Percentage of test data.
    random_state : int, default = 13
        Random state value.

    Returns
    -------
    Deliver the percentage values to splitting tool function.
    """
    # Pure pass-through: hand the configuration straight back as a tuple.
    return (val_size, test_size, random_state)
9ea695eb2d0ec155234b05bf723874159e9b61ef
89,146
def delistify(some_list):
    """
    Untangle multiple nested one-element lists.
    Occasionally a problem in Libris, e.g. "[['x']]".
    Converts it to 'x'.
    @param some_list: list to convert.
    @type some_list: list
    """
    # Recursively unwrap single-element lists until the value is either
    # not a list or has more than one element.
    if isinstance(some_list, list) and len(some_list) == 1:
        return delistify(some_list[0])
    return some_list
3969714b4f58e299fb08efbf2ac80cfc6cda80c9
89,147
def longest_loc_length(book):
    """
    Return the length of the longest location key string.

    :param book: mapping keyed by location strings
    :return: length of the longest key, or 0 for an empty mapping
    """
    # max() with default=0 preserves the original's behavior on an empty
    # dict while replacing the manual scan loop.
    return max(map(len, book), default=0)
99878c70d91608688d19fc974f86e401ade800cc
89,153
def _get_weights_dict(obj, suffix=''): """ Get the dictionary of weights. Parameters ---------- obj : Model or Optimizer The target object that we want to get the weights. Returns ------- dict The weight dictionary. """ weights_dict = {} weight_names = [weight.name for weight in obj.weights] weight_values = obj.get_weights() for name, value in zip(weight_names, weight_values): weights_dict[name + suffix] = value return weights_dict
a517966c650f830f308350d1f7e545018cf10032
89,166
def clamp(x, a, b):
    """Clamps value x between a and b"""
    # Cap at the upper bound first, then raise to the lower bound;
    # equivalent to max(a, min(b, x)).
    capped = min(b, x)
    return a if capped < a else capped
79fffd1e2f15c36a8cba8a2a405e80ff32c0be49
89,168
from typing import Union


def is_downstream(pfaf_id_a: Union[int, str], pfaf_id_b: Union[int, str]) -> bool:
    """Calculate if pfaf_id_b is downstream of pfaf_id_a

    Implemented as in https://en.wikipedia.org/wiki/Pfafstetter_Coding_System#Properties

    Works even if pfaf_id_a and pfaf_id_b are at different levels.

    :param pfaf_id_a: first Pfafstetter id (upstream)
    :param pfaf_id_b: second Pfafstetter id (downstream)
    :return: `True` if pfaf_id_b is downstream of pfaf_id_a, `False` otherwise or if a == b
    """
    if str(pfaf_id_a) == str(pfaf_id_b):
        # Basin is not downstream of itself.
        return False
    # Count the length n of the common digit prefix of the two ids; the
    # loop stops at the first differing digit (or at the shorter id).
    n = 0
    for c1, c2 in zip(str(pfaf_id_a), str(pfaf_id_b)):
        if c1 == c2:
            n += 1
        else:
            break
    # First n digits are the same.
    # In case where b is shorter than a (e.g. b is level 1, a is level 2), only compare the matching digits up to
    # the length of b.
    min_len_a_b = min(len(str(pfaf_id_a)), len(str(pfaf_id_b)))
    # NOTE(review): if one id is a strict prefix of the other, n equals
    # min_len_a_b and the slices below are empty strings, so int('')
    # raises ValueError — confirm whether prefix pairs can occur here.
    # Downstream requires b's remaining digits to be numerically smaller
    # than a's (water flows toward lower Pfafstetter codes).
    if int(str(pfaf_id_b)[n:min_len_a_b]) < int(str(pfaf_id_a)[n:min_len_a_b]):
        # If any remaining digits in b are even it is not downstream.
        # (Even digits denote tributaries in the Pfafstetter scheme per
        # the linked reference; 0 is exempted by the d != 0 check.)
        for d in [int(c) for c in str(pfaf_id_b)[n:]]:
            if d % 2 == 0 and d != 0:
                return False
        return True
    return False
9f450234dd7d6e3d9cb1b4494b9d9ccd91c4b33b
89,175
import re


def whole_word_pattern(string: str) -> str:
    """
    Transforms the string 'string' so that a regex search only matches the
    whole word form of 'string', and not as part of any substring.

    Taken from Felix Kling at https://stackoverflow.com/a/4155064.
    """
    # Escape regex metacharacters, then anchor on word boundaries.
    escaped = re.escape(string)
    return "".join((r"\b", escaped, r"\b"))
445beb7a681c680abab5f8460c3775af2deab927
89,176
def make_ordinal(num):
    """
    Create an ordinal (1st, 2nd, etc.) from a number.

    :param num: a non-negative integer
    :return: the number with its English ordinal suffix appended
    """
    # 11, 12, 13 — and 111, 212, 1013, etc. — take "th" despite ending
    # in 1/2/3. The original only special-cased num in [11, 12, 13]
    # exactly, producing e.g. "111st"; checking num % 100 fixes that.
    if num % 100 in (11, 12, 13):
        ext = "th"
    else:
        ext = {1: "st", 2: "nd", 3: "rd"}.get(num % 10, "th")
    return str(num) + ext
d92069f2d1a88adeb1c72e7551adc75af84beb31
89,183
def _rhel_kernel_info(packages, kernel_version, current_version): """ Return kernel to install with associated repository. Args: packages (dict): DNF/YUM list output. kernel_version (str): Kernel version to install. current_version (str): Current kernel version. Returns: dict: kernel version, repository """ kernels = list() if current_version.startswith(kernel_version): kernel_version = current_version.rsplit(".", 1)[0] for line in packages["stdout"].splitlines(): if line.startswith("kernel.") and not line.startswith("kernel.src"): package = line.strip().split() kernels.append(dict(version=package[1], repo=package[2])) for kernel in reversed(kernels): if kernel["version"].startswith(kernel_version): return kernel raise RuntimeError( 'No kernel matching to "%s". Available kernel versions: %s' % (kernel_version, ", ".join(kernel["version"] for kernel in kernels)) )
117bd100ad12d011a46f8bd53a0b7a470ec96edc
89,185