content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import Any
import pickle
def read_pickled(filepath: str, encoding: str = "latin1") -> Any:
    """Load and return the pickled object stored in a file.

    Args:
        filepath (str): Path of the pickle file to read.
        encoding (str): Encoding passed to ``pickle.load``; defaults to
            ``"latin1"`` (useful for Python-2-era pickles).

    Returns:
        The unpickled object.
    """
    with open(filepath, "rb") as stream:
        data = pickle.load(stream, encoding=encoding)
    return data
def norm_array(Array, Minval=0.001):
    """Normalise an array by its largest-magnitude element.

    The element with the greatest absolute value (keeping its sign) is used
    as the divisor, but only when its magnitude exceeds ``Minval`` — this
    avoids attempting to renormalise an (almost) all-zero array.

    Args:
        Array: array-like exposing ``.flat`` (e.g. a numpy array).
        Minval: magnitude threshold below which the array is returned as-is.

    Returns:
        ``Array / greatest`` when ``abs(greatest) > Minval``, else ``Array``.
    """
    greatest = 0.0
    # Iterate the flat iterator directly: flatiter is iterable but does not
    # reliably support len(), which the previous index-based loop relied on.
    for value in Array.flat:
        if abs(value) > abs(greatest):
            greatest = value
    if abs(greatest) > Minval:
        return Array / greatest
    return Array
import hashlib
def compute_file_hashsum(filename):
    """Compute and return the hex SHA256 digest of a file's contents.

    The file is read in 64 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    hasher = hashlib.sha256()
    chunk_size = 65536
    with open(filename, 'rb') as stream:
        # iter() with a sentinel yields chunks until read() returns b''.
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def stringify_cookies(cookies: list[dict]) -> str:
    """Serialize a list of cookie dicts into a ``name=value;...`` string."""
    pairs = []
    for cookie in cookies:
        pairs.append(f"{cookie['name']}={cookie['value']}")
    return ";".join(pairs)
import pickle
def load_pk_file(file_name):
    """Load and return the object pickled in the given file.

    Args:
        file_name: Path to the pickle file.

    Returns:
        The object in the form it had when it was saved.
    """
    with open(file_name, 'rb') as handle:
        return pickle.load(handle)
import torch
def atanh(x: torch.Tensor) -> torch.Tensor:
    """Numerically padded inverse hyperbolic tangent.

    Unlike ``torch.atanh``, a small epsilon (1e-6) is added inside each log
    so inputs at exactly +/-1 do not produce NaN/inf.

    Parameters
    ----------
    x : torch.Tensor
        Input tensor.

    Returns
    -------
    torch.Tensor
        ``0.5 * (log(1 + x + 1e-6) - log(1 - x + 1e-6))``.
    """
    log_plus = torch.log(1 + x + 1e-6)
    log_minus = torch.log(1 - x + 1e-6)
    return 0.5 * (log_plus - log_minus)
def getNbins(h):
    """Total bin count of a histogram, suitable as a range() upper bound.

    Each populated dimension contributes ``GetNbins*() + 2`` bins (underflow
    and overflow included); Y and Z only get the +2 padding when they have
    more than one bin, i.e. when that dimension is actually used.
    """
    ny = h.GetNbinsY()
    nz = h.GetNbinsZ()
    if ny > 1:
        ny += 2
    if nz > 1:
        nz += 2
    return (h.GetNbinsX() + 2) * ny * nz
def format_key(tag, key):
    """Build a namespaced Redis cache key.

    :param tag: Tag (namespace) associated with this key.
    :param key: The actual key.
    :return: ``"<tag>:<key>"``, ready for insertion into the cache.
    """
    return f'{tag}:{key}'
def _computeforwardmissing(a, b, match=None):
    """Compute which files are in ``b`` but not in ``a``.

    Kept as its own function so extensions can wrap this call to observe
    what files _forwardcopies is about to process.
    """
    manifest_a = a.manifest()
    manifest_b = b.manifest()
    missing = manifest_b.filesnotin(manifest_a, match=match)
    return missing
import glob
def _get_post_file(stub):
"""Locate a post's markdown file from its stub"""
files = glob.glob(f"../_posts/*{stub}.md")
if not files:
raise FileNotFoundError(f"Could not locate file with stub {stub}")
elif len(files) > 1:
raise ValueError(f"Multiple files located with stub {stub}")
return files[0] | bdfd1d28dc26c2bf3a8535e8d6b9ad6951f23da4 | 116,809 |
import random
def weighted_random_selection(obj1, obj2):
    """Randomly return one of the two objects, weighted 0.3 / 0.7.

    :arg obj1: Any object (e.g. an AbstractGameUnit instance); selected
        with weight 0.3.
    :arg obj2: Another object; selected with weight 0.7.
    :return: obj1 or obj2

    .. seealso:: :py:func:`weighted_random_selection_alternate`, an
        alternative implementation used to demonstrate the importance of
        unit testing.
    """
    chosen_id = random.choices([id(obj1), id(obj2)], weights=[0.3, 0.7])[0]
    return obj1 if chosen_id == id(obj1) else obj2
def matches(open_symbol: str, close_symbol: str) -> bool:
    """Return True when close_symbol is the partner of open_symbol."""
    pairs = {"(": ")", "[": "]", "{": "}"}
    expected = pairs.get(open_symbol)
    return expected == close_symbol
def construct_hex(hex1, hex2, _bytes=2):
    """Recombine two decomposed hex values into one ``0x…`` hex string.

    ``hex1`` supplies the high half and ``hex2`` the low half; each is
    widened to ``_bytes`` bytes before concatenation.
    """
    bit_width = _bytes * 4
    high_bits = format(int(str(hex1), 16), f"0{bit_width}b")
    low_bits = format(int(str(hex2), 16), f"0{bit_width}b")
    combined = int("0b" + high_bits + low_bits, 2)
    return f'0x{format(combined, f"0{_bytes * 2}x")}'
def apiname(field):
    """Return the full (qualified) programmatic name of a field.

    When the field has a non-empty prefix the result is
    ``"<prefix>.<name>"``; otherwise just the bare name.
    """
    prefix = field["prefix"]
    if prefix:
        return f'{prefix}.{field["name"]}'
    return field["name"]
def symbol_template(address, function_name):
    """Format one symbol-array entry: ``\\t{0xdeadbeef, "function"},\\n``."""
    return '\t{{0x{}, "{}"}},\n'.format(address, function_name)
def remove_empty_leading_trailing(lines):
    """
    Removes leading and trailing empty lines.

    A list of strings is passed as argument, some of which may be empty.
    This function strips a contiguous run of empty lines from the start and
    from the end of the list and returns the result. Embedded sequences of
    empty lines are not touched.

    Parameters:
        lines   List of strings to examine.
    Return:
        New list with leading/trailing blank-line runs removed; an empty
        list when the input is empty or entirely blank.
    """
    # Advance past any leading blanks.
    start = 0
    while start < len(lines) and not lines[start]:
        start += 1
    # Entire list (possibly empty) was blank.
    if start == len(lines):
        return []
    # At least one non-blank line exists, so this scan terminates.
    end = len(lines) - 1
    while not lines[end]:
        end -= 1
    return lines[start:end + 1]
def _data_header(cols, parent_dict):
"""Constructs a header row for the worksheet based on the columns in the table and contents of the parent row"""
out = []
for col in cols:
if col == 'gau_id':
out.append(parent_dict['geography_id'])
elif col == 'oth_1_id':
out.append(parent_dict['other_index_1_id'])
elif col == 'oth_2_id':
out.append(parent_dict['other_index_2_id'])
else:
out.append(col)
return out | 4f3766542f5fcdeb6023044338f387680d93f266 | 116,836 |
import json
def read_json(json_file: str):
    """
    Parse a UTF-8 JSON file.

    Args:
        json_file (str): Path to the ``*.json`` file to read.

    Returns:
        dict: The decoded contents of the file.
    """
    with open(json_file, 'r', encoding='utf8') as handle:
        data = json.load(handle)
    return data
import random
def rand_hex_str(str_len: int = 2) -> str:
    """
    Random hex string of the specified length.

    Parameters
    ----------
    str_len : int
        Length of the random hex string, default 2 (characters).

    Returns
    -------
    str
        ``str_len`` uniformly random lowercase hexadecimal characters.

    Raises
    ------
    ValueError
        If ``str_len`` is not positive.
    """
    if str_len <= 0:
        raise ValueError('Invalid string length.')
    # One uniform draw per character replaces the previous recursive
    # 6-character chunking, which existed only to keep randrange() small.
    return ''.join(random.choice('0123456789abcdef') for _ in range(str_len))
def length_of_elements(elementList, index=0, lengths=None):
    """
    Returns the length of each row (sub-array) in a single array.

    Parameters:
        elementList: sequence of sized elements (e.g. list of lists).
        index: position to start measuring from (kept for backward
            compatibility with the original recursive interface).
        lengths: optional list to append the results to; a fresh list is
            created when omitted.

    Return:
        The ``lengths`` list with ``len(element)`` appended for every
        element from ``index`` onwards.
    """
    if lengths is None:
        lengths = []
    # Iterative scan replaces the original per-element recursion, which
    # would hit the recursion limit on long inputs.
    for element in elementList[index:]:
        lengths.append(len(element))
    return lengths
def orth(string):
    """Classify a token's orthography (capitalization/character makeup).

    Returns one of 'DIGIT', 'LOWER', 'TITLE', 'UPPER', 'MIXED', 'SPACE',
    or 'OTHER'.
    """
    if string.isdigit():
        return 'DIGIT'
    if string.isspace():
        return 'SPACE'
    if not string.isalnum():
        return 'OTHER'
    if string.islower():
        return 'LOWER'
    if string.istitle():
        return 'TITLE'
    if string.isupper():
        return 'UPPER'
    return 'MIXED'
from typing import Tuple
def parse(line: str) -> Tuple[str, str]:
    """Split an import line into (statement, comment).

    Everything before the first ``#`` is the statement; everything after it
    is the comment, stripped of surrounding whitespace. Lines without a
    comment yield an empty comment string.
    """
    statement, marker, comment = line.partition("#")
    # With no "#", partition leaves statement == line and comment == "".
    return (statement, comment.strip())
def rsync(s, d, test=False, config='dts', reverse=False):
    """Assemble an rsync command line.

    Parameters
    ----------
    s : :class:`str`
        Source directory.
    d : :class:`str`
        Destination directory.
    test : :class:`bool`, optional
        If ``True``, include ``--dry-run`` in the command.
    config : :class:`str`, optional
        ssh configuration name attached to the remote side.
    reverse : :class:`bool`
        If ``True``, attach `config` to `d` instead of `s`.

    Returns
    -------
    :class:`list`
        A list suitable for passing to :class:`subprocess.Popen`.
    """
    command = ['/bin/rsync']
    if test:
        command.append('--dry-run')
    command += ['--verbose', '--recursive', '--copy-dirlinks',
                '--times', '--omit-dir-times']
    if reverse:
        endpoints = [s + '/', config + ':' + d + '/']
    else:
        endpoints = [config + ':' + s + '/', d + '/']
    return command + endpoints
def validate_uniform_range(range):
    """Validate a uniform range.

    Args:
        range (:obj:`data_model.UniformRange`): range to check; must expose
            ``number_of_steps`` and ``id``.

    Returns:
        nested :obj:`list` of :obj:`str`: nested list of errors (empty when
        the range is valid).
    """
    errors = []
    if range.number_of_steps < 1:
        message = 'Uniform range `{}` must have at least one step.'.format(range.id)
        errors.append([message])
    return errors
def notifier_title_cleaner(otitle):
    """
    Escape Markdown-breaking characters (``[`` and ``]``) in a title.

    These characters can interfere with the display of Markdown links in
    notifications, so each occurrence is prefixed with a backslash.

    :param otitle: The title of a Reddit post.
    :return: The same title with ``[`` / ``]`` backslash-escaped.
    """
    for bracket in ("[", "]"):
        otitle = otitle.replace(bracket, "\\" + bracket)
    return otitle
def delete_comments(line):
    """Delete parenthesized comments from a line.

    Text from each ``(`` up to the matching ``)`` (or end of line when
    unclosed) is dropped.
    """
    segments = line.split(')')
    cleaned = [seg.split('(', 1)[0] if '(' in seg else seg for seg in segments]
    return ''.join(cleaned)
import sqlite3
def connect_to_db(db_name="rpg_db.sqlite3"):
    """Open and return a connection to the given SQLite database file."""
    connection = sqlite3.connect(db_name)
    return connection
def flip_cold(ds):
    """Flip MHW intensity signs for cold-spell data.

    Parameters
    ----------
    ds : xarray Dataset
        Includes MHW properties.

    Returns
    -------
    ds : xarray Dataset
        Same dataset with every ``*intensity*`` variable negated, except
        variance variables (names containing ``_var``).
    """
    intensity_names = [
        name for name in ds.keys()
        if 'intensity' in name and '_var' not in name
    ]
    for name in intensity_names:
        ds[name] = -1 * ds[name]
    return ds
import requests
def pulsar_create(addr, auth, **kwargs):
    """
    Create a new pulsar via the remote API.

    Args:
        addr: hostname or ip address of the database server.
        auth: tuple of username and password.
        **kwargs: pulsar fields forwarded as form data, e.g. ``name``,
            ``ra`` (right ascension), ``dec`` (declination).

    Raises:
        Exception if the pulsar already exists or there is an input error
        (surfaced via ``raise_for_status``).
    """
    endpoint = f"{addr}/pulsar_create/"
    response = requests.post(url=endpoint, auth=auth, data=kwargs)
    response.raise_for_status()
    return response.json()
def convert_camel_case(s: str) -> str:
    """Convert a snake_case or kebab-case string to CamelCase.

    Args:
        s (str): input string.

    Returns:
        str: title-cased string with ``_`` and ``-`` separators removed.
    """
    titled = s.title()
    for separator in ("_", "-"):
        titled = titled.replace(separator, "")
    return titled
def DecodeFATTime(FATdate, FATtime):
    """Decode the two 16-bit FAT date/time words into a tuple.

    Returns ``(year, day, month, hour, minute, second)``. Seconds are
    stored with 2-second granularity in FAT, hence the doubling.
    """
    day = FATdate & 0x1F
    month = (FATdate >> 5) & 0xF
    year = (FATdate >> 9) + 1980
    sec = (FATtime & 0x1F) << 1
    minute = (FATtime >> 5) & 0x3F
    hour = FATtime >> 11
    return (year, day, month, hour, minute, sec)
def dB_function(dB):
    """Convert a decibel value into linear (power-ratio) units."""
    linear_value = 10.0 ** (dB / 10.0)
    return linear_value
def fast_replace(t, sep, sub=None):
    """
    Replace separator characters with a single substitute (many-to-one).

    Example: ``fast_replace("a.b, c", ".,", "_")`` maps both ``.`` and
    ``,`` to ``_``. When ``sub`` is falsy (e.g. ``None``), separator
    characters are removed instead.

    :param t: input text
    :param sep: string of characters to replace
    :param sub: replacement character (or None to delete separators)
    :return: text with separators replaced
    """
    out = []
    for ch in t:
        if ch not in sep:
            out.append(ch)
        elif sub:
            out.append(sub)
    return ''.join(out)
import json
def read_dataformat(file):
    """
    Read a dataformat ``.json`` file and return it as a dict.

    Parameters
    ----------
    file : str
        Path to the dataformat.json file.

    Returns
    -------
    dict
        Parsed JSON contents.
    """
    # Use a context manager so the file handle is always closed; the
    # previous `json.load(open(file))` leaked the handle.
    with open(file) as handle:
        return json.load(handle)
from typing import List
def compute_columns_width(data: List[List[str]], padding: int = 2) -> List[int]:
    """Compute display widths for each column of tabular data.

    The width of a column is the length of its longest cell plus
    ``padding`` characters of spacing.

    Args:
        data (List[List[str]]): Tabular data (rows of columns).
        padding (int): Extra characters added to every column width.

    Returns:
        List[int]: One computed width per column.
    """
    widths = [0] * len(data[0])
    for row in data:
        for col_index, cell in enumerate(row):
            widths[col_index] = max(widths[col_index], len(cell) + padding)
    return widths
def replace_none(idx, dim):
    """
    Normalize slices to canonical form, i.e. replace ``None`` bounds with
    the appropriate integers for a dimension of length ``dim``.

    Non-slice indices are returned unchanged.

    Parameters
    ----------
    idx: slice or other index
    dim: dimension length

    Examples
    --------
    >>> replace_none(slice(None, None, None), 10)
    slice(0, 10, 1)
    """
    if not isinstance(idx, slice):
        return idx
    step = 1 if idx.step is None else idx.step
    if step > 0:
        # Forward traversal: default to [0, dim).
        start = 0 if idx.start is None else idx.start
        stop = dim if idx.stop is None else idx.stop
    else:
        # Backward traversal: default to [dim-1, -1) stepping down.
        start = dim - 1 if idx.start is None else idx.start
        stop = -1 if idx.stop is None else idx.stop
    return slice(start, stop, step)
from typing import List
from typing import Any
def list_copy(lst: List[Any]) -> List[Any]:
    """Return a new shallow copy of the given list."""
    return list(lst)
def _ExtractObsoleteNode(node, recursive=True):
"""Extracts obsolete child from |node|. Returns None if not exists."""
if not recursive:
obsolete = [
element for element in node.getElementsByTagName('obsolete')
if element.parentNode == node
]
else:
obsolete = node.getElementsByTagName('obsolete')
if not obsolete:
return None
assert len(obsolete) == 1, (
'Node %s should at most contain one obsolete node.' %
node.getAttribute('name'))
return obsolete[0] | a72565ff3d43ac191aca1f4cb1ad9149c7ead93d | 116,907 |
from typing import Sequence
from typing import Callable
from typing import Any
def remove_if_possible(options: Sequence[str], predicate: Callable[[Any], bool]):
    """
    Filter out entries matching ``predicate`` — but only when doing so
    would still leave at least one option. If nothing matches, or
    everything matches, the options are returned unchanged.
    """
    flags = [predicate(option) for option in options]
    if any(flags) and not all(flags):
        return [option for option, flagged in zip(options, flags) if not flagged]
    return options
def ltc_public_byte_prefix(is_test):
    """LITECOIN address prefix: b'\\x30' for mainnet, b'\\x6f' for testnet."""
    # XXX guessing at LTC-testnet value, can't find a reference.
    if is_test:
        return b'\x6f'
    return b'\x30'
def subpartition(quadtree, nmin, nmax):
    """
    Recursive core of the ``QuadTree.partition`` method.

    Parameters
    ----------
    quadtree : QuadTree object instance
        The QuadTree object instance being partitioned.
    nmin : int
        The splitting threshold. If this is not met this method will return
        a listing containing the root tree alone.
    nmax : int
        Upper point-count bound that forces a split regardless of nmin.

    Returns
    -------
    A (probably nested) list of QuadTree object instances containing a
    number of points respecting the threshold parameter.
    """
    children = quadtree.split()
    # Too many points: must split further, whatever nmin says.
    if quadtree.n > nmax:
        return [child.partition(nmin, nmax) for child in children]
    # Splitting would create an undersized child: keep the node whole.
    if any(child.n < nmin for child in children):
        return [quadtree]
    return [child.partition(nmin, nmax) for child in children]
import requests
def check_asa_network_object(name, asa):
    """
    Check whether a network object exists on an ASA.

    :param name: Name of the network object.
    :param asa: The ASA which is searched (provides ``url()`` and ``token``).
    :return: True if the object exists (HTTP 200), else False.
    """
    url = asa.url() + "/api/objects/networkobjects/" + name
    headers = {
        'Content-Type': 'application/json',
        'User-agent': 'REST API Agent',
        'X-Auth-Token': asa.token,
    }
    response = requests.request("GET", url, headers=headers, verify=False)
    return response.status_code == 200
def fqdn_split(fqdn):
    """Split a fully qualified domain name into (host, dns_domain).

    When no domain is present the dns_domain is an empty string.
    """
    host_name, _, dns_domain = fqdn.partition('.')
    return host_name, dns_domain
def footer_length(header):
    """Calculate the ciphertext message footer length from a header.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :rtype: int
    """
    if header.algorithm.signing_algorithm_info is None:
        # Unsigned algorithm suites carry no footer at all.
        return 0
    # 2 bytes for the signature-length field plus the signature itself.
    return 2 + header.algorithm.signature_len
def intToBinary(number, size):
    """
    Convert a decimal number to little-endian binary digits.

    Parameters
    ----------
    number: Integer
        The number to be converted into binary form.
    size: Integer
        The length of the returned list.

    Returns
    -------
    list
        List of length ``size`` of 0/1 ints, least-significant bit first.
    """
    bits = [0] * size
    position = 0
    while number > 0:
        number, bits[position] = divmod(number, 2)
        position += 1
    return bits
def pyqtSlot(*args, **kwargs):
    """Dummy stand-in decorator for environments without PyQt5."""
    def _noop(*_args):
        return None
    return _noop
import json
def parse_json(json_file):
    """
    Parse a JSON configuration file.

    Parameters
    ----------
    json_file : obj
        Open file handle for the file to be parsed.

    Returns
    -------
    dict
        Parsed configuration.
    """
    return json.load(json_file)
def _rear_left_tire_pressure_supported(data):
"""Determine if rear left tire pressure is supported."""
return data["status"]["tirePressure"]["rearLeftTirePressurePsi"] is not None | fb1e1ecf9a0252479c98b2b886799caaa3123dc8 | 116,937 |
def series_cdf(series):
    """Calculate the CDF of a series, preserving its index.

    Parameters
    ----------
    series : series like
        Values to accumulate; NaNs are skipped.

    Returns
    -------
    cdf : series
        Cumulative sum normalised by the (NaN-skipping) total.
    """
    # The previous version materialised `series.index.tolist()` into an
    # unused local; removed.
    total = series.sum(skipna=True)
    cdf = series.cumsum(skipna=True) / total
    return cdf
import math
def get_distance(atom1, atom2):
    """
    Return the Euclidean distance between atom1 and atom2.

    Each atom must be indexable by the coordinate keys 'x', 'y' and 'z'.
    """
    squared = 0
    for axis in 'xyz':
        squared += (atom1[axis] - atom2[axis]) ** 2
    return math.sqrt(squared)
import yaml
import logging
def get_config_by_path(config_filepath, parse_yaml=True):
    """Read the klio config file at the given path.

    Args:
        config_filepath (str): File path to the klio config file.
        parse_yaml (bool): Whether to parse the file as YAML; otherwise
            the raw text is returned.

    Returns:
        Parsed YAML content, or the raw file contents.

    Exits the process (SystemExit 1) when the file cannot be read.
    """
    try:
        with open(config_filepath) as config_file:
            if not parse_yaml:
                return config_file.read()
            return yaml.safe_load(config_file)
    except IOError:
        logging.error("Could not read config file {0}".format(config_filepath))
        raise SystemExit(1)
def _extractFeatureClasses(lookupIndex, subtableIndex, classDefs, coverage=None):
    """
    Extract classes for a specific lookup in a specific subtable.
    This is relatively straightforward, except for class 0 interpretation.
    Some fonts don't have class 0. Some fonts have a list of class
    members that are clearly not all to be used in kerning pairs.
    In the case of a missing class 0, the coverage is used as a basis
    for the class and glyph names used in classed 1+ are filtered out.
    In the case of class 0 having glyph names that are not part of the
    kerning pairs, the coverage is used to filter out the unnecessary
    glyph names.

    Returns a dict mapping (lookupIndex, subtableIndex, classIndex) to a
    frozenset of glyph names.
    """
    # gather the class members: classIndex -> set of glyph names
    classDict = {}
    for glyphName, classIndex in classDefs.items():
        if classIndex not in classDict:
            classDict[classIndex] = set()
        classDict[classIndex].add(glyphName)
    # specially handle class index 0
    revisedClass0 = set()
    if coverage is not None and 0 in classDict:
        # keep only the explicit class-0 glyphs that the coverage mentions
        for glyphName in classDict[0]:
            if glyphName in coverage:
                revisedClass0.add(glyphName)
    elif coverage is not None and 0 not in classDict:
        # synthesize class 0: coverage minus every glyph claimed by class 1+
        revisedClass0 = set(coverage)
        for glyphList in classDict.values():
            revisedClass0 = revisedClass0 - glyphList
    # NOTE(review): this assignment is unconditional, so when coverage is
    # None any pre-existing class 0 is replaced with an empty set — confirm
    # that is intended before changing this code path.
    classDict[0] = revisedClass0
    # flip the class map around
    classes = {}
    for classIndex, glyphList in classDict.items():
        classes[lookupIndex, subtableIndex, classIndex] = frozenset(glyphList)
    return classes
import re
def get_indentation(line):
    """
    Return the leading indentation of a line as a string.

    Lines containing 'public' or '}' get their indentation doubled; lines
    with no word character or '}' at all yield None.
    """
    match = re.search(r"(.*?)(\w|\})", line)
    if match is None:
        return None
    indentation = match.group(1)
    if 'public' in line or '}' in line:
        return indentation * 2
    return indentation
def get_version(user_id, version, client):
    """Request a specific version of a specific user via the client."""
    uid = str(user_id)
    return client.get_user_version(uid, version)
def _CalcEta(time_taken, written, total_size):
"""Calculates the ETA based on size written and total size.
@param time_taken: The time taken so far
@param written: amount written so far
@param total_size: The total size of data to be written
@return: The remaining time in seconds
"""
avg_time = time_taken / float(written)
return (total_size - written) * avg_time | 767e516f01b4665f4155d23e4105dbaafca14b97 | 116,957 |
def all_subclasses(cls):
    """Recursively collect every (direct and indirect) subclass of cls."""
    direct = cls.__subclasses__()
    nested = []
    for sub in direct:
        nested.extend(all_subclasses(sub))
    return direct + nested
def to_string(lista_ref):
    """Build a string by concatenating the elements of a list.

    Args:
        lista_ref (list): list of string pieces to join.

    Returns:
        str: the elements of ``lista_ref`` joined with no separator.
    """
    return "".join(lista_ref)
def get_gap(numberlist: list):
    """Find the missing number in an otherwise consecutive sequence.

    Can take an integer list returned by :meth:`get_numbers` and determine
    the missing sequence number between its first and last elements.

    Args:
        numberlist: Sorted list of numbers to find a gap in.

    Returns:
        The first missing number in the sequence, or None if there is no gap.
    """
    # Membership tests against a set are O(1); testing against the list
    # made the scan O(n^2).
    present = set(numberlist)
    for candidate in range(numberlist[0], numberlist[-1] + 1):
        if candidate not in present:
            return candidate
    return None
def exact_dp4(a_v, b_v):
    """Exact (formal) 4D dot-product.

    :param a_v: left-hand-side vector
    :type a_v: list(SollyaObject)
    :param b_v: right-hand-side vector
    :type b_v: list(SollyaObject)
    :return: exact 4D dot-product
    :rtype: SollyaObject (value or expression)
    """
    total = 0
    # Accumulate as p + acc (product on the left), matching the formal
    # expression shape the original built.
    for lhs, rhs in zip(a_v, b_v):
        total = lhs * rhs + total
    return total
import difflib
def create_patch(filename: str, modified: str):
    """Create a unified diff between a file's current contents and new text.

    Args:
        filename: path of the original file (also used as both diff labels).
        modified: proposed new contents.

    Returns:
        The unified diff as a single newline-joined string.
    """
    with open(filename, "r") as source:
        original = source.read()
    diff_lines = difflib.unified_diff(
        original.splitlines(),
        modified.splitlines(),
        fromfile=filename,
        tofile=filename,
        lineterm="",
    )
    return "\n".join(diff_lines)
import hashlib
import math
def deterministic_random(input_string):
    """
    Map a string (or bytes) deterministically to a number in [0, 1).

    The value is derived from the MD5 digest of the input, so the same
    input always produces the same output.
    """
    if isinstance(input_string, str):
        payload = input_string.encode('utf-8')
    else:
        payload = input_string
    digest_hex = hashlib.md5(payload).hexdigest()
    return float.fromhex(digest_hex) / math.pow(2, 128)
from typing import Iterable
def scheduler_story(keys: set, transition_log: Iterable) -> list:
    """Create a story from the scheduler transition log for a set of keys.

    Parameters
    ----------
    keys : set
        A set of task `keys` or `stimulus_id`'s.
    transition_log : iterable
        The scheduler transition log.

    Returns
    -------
    story : list
        Transitions whose key (t[0]) or stimuli (t[3]) intersect ``keys``.
    """
    story = []
    for transition in transition_log:
        if transition[0] in keys or keys.intersection(transition[3]):
            story.append(transition)
    return story
import functools
def collect_keys(table):
    """
    Collect the sorted names of all keys (columns) used throughout the data.

    Args:
        table: iterable of dict-like rows.

    Returns:
        Sorted list of every key appearing in any row; [] for empty input.
    """
    # The previous reduce(set.union, ...) raised TypeError on an empty
    # table (no initializer); a plain accumulation handles that case.
    all_keys = set()
    for entry in table:
        all_keys.update(entry.keys())
    return sorted(all_keys)
def get_simult_lines(tests, results, test_line_num=0):
    """Pop and return one line from the input/output pair, ensuring the two
    streams are still in sync (identical); raises otherwise."""
    current_test = tests.pop(0)
    current_result = results.pop(0)
    test_line_num += 1
    if current_test != current_result:
        raise Exception("Lost sync between files at input line %d.\n  INPUT: %s\n  OUTPUT: %s" % (test_line_num, current_test, current_result))
    return current_test
def _to_pandas_from_pandas(obj):
"""
Convert Pandas DataFrame etc. to a Pandas DataFrame.
A no-op, in short.
"""
return obj | 1baa67ea6af05f40c133ed2316fc8c0da7fb873f | 116,999 |
def get_closer(a, b, test_val):
    """Return whichever of a or b is closer to test_val (b wins ties)."""
    return a if abs(a - test_val) < abs(b - test_val) else b
def bias(obs, mod):
    """
    Bias between observations and model: mean(obs) - mean(mod),
    taken along axis 0.
    """
    obs_mean = obs.mean(axis=0)
    mod_mean = mod.mean(axis=0)
    return obs_mean - mod_mean
def _get_start_end(location):
"""
Get the start and the end of a location object. For point locations both
start and end equal the position value.
"""
if location["type"] == "range":
return location["start"]["position"], location["end"]["position"]
elif location["type"] == "point":
return location["position"], location["position"] | 985f130322cc93eae2683dbecad59380042fb4bc | 117,005 |
def response(code, description):
    """
    Decorator factory that records a possible HTTP response on a function.

    Each application appends ``(code, description)`` to the function's
    ``doc_responses`` list (created on first use).

    Args:
        code: The HTTP code (i.e 404)
        description: A human readable description
    """
    def response_inner(function):
        if getattr(function, 'doc_responses', None) is None:
            function.doc_responses = []
        function.doc_responses.append((code, description))
        return function
    return response_inner
def uniq(ls):
    """
    Return a list of the distinct elements of ls (order not preserved).
    """
    return list(set(ls))
import io
def bfh(s):
    """Convert a hex string into a BytesIO stream over the raw bytes."""
    raw = bytes.fromhex(s)
    return io.BytesIO(raw)
def sort_batch_of_lists(uids, batch_of_lists, lens):
    """Sort a batch of lists by their lengths, descending (stable)."""
    order = sorted(range(len(lens)), key=lambda i: lens[i], reverse=True)
    sorted_uids = [uids[i] for i in order]
    sorted_lens = [lens[i] for i in order]
    sorted_batch = [batch_of_lists[i] for i in order]
    return sorted_uids, sorted_batch, sorted_lens
from math import ceil
def round_up(x, step):
    """Round x up to the next multiple of step (x unchanged when step is 0)."""
    if step == 0:
        return x
    return ceil(float(x) / float(step)) * step
def list_records(full_record):
    """Fixture: wrap the given record twice in a search-style hits payload."""
    return {"hits": {"hits": [full_record, full_record]}}
def define_outfile_cols_expand(samp_grps, ontology, mode):
    """
    Define the ordered columns for the expand output file.

    :param samp_grps: SampleGroups object
    :param ontology: functional ontology; only required for 'f' or 'ft' modes
    :param mode: one of 'f', 't', or 'ft'
    :return: list of relevant column names in the correct order
    """
    # Intensity columns always come first among the quantitative columns.
    quant_cols = samp_grps.mean_names + samp_grps.all_intcols
    if ontology != "cog":
        quant_cols = quant_cols + samp_grps.n_peptide_names_flat
        # ft doesn't have samp_children
        if mode != 'ft':
            quant_cols = quant_cols + samp_grps.samp_children_names_flat
    if mode == 'f':
        prefix_by_ontology = {
            'go': ['id', 'name', 'namespace'],
            'cog': ['id', 'description'],
            'ec': ['id', 'description'],
        }
        if ontology not in prefix_by_ontology:
            raise ValueError("Invalid ontology. Expected one of: %s" % ['go', 'cog', 'ec'])
        return prefix_by_ontology[ontology] + quant_cols
    if mode == 't':
        return ['id', 'taxon_name', 'rank'] + quant_cols
    if mode == 'ft':
        return ['go_id', 'name', 'namespace', 'tax_id', 'taxon_name', 'rank'] + quant_cols
    raise ValueError("Invalid mode. Expected one of: %s" % ['f', 't', 'ft'])
def is_return_value(parameter: dict) -> bool:
    """Whether the parameter dict is marked as a return_value."""
    marked = parameter.get("return_value", False)
    return marked
import math
def cos(degrees) -> float:
    """Calculate the cosine of an angle given in degrees."""
    angle_radians = math.radians(degrees)
    return math.cos(angle_radians)
def return_list_smart(lst):
    """
    Collapse a list sensibly by size:
    length > 1  -> the list itself
    length == 1 -> its single element
    length == 0 -> None
    """
    count = len(lst)
    if count == 0:
        return None
    if count == 1:
        return lst[0]
    return lst
def grid_maker(width, height):
    """Return a height x width grid of '.' characters (list of row lists)."""
    return [['.'] * width for _ in range(height)]
def adjust_indel_rates(expected):
    """ adapt indel rates for lower rate estimate from validated de novos

    The indel mutation rates from Samocha et al., Nature Genetics 46:944-950
    assume the overall indel mutation rate is 1.25-fold greater than the
    overall nonsense mutation rate, i.e. 1.25 times as many frameshifts as
    nonsense mutations. We have our own estimate of the ratio, derived from
    our de novo validation efforts, which we apply in place of the Samocha
    et al. ratio.

    Args:
        expected: data frame of mutation rates.

    Returns:
        the rates data frame, with adjusted indel rates.
    """
    # counts derived from the DDD 4K dataset
    nonsense_n = 411
    frameshift_n = 610
    ddd_ratio = frameshift_n / nonsense_n
    samocha_ratio = 1.25  # Nature Genetics 46:944-950 frameshift:nonsense ratio
    for column in ("missense_indel", "lof_indel"):
        # undo the Samocha et al. ratio, then apply the DDD-derived ratio
        expected[column] = expected[column] / samocha_ratio * ddd_ratio
    return expected
import os
import zipfile
def create_archive(archive_path, file_path, file_name=None, if_exists='replace'):
    """
    Create and fill a zip archive.

    `Args:`
        archive_path: str
            The file name of the zip archive
        file_path: str
            The path of the file to add
        file_name: str
            The name the file should have inside the archive; defaults to
            the basename of ``file_path``
        if_exists: str
            If the archive already exists, one of 'replace' or 'append'
    `Returns:`
        Zip archive path
    """
    # Append only when explicitly requested; any other value overwrites.
    write_type = 'a' if if_exists == 'append' else 'w'
    if not file_name:
        # os.path.basename handles platform-specific separators too,
        # unlike the previous split('/') approach.
        file_name = os.path.basename(file_path)
    with zipfile.ZipFile(archive_path, write_type) as archive:
        # ZIP_STORED: the file is added without compression (as before).
        archive.write(file_path, arcname=file_name,
                      compress_type=zipfile.ZIP_STORED)
    return archive_path
def rotToQuat(obj):
    """ return the rotation of the object as quaternion"""
    if obj.rotation_mode in ('QUATERNION', 'AXIS_ANGLE'):
        return obj.rotation_quaternion
    # any other mode is an Euler rotation; convert it
    return obj.rotation_euler.to_quaternion()
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t | 70fe393e5e0ead1f11c4fa63dd8c81817b5d51b1 | 117,055 |
def check_if_bst(node, mini=float('-inf'), maxi=float('+inf')):
    """
    Check if the given tree is a Binary Search Tree (BST)
    Args:
        node: root node of the Tree. `node` arg must have `.left`, `.right` and `.data` variables
        mini: exclusive lower bound for this subtree - should be omitted
        maxi: exclusive upper bound for this subtree - should be omitted
    Returns:
        bool - True if it's BST and False if not
    Examples:
        Precondition:
        >>> class Node:
        ...     def __init__(self, data):
        ...         self.data = data
        ...         self.left = None
        ...         self.right = None
        >>> root = Node(4)
        >>> root.left = Node(2)
        >>> root.right = Node(6)
        >>> root.left.left = Node(1)
        >>> root.left.right = Node(3)
        >>> root.right.left = Node(5)
        >>> root.right.right = Node(7)
        Example itself:
        >>> check_if_bst(root)
        True
    """
    if node is None:
        return True
    # Strict comparisons (instead of the old integer-only data-1 / data+1
    # bounds) make this correct for float keys too; duplicates are still
    # rejected, matching the previous behaviour for integers.
    if not (mini < node.data < maxi):
        return False
    return (check_if_bst(node.left, mini, node.data) and
            check_if_bst(node.right, node.data, maxi))
import random
def random_colour(bits=24):
    """Returns a random 24-bit colour hex value."""
    # (1 << 24) - 1 == 0xFFFFFF, the largest value for the given bit width.
    upper = (1 << bits) - 1
    return random.randint(0, upper)
import math
def c_div(q, d):
    """
    Arbitrary signed integer division with c behaviour (truncation toward zero).
    >>> (c_div(10, 3), c_div(-10, -3), c_div(-10, 3), c_div(10, -3))
    (3, 3, -3, -3)
    >>> c_div(-11, 0)
    Traceback (most recent call last):
    ...
    ZeroDivisionError
    """
    # Pure integer arithmetic: the old int(abs(q) / abs(d)) went through a
    # float and lost precision for operands beyond 2**53.
    sign = -1 if (q < 0) != (d < 0) else 1
    return sign * (abs(q) // abs(d))
def get_all_subclasses(base_class):
    """
    Method for getting all child classes from a parent object. Taken from:
    http://stackoverflow.com/questions/3862310/how-can-i-find-all-subclasses-of-a-class-given-its-name
    Inputs:
    :param class base_class: The parent class which we are looking towards.
    Returns
    :returns: class all_subclasses: A list of classes representing the child
        classes of the base_class
    """
    collected = []
    pending = list(base_class.__subclasses__())
    while pending:
        current = pending.pop(0)
        collected.append(current)
        # prepend children to keep depth-first (pre-order) traversal,
        # matching the order of the recursive walk
        pending = list(current.__subclasses__()) + pending
    return collected
def parselinje(linje):
    """
    Decode a log line of the form
        200, download size: 'https://www.vegvesen.no/nvdb/api/v3/veg?veglenkesekvens=0.99786547288010718%401125762'
    and return a dictionary with these components:
        { 'status'     : 200,
          'url'        : 'https://www.vegvesen.no/nvdb/api/v3/veg?veglenkesekvens=0.99786547288010718@1125762',
          'kortform'   : '0.99786547288010718@1125762',
          'veglenke'   : 1125762,
          'posisjon'   : 0.99786547288010718,
          'apiversjon' : 'v3',
          'endepunkt'  : 'https://www.vegvesen.no/nvdb/api/v3/veg'
        }
    """
    linje = linje.replace('%40', '@')
    status = int(linje.split(',')[0])
    # the URL is the single-quoted section of the line
    url = linje.split("'")[1]
    endepunkt, query = url.split('?', 1)
    if 'v2' in endepunkt:
        apiversjon = 'v2'
    elif 'v3' in endepunkt:
        apiversjon = 'v3'
    else:
        apiversjon = ''
    # query looks like 'veglenkesekvens=<posisjon>@<veglenke>'
    kortform = query.split('=')[1]
    posisjon, veglenke = kortform.split('@')
    return {'status': status,
            'url': url,
            # Bug fix: 'kortform' was previously hard-coded to the example
            # value '0.99786547288010718@1125762' instead of the parsed one.
            'kortform': kortform,
            'veglenke': int(veglenke),
            'posisjon': float(posisjon),
            'apiversjon': apiversjon,
            'endepunkt': endepunkt}
def unpack_ipv4_bytes(byte_pattern):
    """ Given a list of raw bytes, parse out and return a list of IPs
    :param byte_pattern: The raw bytes from the DHCP option containing
        a list of IP addresses. The RFC specifies that an IP list will
        be a list of octets, with each group of 4 octets representing
        one IP address. There are no separators or terminators.
    :returns: a list of IP addresses as strings

    The input list is left unmodified (the old implementation reversed and
    consumed it in place). A trailing group of fewer than 4 octets is
    ignored, as before.
    """
    ip_list = []
    # walk the octets four at a time, in order
    for start in range(0, len(byte_pattern) - 3, 4):
        octets = byte_pattern[start:start + 4]
        ip_list.append('.'.join(str(o) for o in octets))
    return ip_list
def stats(r):
    """returns the median, average, standard deviation, min and max of a sequence"""
    n = len(r)
    avg = sum(r) / n
    # sample variance; 'or 1' guards the single-element case
    variance = sum((v - avg) ** 2 for v in r) / (n - 1 or 1)
    ordered = sorted(r)
    return ordered[n // 2], avg, variance ** .5, min(r), max(r)
def calc_centered_grid(cols_list, hpad=.05, vpad=.05):
    """Calculates a centered grid of Rectangles and their positions.
    Parameters
    ----------
    cols_list : [int]
        List of ints. Every entry represents a row with as many channels
        as the value.
    hpad : float, optional
        The amount of horizontal padding (default: 0.05).
    vpad : float, optional
        The amount of vertical padding (default: 0.05).
    Returns
    -------
    [[float, float, float, float]]
        A list of all rectangle positions in the form of [xi, xy, width,
        height] sorted from top left to bottom right.
    Examples
    --------
    Calculates a centered grid with 3 rows of 4, 3 and 2 columns
    >>> calc_centered_grid([4, 3, 2])
    Calculates a centered grid with more padding
    >>> calc_centered_grid([5, 4], hpad=.1, vpad=.75)
    """
    # cell height/width: split the space left after padding evenly
    h = (1 - ((len(cols_list) + 1) * vpad)) / len(cols_list)
    w = (1 - ((max(cols_list) + 1) * hpad)) / max(cols_list)
    grid = []
    row = 1
    for l in cols_list:
        yi = 1 - ((row * vpad) + (row * h))
        # margin on both sides of this row: it only depends on the row's
        # column count, so compute it once (was recomputed for every cell)
        m = .5 - (((l * w) + ((l - 1) * hpad)) / 2)
        for i in range(l):
            xi = m + (i * hpad) + (i * w)
            grid.append([xi, yi, w, h])
        row += 1
    return grid
import io
def stream_binary(f):
    """
    Create a binary data stream from a file path (str), raw bytes, or an
    existing stream.

    A stream that supports seeking is rewound to the start before being
    returned. Raises TypeError for unsupported input (previously the
    function silently returned None).
    """
    if isinstance(f, str):
        return open(f, "rb")
    if isinstance(f, bytes):
        return io.BytesIO(f)
    if hasattr(f, "read"):
        if hasattr(f, "seek"):
            f.seek(0)
        return f
    raise TypeError(
        "expected a str path, bytes, or a readable stream, got %s"
        % type(f).__name__)
def document_features(doc_words, top_words):
    """
    Basic feature extraction from a list of words in a document (doc_words).
    For every word in top_words (most used in the whole corpus), record
    True/False depending on whether it occurs in the document's word set.
    """
    # a set makes the per-word membership test O(1)
    word_set = set(doc_words)
    return {'contains(%s)' % word: (word in word_set) for word in top_words}
import random
def gen_a_random_probability(self):
    """generate a random probability obey uniform distribution
    Returns:
        float: a random probability
    Examples:
        >>> s = String()
        >>> s.gen_a_random_probability()
        0.5
    """
    probability = random.uniform(0, 1)
    return probability
def bytes_needed(i):
    """
    Compute the number of bytes needed to hold arbitrary-length integer i

    Each loop iteration consumes 8 bits of ``i`` (a shift by 7 followed by a
    shift by 1) and counts one byte.

    NOTE(review): this appears to size a *signed* representation — e.g.
    bytes_needed(127) == 1 but bytes_needed(128) == 2. For negative ``i``,
    Python's arithmetic right shift never reaches 0 or 1, so the loop never
    terminates; presumably callers only pass non-negative values — confirm.
    """
    bn = 1
    while True:
        i >>= 7
        if i == 1:
            # negative sign bit
            # Exactly one bit remains after dropping 7: it would occupy the
            # sign position of the current byte, so one extra byte is needed.
            return bn + 1
        elif i == 0:
            return bn
        i >>= 1
        bn += 1
def format_price(price):
    """
    formats number 12345678 -> 12 345 678

    Interior groups are zero-padded correctly (1000005 -> '1 000 005');
    the previous implementation dropped leading zeros within groups and,
    under Python 3, broke entirely because ``price /= 1000`` is float
    division. Note: 0 now formats as '0'.
    """
    # thousands grouping via the format spec, commas swapped for spaces
    return '{:,}'.format(price).replace(',', ' ')
def flatten_lists(*lists):
    """Flatten several lists into one list.
    Examples
    --------
    >>> flatten_lists([0, 1, 2], [3, 4, 5])
    [0, 1, 2, 3, 4, 5]
    Parameters
    ----------
    lists : an arbitrary number of iterable collections
        The type of the collections is not limited to lists,
        they can also be sets, tuples, etc.
    Returns
    -------
    flat_list : a flattened list
    """
    flat_list = []
    for collection in lists:
        flat_list.extend(collection)
    return flat_list
def trim_variable_postfixes(scope_str):
    """Trims any extra numbers added to a tensorflow scope string.
    Necessary to align variables in graph and checkpoint
    Args:
        scope_str: Tensorflow variable scope string.
    Returns:
        Scope string with extra numbers trimmed off.
    """
    # str.partition returns the whole string unchanged when ':' is absent,
    # unlike find() == -1, which made the old slice chop the last character.
    return scope_str.partition(':')[0]
def closestPointOnSegment1(p = "const %(Vector)s&",
                           a0 = "const %(Vector)s&",
                           a1 = "const %(Vector)s&",
                           result = "%(Vector)s&"):
    """Find the point on a line segment (a0,a1) closest to point (p).
    This version return True if the closest point on the line is bounded by the segment, and False otherwise."""
    # NOTE(review): this is a binding-generator signature stub, not a real
    # implementation — the defaults are C++ parameter-type template strings
    # and the body only declares the C++ return type "bool". Presumably a
    # wrapper generator (e.g. PYB11/pybindgen-style) consumes these
    # signatures; confirm against the generator that reads this module.
    return "bool"
from typing import List
def shift_list(l: List, n: int) -> List:
    """Return a new list based on the one given with its elements shifted n
    times.
    """
    shifted = []
    for position in range(len(l)):
        shifted.append(l[(position - n) % len(l)])
    return shifted
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.