content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def obsmarkersversion(caps):
    """Return the obsmarkers versions advertised in a bundle2 caps dict.

    Entries look like ``b'V1'``; the leading ``b'V'`` is stripped and the
    remainder parsed as an int. Entries without the prefix are ignored.
    """
    entries = caps.get(b'obsmarkers', ())
    versions = []
    for entry in entries:
        if entry.startswith(b'V'):
            versions.append(int(entry[1:]))
    return versions
def attach_domain(corpus, domt):
    """Tag every token of *corpus* with a domain label for transfer learning.

    Parameters
    ----------
    corpus : list
        List of sentences, each a list of ((word, tag), iob) tuples.
    domt : str
        Either 'src' (source) or 'tgt' (target).

    Returns
    -------
    list
        Same nesting, with tuples of the form ((word, tag, domt), iob).
    """
    if domt not in {'src', 'tgt'}:  # Domain type - source or target
        raise ValueError("domt must be 'src' or 'tgt'.")
    tagged = []
    for sentence in corpus:
        tagged.append([((word, tag, domt), iob) for ((word, tag), iob) in sentence])
    return tagged
def is_basestring(value):
    """
    Return True if *value* is a text string under both Python 2.7 and 3+.
    """
    text_types = (type(u''), str)
    return isinstance(value, text_types)
def crop_to_extent(data, xyz, extent):
    """Clip *data* so it fits inside the volume's extent.

    data is (z, y, x) ordered; xyz and extent are (x, y, z) ordered.
    The clip size along each axis is extent - xyz.
    """
    clip_x, clip_y, clip_z = (bound - origin for bound, origin in zip(extent, xyz))
    return data[:clip_z, :clip_y, :clip_x]
def getThresholds(settings):
    """Extract the statistical thresholds from a FEAT settings dictionary.

    :arg settings: A FEAT settings dictionary (see :func:`loadSettings`).
    :returns:      A ``{stat : threshold}`` dictionary with keys ``'p'``
                   (P-value threshold) and ``'z'`` (Z-statistic threshold);
                   values are ``None`` when the corresponding thresholding
                   was not carried out.
    """
    thresholds = {}
    for stat, setting_key in (('p', 'prob_thresh'), ('z', 'z_thresh')):
        raw = settings.get(setting_key, None)
        thresholds[stat] = None if raw is None else float(raw)
    return thresholds
def literal_compile(s):
    """Compile a sql expression with bind params inlined as literals.

    Parameters
    ----------
    s : Selectable
        The expression to compile.

    Returns
    -------
    cs : str
        An equivalent sql string.
    """
    compiled = s.compile(compile_kwargs={'literal_binds': True})
    return str(compiled)
def _traverse(data, path):
    """
    Recursively get the value specified by `path` from `data`.
    Return None if the path does not lead to a value.
    >>> nested_dict = {'root': {'nested_key': 'nested_value'}}
    >>> _traverse(nested_dict, ['root', 'nested_key'])
    'nested_value'
    >>> _traverse(nested_dict, ['root', 'nonexistent_key'])
    >>> _traverse(nested_dict, ['nonexistent_root', 'nonexistent_key'])
    """
    # Helpers close over `data`/`path`; each returns the next level to descend into.
    def get_next_dict_value():
        # Missing keys yield None, which the next recursion treats as "not found".
        return data.get(path[0])
    def get_next_list_value():
        # NOTE(review): lists are traversed via their first element only, and a
        # path component is still consumed for that hop — confirm this is intended.
        return data[0]
    if not data:
        # Covers None and empty containers (also falsy scalars such as 0 or '').
        return None
    # Determine dataType and traverse accordingly
    if isinstance(data, dict):
        if len(path) == 1:
            # Last path component: return the value directly.
            return get_next_dict_value()
        return _traverse(get_next_dict_value(), path[1:])
    elif isinstance(data, list):
        if len(data) == 1:
            # Single-element list: unwrap without consuming a path component.
            return get_next_list_value()
        return _traverse(get_next_list_value(), path[1:])
    # Scalar reached before the path was exhausted.
    return None
import base64
def b64_encode(s: bytes) -> str:
    """Encode bytes into a URL-safe Base64 string without padding

    Parameters:
        s (``bytes``):
            Bytes to encode

    Returns:
        ``str``: The encoded bytes
    """
    encoded = base64.urlsafe_b64encode(s).decode()
    return encoded.strip("=")
from typing import Any
from typing import Optional
from typing import Callable
from unittest.mock import Mock
def mock_response(
    status: int = 200,
    content: str = "CONTENT",
    data: Any = None,
    read: Optional[Callable[[Any], Any]] = None,
) -> Mock:
    """A helper function to mock a response with the given arguments.

    Arguments:
        status: Status code of response.
        content: Content value of response.
        data: The return value of ``response.json()``.
        read: The read function of the response.

    Returns:
        Mocker of the response.
    """
    response = Mock()
    response.status_code = status
    response.content = content
    # Compare against None rather than truthiness so falsy payloads such as
    # 0, "", [] or {} are still wired up to .json() / .read (the original
    # `if data:` silently skipped them, leaving auto-created Mock attributes).
    if data is not None:
        response.json = Mock(return_value=data)
    if read is not None:
        response.read = read
    # Support use as a context manager (``with mock_response() as r:``).
    response.__enter__ = lambda *args: response
    response.__exit__ = lambda *args: None
    return response
def gen_rate_step(n, schedule):
    """
    Look up the annealing rate for iteration *n*.

    :param n: the current iteration number
    :param schedule: dict mapping the first iteration of each step to its rate;
                     e.g. {0: 0.005, 200: 0.0002, 400: 0.0001} gives iterations
                     0 to 200 a rate of 0.005, 201 to 400 a rate of 0.0002,
                     and >400 a rate of 0.0001. The key 0 must be present.
    :return: the corresponding rate for the iteration number n
    """
    keys_desc = sorted(schedule, reverse=True)
    # Smallest key must be 0 so every non-negative n is covered.
    if not keys_desc or keys_desc[-1] != 0:
        raise Exception("the schedule must contain 0 as a key")
    for boundary in keys_desc:
        if n > boundary:
            return schedule[boundary]
    if n == 0:
        return schedule[0]
def is_numeric(space, w_obj):
    """ Finds whether a variable is a number or a numeric string"""
    tp = w_obj.tp
    if tp == space.tp_float or tp == space.tp_int:
        return space.w_True
    if tp == space.tp_str:
        # Strings count as numeric only if their content parses as a number.
        return space.newbool(w_obj.is_really_valid_number(space))
    return space.w_False
def plus(num):
    """Return *num* incremented by one."""
    incremented = num + 1
    return incremented
import re
def keep_only_letters(word_list):
    """
    Remove all non-letter characters from each element of a list.

    :type word_list: list(str)
    :param word_list: The list of word to remove all non-letters from

    NOTE(review): the pattern also preserves '-' — presumably to keep
    hyphenated words intact; confirm against callers.
    """
    return [re.sub(r'[^A-Za-z-]+', '', word) for word in word_list]
def get_dis_factor(base_price):
    """Gets discount factor based on price (5% off above 1000, else 2%)."""
    return 0.95 if base_price > 1000 else 0.98
def convert_dict_to_vdj_feature(d, reference):
    """ Convert a dict to a VdjAnnotationFeature by looking up its feature_id. """
    feature_id = d['feature_id']
    return reference.get_feature_by_id(feature_id)
def non_private_variance(freq, sampling_method, threshold):
    """The variance of non-private sampling."""
    inclusion_prob = sampling_method.inclusion_prob(freq, threshold)
    return freq * freq * (1.0 / inclusion_prob - 1)
import json
def read_sequences_json(sequences_filename: str) -> dict:
    """
    Simple helper method to load the contents of the sequences.json file

    :param sequences_filename: full path to the sequences.json file
    :return: The contents of the file as a python dictionary
    """
    with open(sequences_filename, "r") as handle:
        return json.load(handle)
from typing import List
from typing import Dict
def enrich_asset_properties(properties: List, properties_to_enrich_dict: Dict) -> Dict:
    """
    Build an enrichment dict from the asset properties that appear in
    *properties_to_enrich_dict*.

    Args:
        properties (List): List of properties of an asset.
        properties_to_enrich_dict (Dict): Properties to be enriched.

    Returns:
        (Dict): Mapping of enriched property name to its value details.
    """
    enriched = {}
    for prop in properties:
        prop_name = prop.get('name')
        if prop_name not in properties_to_enrich_dict:
            continue
        enriched[properties_to_enrich_dict.get(prop_name)] = {
            'Value': prop.get('value'),
            'LastUser': prop.get('last_reported_by'),
        }
    return enriched
def uch_matsen(gs, rhos, ut):
    """
    Choking velocity from the Matsen paper [3]_.

    Parameters
    ----------
    gs : float
        Solids flux [kg/(s m^2)]
    rhos : float
        Density of particle [kg/m^3]
    ut : float
        Terminal velocity [m/s]

    Returns
    -------
    uch : float
        Choking velocity [m/s]

    References
    ----------
    .. [3] Matsen, 1982. Mechanism of Choking and Entrainment. Powder
       Technology, 32, 21-33.
    """
    loading_ratio = gs / rhos
    return 10.74 * ut * loading_ratio ** 0.227
def sort_freq_dist(freqdict):
    """ Sort a frequency distribution into (count, key) pairs, descending. """
    pairs = [(count, key) for key, count in freqdict.items()]
    pairs.sort(reverse=True)
    return pairs
import pickle
def pickle_iffnn(data, fp, verbose=1):
    """
    Pickle *data* to *fp* when *fp* is not None; return whether a write happened.
    """
    if fp is None:
        return False
    if verbose:
        attrs = vars(fp)
        if '_file' in attrs:
            # getting the filename of MongoDB gridfs file
            fname = attrs['_file']['filename']
        else:
            # getting the filename of a os file
            fname = fp.name
        print(f'Writing {fname}')
    fp.write(pickle.dumps(data))
    return True
import quopri
def decode_email(email):
    """
    Decode an email payload using quoted-printable.

    :param email: The email to decode
    :return: The decoded email content (bytes)
    """
    payload = email.get_payload()
    return quopri.decodestring(payload)
def flatten_nested_dict_list(d, parent_key='', sep='_', item_key=''):
    """
    Flatten a nested dict or list into a flat list of (key, value) tuples.

    For example, given a dict
    {
        a: 1
        b: {
            c: 2
        }
        c: 3
    }
    the function would return [(a, 1), (b_c, 2), (c, 3)]

    Args:
        d (dict, list): a nested dict or list to be flattened
        parent_key (str): recursion helper
        sep (str): separator for nesting keys
        item_key (str): recursion helper
    Returns:
        list: a list of (key, value) tuples
    """
    new_key = item_key if not parent_key else parent_key + sep + item_key
    if isinstance(d, (tuple, list)):
        flat = []
        for index, element in enumerate(d):
            flat.extend(flatten_nested_dict_list(element, new_key, sep=sep, item_key=str(index)))
        return flat
    if isinstance(d, dict):
        flat = []
        for key, value in d.items():
            assert isinstance(key, str)
            flat.extend(flatten_nested_dict_list(value, new_key, sep=sep, item_key=key))
        return flat
    # Leaf value: emit a single (key, value) pair.
    return [(new_key, d)]
def fib_binet(n):
    """
    Constant time solution using Binet's Fibonacci Number Formula.

    In real-use, GOLDEN_RATIO should be moved outside of this function
    to avoid unneccesary re-computation.
    """
    sqrt5 = 5 ** 0.5
    golden_ratio = (1 + sqrt5) / 2
    return int((golden_ratio ** n - (-golden_ratio) ** (-n)) / sqrt5)
def partial_find(chunk, until):
    """Finds `until` in chunk and splits after `until`.
    If until is partially found at the end of the chunk,
    it will split right before the partially found string
    This function is used to consume as much of chunk as possible while
    still able to append to chunk
    """
    # Fast path: `until` occurs in full — split just after its end.
    try:
        pos = chunk.index(until)
    except ValueError:
        pass
    else:
        pos += len(until)
        return chunk[:pos], chunk[pos:]
    # Slow path: look for a proper prefix of `until` at the tail of `chunk`.
    chunk_len = len(chunk)
    until_len = len(until)
    # Candidate tail start positions, earliest first ...
    chunk_range = range(max(0, chunk_len - until_len), chunk_len)
    # ... paired with the matching prefix length, longest first.
    until_range = range(min(until_len, chunk_len), 0, -1)
    for chunk_pos, until_pos in zip(chunk_range, until_range):
        if chunk[chunk_pos:] == until[:until_pos]:
            # Split before the partial match so the caller can extend it later.
            return chunk[:chunk_pos], chunk[chunk_pos:]
    # No full or partial match: the whole chunk is consumable.
    return chunk, b""
def generate_custom_field_resolver(name, resolver_name):
    """Generate function to resolve each custom field within each DjangoObjectType.

    Args:
        name (str): name of the custom field to resolve
        resolver_name (str): name of the resolver as declared in DjangoObjectType
    """
    def _resolver(self, info, **kwargs):
        # Custom field values live in the object's `cf` mapping.
        return self.cf.get(name, None)
    _resolver.__name__ = resolver_name
    return _resolver
def write_header(columns, title="Research Engineering Project Allocations"):
    """write the table header (title and column names)

    Args:
        columns: iterable of column names, one header cell each.
        title: caption placed in a cell spanning all data columns.

    Returns:
        str: an HTML ``<thead>`` fragment.
    """
    # Title row: two empty <th> placeholders for the row-label columns,
    # then the title cell spanning every data column.
    header = """<thead> <tr>
<th></th>
<th></th>
<td class="title" colspan={n_columns}>{title}</td>
</tr><tr>
<th class="blank" ></th>
<th class="blank" ></th>
""".format(
        n_columns=len(columns), title=title
    )
    # One header cell per column name.
    for colname in columns:
        header += """<th class="header" >{colname}</th>
""".format(
            colname=colname
        )
    header += """</tr></thead>
"""
    return header
def is_ontology_metadata(convention, metadatum):
    """Check if metadata is ontology from metadata convention
    """
    try:
        ontology = convention["properties"][metadatum]["ontology"]
    except KeyError:
        # Any missing level in the convention means "not ontology".
        return False
    return bool(ontology)
def base_to_uint(encoded,
                 base,
                 ord_lookup_table,
                 powers):
    """
    Decodes bytes from the given base into a big integer.

    :param encoded:
        Encoded bytes.
    :param base:
        The base to use.
    :param ord_lookup_table:
        The ordinal lookup table to use.
    :param powers:
        Pre-computed tuple of powers of length ``powers_length``.
        NOTE(review): assumed to satisfy ``powers[i] == base ** i`` — confirm
        at the call sites that build it.
    """
    # Convert to big integer.
    # number = 0
    # for i, x in enumerate(reversed(encoded)):
    #     number += _lookup[x] * (base**i)
    # Above loop divided into precomputed powers section and computed.
    number = 0
    length = len(encoded)
    powers_length = len(powers)
    # Least-significant digits first: the reversed slice walks the last
    # `powers_length` digits of `encoded`, using the precomputed powers.
    for i, char in enumerate(encoded[length:-powers_length - 1:-1]):
        number += ord_lookup_table[char] * powers[i]
    # Remaining (more significant) digits fall back to computing base ** i.
    for i in range(powers_length, length):
        char = encoded[length - i - 1]
        number += ord_lookup_table[char] * (base ** i)
    return number
def played_card_dict(played_card):
    """
    Returns a dictionary containing:
    - the player who played the card
    - the played card
    :param played_card:
    :return:
    """
    player = played_card.player
    return {
        'player_id': player.id,
        'card': str(played_card.card),
    }
def is_defined_in_module(o, module):
    """
    Checks if an object is defined in a given module and not imported.

    Args:
        o (Type): Object to check
        module (ModuleType): Module

    Returns:
        bool
    """
    return module.__name__ == o.__module__
import json
def load_json_dict(fn):
    """Load a dictionary from a JSON-file"""
    with open(fn, 'r') as handle:
        return json.load(handle)
def task_id(node):
    """
    Extracts task ID from a project tree node.

    :param Node node: project tree node.
    :return: task ID.
    :rtype: int
    """
    item = node.item
    return item.id
def _bytes_to_unicode(obj):
"""
Convert a non-`bytes` type object into a unicode string.
:param obj: the object to convert
:return: If the supplied `obj` is of type `bytes`, decode it into a unicode
string. If the `obj` is anything else (including None), the original
`obj` is returned. If the object is converted into a unicode string,
the type would be `unicode` for Python 2.x or `str` for Python 3.x.
"""
return obj.decode() if isinstance(obj, bytes) else obj | 47b607978b2c43f544ae63f6de6a5d3ceff0bbbe | 112,055 |
import time
def wait_secs(finish_clock):
    """
    calculate the number of seconds until a given clock time
    The clock time should be the result of time.perf_counter()
    Does NOT wait for this time.
    """
    remaining = finish_clock - time.perf_counter()
    return remaining if remaining > 0 else 0
def get_words(sentence):
    """ (str) -> list
    Split *sentence* into words, treating common punctuation marks as
    word separators.

    >>> get_words("Today, I will be staying home all day")
    ['Today', 'I', 'will', 'be', 'staying', 'home', 'all', 'day']
    >>> get_words("")
    []
    """
    # punctuation symbols that separate phrases
    separators = [',', '-', '--', ':', ';', '"', "'"]
    # turn each separator into whitespace, then let split() collapse runs
    cleaned = sentence
    for mark in separators:
        cleaned = cleaned.replace(mark, " ")
    return cleaned.split()
import re
def listen_to(regex, flags=re.IGNORECASE):
    """Listen to messages matching a regex pattern

    This decorator registers a Plugin method to be called for every message
    that matches *regex*. The received Message is passed to the method; named
    groups in the pattern become keyword arguments.

    :param regex: regex pattern to listen for
    :param flags: regex flags to apply when matching
    :return: wrapped method
    """
    def decorator(func):
        # Lazily create the nested metadata structure, then register the pattern.
        metadata = getattr(func, "metadata", {})
        func.metadata = metadata
        actions = metadata.setdefault('plugin_actions', {})
        listen_cfg = actions.setdefault('listen_to', {})
        listen_cfg.setdefault('regex', []).append(re.compile(regex, flags))
        return func
    return decorator
def is_leaf(tree, node):
    """check if node is a leaf in tree (i.e. it has no neighbors)."""
    neighbors = list(tree.neighbors(node))
    return len(neighbors) == 0
def first_item(l):
    """Returns first item in the list or None if empty."""
    if len(l) > 0:
        return l[0]
    return None
def slice_epitope_predictions(
        epitope_predictions,
        start_offset,
        end_offset):
    """
    Return the subset of EpitopePrediction objects whose span lies entirely
    inside the given interval, sliced through their source sequences with
    their offsets adjusted.
    """
    sliced = []
    for prediction in epitope_predictions:
        within = (prediction.offset >= start_offset
                  and prediction.offset + prediction.length <= end_offset)
        if within:
            sliced.append(prediction.slice_source_sequence(start_offset, end_offset))
    return sliced
import json
def format_json(batched):
    """Serialize each event to JSON, interleaving a newline entry after each."""
    return [piece
            for event in batched
            for piece in (json.dumps(event), "\n")]
def _get_contig_based_lessthan(contigs):
"""Returns a callable that compares variants on genomic position.
The returned function takes two arguments, both of which should be Variant
protos or None. The function returns True if and only if the first Variant is
strictly less than the second, which occurs if the first variant is on a
previous chromosome or is on the same chromosome and its entire span lies
before the start position of the second variant. `None` is treated as a
sentinel value that does not compare less than any valid Variant.
Args:
contigs: list(ContigInfo). The list of contigs in the desired sort order.
Returns:
A callable that takes two Variant protos as input and returns True iff the
first is strictly less than the second. Note that if the variant has a
contig not represented in the list of contigs this will raise IndexError.
"""
contig_index = {contig.name: i for i, contig in enumerate(contigs)}
def lessthanfn(variant1, variant2):
if variant1 is None:
return False
if variant2 is None:
return True
contig1 = contig_index[variant1.reference_name]
contig2 = contig_index[variant2.reference_name]
return (contig1 < contig2 or
(contig1 == contig2 and variant1.end <= variant2.start))
return lessthanfn | 42ccd3b3af47e6008b2328cd91a91634ad24f56b | 112,095 |
def classToDict(obj=None):
    """
    Transform an object into a dict so it can be JSONified.

    Useful for turning custom classes into JSON-compatible dictionaries.
    Returns an empty dict when *obj* is None; otherwise returns a shallow
    copy of the object's ``__dict__`` (mutating the result does not affect
    the object).
    """
    # Identity check: `obj == None` would invoke a custom __eq__ and can
    # misfire; `is None` is the correct and idiomatic test.
    if obj is None:
        return {}
    return dict(obj.__dict__)
def _match_all(s, keywords):
"""
True if all strings in keywords are contained in s, False otherwise.
Case-insensitive.
:param s: string
:param keywords: a tuple containing keywords that should all be included
:return: True if all strings in keywords are contained in s, False otherwise
"""
for kw in keywords:
if kw.lower().strip() not in s.lower().strip():
return False
return True | b53afb0bd392a0700637dd720e996350694934c4 | 112,100 |
import re
def _convert_tnt_treeline(line: str) -> str:
"""
Prepare TNT-tree line
Args:
line: str, a row or line from TNT-tree file
Returns: str
"""
line = line.strip().strip(";")
line = line.replace(".", "_")
line = re.sub(r"([\w|\d])\s([\w|(])", r"\1, \2", line)
line = re.sub(r"\)\(", r"), (", line)
return line | 141a605e3dfebbfdb60db9ba4c3a05e07b19b634 | 112,101 |
def convertLatLongToPixels(mapImg, leftLongitude, rightLongitude, topLatitude, bottomLatitude, latLong):
    """Convert given lat/long coordinates into pixel X/Y coordinates given map and its borders

    Args:
        mapImg (Image): map image
        left/right/top/bottom: borders of map
        latLong (list): (lat, long)
    Returns:
        (x, y) pixel values of the corresponding pixel in the image
    """
    width, height = mapImg.size
    # Clamp the coordinate onto the map borders first.
    lat = min(max(latLong[0], bottomLatitude), topLatitude)
    lon = min(max(latLong[1], leftLongitude), rightLongitude)
    latSpan = topLatitude - bottomLatitude
    lonSpan = rightLongitude - leftLongitude
    # Linear interpolation, then clamp into the valid pixel range.
    pixelX = (lon - leftLongitude) / lonSpan * width
    pixelX = max(min(pixelX, width - 1), 0)
    # Pixel Y grows downward while latitude grows upward, hence the flip.
    pixelY = height - (lat - bottomLatitude) / latSpan * height
    pixelY = max(min(pixelY, height - 1), 0)
    return (pixelX, pixelY)
def step(hparams, agent, state, env, worker_id):
    """ run envrionment for one step and return the output """
    if hparams.render:
        env.render()
    action = agent.act(state, worker_id)
    next_state, reward, done, _info = env.step(action)
    if done:
        # Episode finished: start the environment over.
        next_state = env.reset()
    return action, reward, done, next_state
def make_dsn(uri):
    """Convert a URI object to a PostgreSQL DSN string."""
    parts = ["dbname=%s" % uri.database]
    if uri.host is not None:
        parts.append("host=%s" % uri.host)
    if uri.port is not None:
        parts.append("port=%d" % uri.port)
    if uri.username is not None:
        parts.append("user=%s" % uri.username)
    if uri.password is not None:
        parts.append("password=%s" % uri.password)
    return " ".join(parts)
def length(v):
    """
    Calculate the Euclidean length of a vector.

    Arguments:
        v: tuple of numbers
    Returns:
        The length of the vector.
    """
    squared_norm = sum(component * component for component in v)
    return squared_norm ** 0.5
def apply_adjustment(df, adj_date, adj_value,
                     adj_type='mul', date_col='date',
                     cols=['open', 'high', 'low', 'close']):
    """
    Apply adjustment to a given stock

    df
        dataframe of the given stock
    adj_date
        date from which the adjustment is
        to be made
    adj_value
        value to be adjusted
    adj_type
        method of adjustment **mul/sub**
        mul means multiply all the values
        such as splits and bonuses
        sub means subtract the values
        such as dividends
    date_col
        date column on which the adjustment
        is to be applied
    cols
        columns to which the adjustment is to
        be made

    Notes
    -----
    1) You can use negative values to add to the
       stock value by using **adj_type=sub**
    2) Adjustment is applied prior to all dates
       in the dataframe
    3) In case your dataframe has date or
       symbol as indexes, reset them
    """
    df = df.set_index(date_col).sort_index()
    # Remember the row at adj_date: .loc[:adj_date] is label-INCLUSIVE, so the
    # adjustment below would also touch adj_date itself; the original values
    # are restored afterwards so only dates strictly before adj_date change.
    values_on_adj_date = df.loc[adj_date, cols].copy()
    if adj_type == "mul":
        adjusted_values = (df.loc[:adj_date, cols] * adj_value).round(2)
    elif adj_type == "sub":
        adjusted_values = (df.loc[:adj_date, cols] - adj_value).round(2)
    else:
        raise ValueError('adj_type should be either mul or sub')
    df.loc[:adj_date, cols] = adjusted_values
    # Restore the unadjusted values on adj_date itself.
    df.loc[adj_date, cols] = values_on_adj_date
    return df.reset_index()
def new_filename(file, ntype, snr):
    """Append noise type and power at the end of wav filename."""
    suffix = ".WAV.{}.{}dB".format(ntype, snr)
    return file.replace(".WAV", suffix)
def get_bucket_key(s3_loc):
    """
    From a full s3 location, return the bucket and key
    """
    if not s3_loc.startswith('s3://'):
        raise Exception(f"{s3_loc} is not a properly formatted key")
    parts = s3_loc.split('/')
    return parts[2], '/'.join(parts[3:])
import unicodedata
def normalize_casefold(text, *,
                       _casefold=str.casefold, _normalize=unicodedata.normalize
                       ):
    """Normalize text data for caseless comparison

    Use the "canonical caseless match" algorithm defined in the Unicode
    Standard, version 10.0, section 3.13, requirement D146 (page 159):
    NFKD(casefold(NFKD(casefold(NFD(text))))).
    """
    step1 = _casefold(_normalize('NFD', text))
    step2 = _casefold(_normalize('NFKD', step1))
    return _normalize('NFKD', step2)
def collapsed_nested_count_dict(counts_dict, all_ids, order=None):
    """
    Collapse a nested counts dictionary into per-id rows of ordered counts.

    `counts_dict` maps filename -> {id: count}. Each filename becomes a
    column, ordered by `order` (or the dict's own key order when None).

    Returns a dict with a 'table' key mapping each id (gene or transcript)
    to its ordered list of counts (missing ids count as 0), and a 'header'
    key holding the column order.
    """
    col_order = counts_dict.keys() if order is None else order
    collapsed = dict()
    for filename in col_order:
        file_counts = counts_dict[filename]
        for id_name in all_ids:
            if not collapsed.get(id_name, False):
                collapsed[id_name] = list()
            # absent ids contribute a zero count for this column
            collapsed[id_name].append(file_counts.get(id_name, 0))
    return {'table': collapsed, 'header': col_order}
def lookup_password(words, uhpd):
    """
    lookup_password(words, (user, host, port, database)) -> password

    Where 'words' is the output from pgpass.parse(). Returns the password of
    the first entry matching all four fields ('*' matches anything), or None.
    """
    user, host, port, database = uhpd
    def matches(pattern, value):
        return pattern == '*' or pattern == value
    for word, (w_host, w_port, w_database, w_user) in words:
        if (matches(w_user, user) and matches(w_host, host)
                and matches(w_port, port) and matches(w_database, database)):
            return word
def get_first(tokens, exclude_comment=True):
    """Given a list of tokens, find the first token which is not a space token
    (such as a ``NEWLINE``, ``INDENT``, ``DEDENT``, etc.) and,
    by default, also not a ``COMMMENT``.

    ``COMMMENT`` tokens can be included by setting ``exclude_comment`` to ``False``.
    Returns ``None`` if none is found.
    """
    for tok in tokens:
        if tok.is_space():
            continue
        if exclude_comment and tok.is_comment():
            continue
        return tok
    return None
def user_login(session, username, password):
    """Log a user into NEXT

    Args:
        session:
            obj: a request session
        username:
            str: the username of the user you want logged in
        password:
            str: the password of the user to be logged in

    On success the session's Authorization header is updated with the
    bearer token from the response.
    """
    credentials = {"id": username, "password": password}
    response = session.post("http://rbac-server:8000/api/authorization/", json=credentials)
    body = response.json()
    if "token" in body:
        session.headers.update({"Authorization": "Bearer " + body["token"]})
    return response
import re
def MigrateImports(content):
    """Updates import statements from TestNG to JUnit.

    Takes Java source text and returns it with TestNG imports rewritten to
    their JUnit / junit-dataprovider equivalents.
    """
    # Annotation imports map one-to-one.
    content_new = re.sub('org.testng.annotations.Test', 'org.junit.Test', content)
    content_new = re.sub('org.testng.annotations.BeforeMethod;',
                         'org.junit.Before;', content_new)
    content_new = re.sub('org.testng.annotations.BeforeClass;',
                         'org.junit.BeforeClass;', content_new)
    # DataProvider has no single JUnit equivalent: expand into the
    # junit-dataprovider imports plus the runner import.
    content_new = re.sub(
        'import org.testng.annotations.DataProvider;',
        '''import com.tngtech.java.junit.dataprovider.DataProvider;
import com.tngtech.java.junit.dataprovider.DataProviderRunner;
import com.tngtech.java.junit.dataprovider.UseDataProvider;
import org.junit.runner.RunWith;''', content_new)
    # for remaining imports such as assertEquals
    content_new = re.sub('testng', 'junit', content_new)
    return content_new
import six
import hashlib
def get_md5(input_data):
    """Return the MD5 hex digest of a str or bytes value.

    Text input is encoded as UTF-8 before hashing; bytes input is
    hashed as-is.
    """
    # six.text_type is simply `str` on Python 3, so the third-party `six`
    # dependency is unnecessary here.
    if isinstance(input_data, str):
        byte_data = input_data.encode("utf-8")
    else:
        byte_data = input_data
    return hashlib.md5(byte_data).hexdigest()
def get_compatible_version(version):
    """Return the compatible version.

    :arg str version: Version string.
    :return: The compatible version which could be used as
        ``~={compatible_version}``.
    :rtype: str

    For ``x.y.z``: if ``x`` is zero the whole string is returned,
    otherwise just ``x.y``.
    """
    is_zero_series = version.startswith("0.")
    return version if is_zero_series else ".".join(version.split(".")[:2])
def edit_distance(a, b):
    """Returns the Hamming edit distance between two strings (compared over
    the length of the shorter one)."""
    distance = 0
    for char_a, char_b in zip(a, b):
        if char_a != char_b:
            distance += 1
    return distance
def command_requires_log_name(command):
    """Check if the specified command requires the --audit-log-name option.

    command[in] command to be checked
    """
    is_copy = command == "COPY"
    return is_copy
def summarize_variables(variables):
    """Return a string with a table of names and shapes of the given variables."""
    names = ['Name'] + [var.name for var in variables]
    shapes = ['Shape'] + [str(var.get_shape().as_list()) for var in variables]
    cols = [names, shapes]
    # Pad each column to its widest cell so the table lines up.
    widths = [max(len(cell) for cell in col) for col in cols]
    rows = [' '.join(cell.ljust(widths[i]) for i, cell in enumerate(row))
            for row in zip(*cols)]
    # Dashed separator line under the header row.
    rows.insert(1, ' '.join('-' * w for w in widths))
    return '\n'.join(rows)
def isAmbiguous(dt):
    """Helper function to check if a datetime is ambiguous. This is necessary
    because GridLAB-D uses MySQL's timestamp datatype, which has no timezone
    or DST information. This function strips off a datetime object's tzinfo,
    then checks if the time would be ambiguous without that information.
    """
    naive = dt.replace(tzinfo=None)
    return dt.tzinfo.is_ambiguous(naive)
def get_useful_fields(repos, repo_count):
    """Selects the useful fields from the repos

    Arguments:
        repos {list} -- the list of repos to be cleaned
        repo_count {int} -- the number of repos

    Returns:
        {dict} -- the standard output format for the program
    """
    filtered_repos = [
        {
            "name": repo["name"],
            "language": repo["language"],
            "stars": repo["stargazers_count"],
            "forks": repo["forks_count"],
            "issues": repo["open_issues_count"],
        }
        for repo in repos
    ]
    return {
        "Number of repositories": repo_count,
        "Repositories": filtered_repos,
    }
def get_vertices(edges: list) -> list:
    """Get all the vertices belonging to input edges

    :param edges: edges in the graph
    :return: vertices
    """
    vertices = set()
    for edge in edges:
        vertices.update(edge)
    return list(vertices)
def get_app_or_pod_id(app_or_pod):
    """Gets the app or pod ID from the given app or pod

    :param app_or_pod: app or pod definition
    :type app_or_pod: requests.Response
    :return: app or pod id
    :rtype: str
    """
    # Prefer the 'app' entry; fall back to 'pod', then to an empty dict so
    # the trailing .get('id') is always safe.
    fallback = app_or_pod.get('pod', {})
    return app_or_pod.get('app', fallback).get('id')
import requests
def response_from_server(url, image_file, verbose=True):
    """Upload *image_file* to *url* with an HTTP POST and return the response.

    Args:
        url (str): URL that the request is sent to.
        image_file (_io.BufferedReader): File to upload, should be an image.
        verbose (bool): True if the status of the response should be printed. False otherwise.

    Returns:
        requests.models.Response: Response from the server.
    """
    response = requests.post(url, files={'file': image_file})
    if verbose:
        if response.status_code == 200:
            print("Everything went well!")
        else:
            print("There was an error when handling the request.")
    return response
from typing import List
def build_ngram(sentence: str, N: int = 2) -> List[str]:
    """Split *sentence* into its overlapping character N-grams.

    Args:
        sentence (str): the sentence input
        N (int, optional): the n parameter. Defaults to 2

    Returns:
        list(str): list of substrings; empty when the sentence is shorter than N
    """
    grams = []
    for start in range(len(sentence) - N + 1):
        grams.append(sentence[start:start + N])
    return grams
from typing import Callable
def run_main(main: Callable[[], None]):
    """
    Run the given main function and return its result.

    Having a single entry-point wrapper means try/except handling, logging,
    etc. can later be added in one place for every caller of main().

    Parameters
    ----------
    main
        Main function
    """
    return main()
from typing import List
def list_difference(lst1: List, lst2: List) -> List:
    """Return the symmetric difference of two lists, preserving order.

    Elements of ``lst1`` not present in ``lst2`` come first (original order,
    duplicates kept), followed by elements of ``lst2`` not present in
    ``lst1``. Elements must be hashable.

    Parameters
    ----------
    lst1 : List
        First list
    lst2 : List
        Second list

    Returns
    -------
    Difference list
    """
    # Pre-build sets so each membership test is O(1) instead of scanning the
    # other list; the enumerate() indices in the original were never used.
    in1, in2 = set(lst1), set(lst2)
    return [v for v in lst1 if v not in in2] + [v for v in lst2 if v not in in1]
from pathlib import Path
from typing import Iterator
def _subdirs(base_dir: Path) -> Iterator[Path]:
"""Return the subdirectories inside the given directory."""
return (p for p in base_dir.iterdir() if p.is_dir()) | e1b4fb51246e98aaab7bba5e8fff3a9ab9f2eedd | 112,163 |
def get_chunks(seg_dur, audio_id, audio_duration):
    """Get all chunk segments from a utterance.

    Args:
        seg_dur (float): segment chunk duration, seconds
        audio_id (str): utterance name,
        audio_duration (float): utterance duration, seconds

    Returns:
        List: all the chunk segments, named "<id>_<start>_<end>"
    """
    # Only whole chunks are kept; any remainder at the end is dropped.
    num_chunks = int(audio_duration / seg_dur)  # all in seconds
    chunks = []
    for i in range(num_chunks):
        start = i * seg_dur
        chunks.append(audio_id + "_" + str(start) + "_" + str(start + seg_dur))
    return chunks
def obj_name(value, arg):
    """
    Return the last component of *value* when split on the separator *arg*.

    e.g. for a path separated by '/' like '/pa/th/to/file.ext' it returns
    'file.ext'.
    """
    # rpartition finds the last separator; when arg is absent, the whole
    # string ends up in the tail, matching split(arg)[-1].
    _, _, tail = value.rpartition(arg)
    return tail
def decoder(permutation):
    """Return the inverse permutation needed to decode a permuted string.

    ``permutation[i] = j`` means source position ``i`` was sent to position
    ``j``; the returned list maps each ``j`` back to ``i``.
    """
    # Fill by direct assignment: O(n) overall, instead of calling
    # permutation.index(x) for every x, which made the original quadratic.
    depermutation = [0] * len(permutation)
    for position, target in enumerate(permutation):
        depermutation[target] = position
    return depermutation
import tokenize
def read_pyfile(filename):
    """Read and return the contents of a Python source file (as a
    string), honouring any PEP 263 encoding declaration in the file."""
    # tokenize.open detects the source encoding from the coding cookie/BOM.
    stream = tokenize.open(filename)
    try:
        return stream.read()
    finally:
        stream.close()
def set_piece_priorities(torrent_client, params):
    """
    Set priorities for all pieces in a torrent

    params['info_hash']: str - torrent info-hash
    params['priority']: int - priority from 0 to 7.
    :return: 'OK'
    """
    info_hash = params['info_hash']
    priority = params['priority']
    torrent_client.set_piece_priorities(info_hash, priority)
    return 'OK'
def file_to_set(file_name):
    """Read a file and collect its lines (newlines stripped) into a set.

    Args:
        file_name (str): name of file

    Returns:
        results (set): set of words from file
    """
    with open(file_name, 'rt') as handle:
        return {line.replace('\n', '') for line in handle}
from datetime import datetime
def datetime_filter(src, fmt="%b %e, %I:%M%P"):
    """Convert a datetime (or an int Unix timestamp) into a formatted string."""
    # Integers are treated as Unix timestamps in the local timezone.
    when = datetime.fromtimestamp(src) if isinstance(src, int) else src
    return when.strftime(fmt)
def get_response(link, phase, params, fail_msg):
    """Read one response from the link and verify its phase and field count.

    Raises RuntimeError with *fail_msg* when the comma-separated response
    does not have exactly *params* fields or does not start with *phase*.
    """
    fields = next(link.receive()).split(',')
    if len(fields) != params or fields[0] != phase:
        raise RuntimeError(fail_msg)
    return fields
def flatten(lst):
    """Flatten a list of lists into a single flat list."""
    flat = []
    for inner in lst:
        flat.extend(inner)
    return flat
def truncate_long_string(data, maxlen=75):
    """Truncate *data* to *maxlen* characters, appending "..." when cut."""
    if len(data) <= maxlen:
        return data
    return data[:maxlen] + "..."
def camel_case(snake_str):
    """
    Return a camelCased version of a snake_case string.

    Usage:
        >>> camel_case('foo_bar')
        'fooBar'
    """
    # The first component keeps its casing; every later component is
    # capitalized via str.title() and glued back on.
    first, *rest = snake_str.split('_')
    return first + "".join(word.title() for word in rest)
def weekday_to_bits(weekday):
    """
    Converts a weekday integer to its associated bit value.

    :param weekday: The integer corresponding to a datetime.weekday()
        (0 = Monday ... 6 = Sunday).
    :return: A bit value (``1 << weekday``).
    :raises KeyError: if *weekday* is outside 0..6, matching the original
        dict-lookup behaviour.
    """
    # The original literal dict mapped day d to the d-th bit; a shift says the
    # same thing without shadowing the ``int`` builtin as the parameter name.
    if not 0 <= weekday <= 6:
        raise KeyError(weekday)
    return 1 << weekday
from typing import List
from typing import Any
from typing import Dict
def map_frecuency_data(data: List[Any]) -> Dict[Any, int]:
    """Count the absolute frequency of each element in *data*."""
    counts: Dict[Any, int] = {}
    for item in data:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
import torch
def validate_bbox3d(boxes: torch.Tensor) -> bool:
    """Validate that a batch of 3D bounding boxes describes axis-aligned cuboids.

    Args:
        boxes: a tensor containing the coordinates of the bounding boxes to be
            extracted. The tensor must have the shape of Bx8x3, where each box
            is defined in the following ``clockwise`` order: front-top-left,
            front-top-right, front-bottom-right, front-bottom-left,
            back-top-left, back-top-right, back-bottom-right, back-bottom-left.
            The coordinates must be in the x, y, z order.

    Returns:
        True when every box passes the checks.

    Raises:
        AssertionError: if ``boxes`` is not Bx8x3, or if any box's four widths,
            heights or depths disagree (i.e. the box is not a cuboid).
    """
    # Shape gate: all the index arithmetic below assumes exactly 8 corners
    # with (x, y, z) coordinates.
    if not (len(boxes.shape) == 3 and boxes.shape[1:] == torch.Size([8, 3])):
        raise AssertionError(f"Box shape must be (B, 8, 3). Got {boxes.shape}.")
    # Width: x of the right-side corners (indices 1, 2, 5, 6) minus x of the
    # left-side corners (0, 3, 4, 7); +1 treats coordinates as inclusive
    # pixel indices, so the four width estimates per box must all agree.
    left = torch.index_select(boxes, 1, torch.tensor([1, 2, 5, 6], device=boxes.device, dtype=torch.long))[:, :, 0]
    right = torch.index_select(boxes, 1, torch.tensor([0, 3, 4, 7], device=boxes.device, dtype=torch.long))[:, :, 0]
    widths = left - right + 1
    # Compare every column of widths against the first edge of each box.
    if not torch.allclose(widths.permute(1, 0), widths[:, 0]):
        raise AssertionError(f"Boxes must have be cube, while get different widths {widths}.")
    # Height: y of the bottom corners (2, 3, 6, 7) minus y of the top
    # corners (0, 1, 4, 5), again with the inclusive +1.
    bot = torch.index_select(boxes, 1, torch.tensor([2, 3, 6, 7], device=boxes.device, dtype=torch.long))[:, :, 1]
    upper = torch.index_select(boxes, 1, torch.tensor([0, 1, 4, 5], device=boxes.device, dtype=torch.long))[:, :, 1]
    heights = bot - upper + 1
    if not torch.allclose(heights.permute(1, 0), heights[:, 0]):
        raise AssertionError(f"Boxes must have be cube, while get different heights {heights}.")
    # Depth: z of the four back corners (4..7) minus z of the matching front
    # corners (0..3); the corner ordering pairs them index-for-index.
    depths = boxes[:, 4:, 2] - boxes[:, :4, 2] + 1
    if not torch.allclose(depths.permute(1, 0), depths[:, 0]):
        raise AssertionError(f"Boxes must have be cube, while get different depths {depths}.")
    return True
def get_dbot_score(verdict):
    """
    Evaluate the dbot (Demisto) score as per verdict from SlashNext cloud API

    :param verdict: SlashNext verdict on a certain IoC
    :return: Dbot score (3 malicious, 2 suspicious, 1 benign, 0 unknown)
    """
    scores = {
        'Malicious': 3,
        'Suspicious': 2,
        'Benign': 1,
        'Redirector': 1,
    }
    return scores.get(verdict, 0)
def burn(f1, m1):
    """If the user is burned and trying to use a physical move, return
    0.5, otherwise return 1, unless the user's ability is Guts (id 62), in
    which case also return 1.
    """
    burned = 'burn' in f1.status
    has_guts = f1.ability == 62
    physical = m1.damage_class_id == 2
    if burned and physical and not has_guts:
        return 0.5
    return 1
def every(pred, seq):
    """ returns True iff pred is True for every element in seq """
    return all(pred(item) for item in seq)
import re
def get_include_count(f):
    """Get number of #include statements in the file"""
    pattern = re.compile(r'\s*#\s*include')
    return sum(1 for line in f if pattern.match(line))
def predict_source_position_in_camera(cog_x, cog_y, disp_dx, disp_dy):
    """
    Compute the source position in the camera frame by offsetting the image
    centre of gravity with the DISP vector.

    Parameters
    ----------
    cog_x: float or `numpy.ndarray` - x coordinate of the center of gravity (hillas.x)
    cog_y: float or `numpy.ndarray` - y coordinate of the center of gravity (hillas.y)
    disp_dx: float or `numpy.ndarray`
    disp_dy: float or `numpy.ndarray`

    Returns
    -------
    source_pos_x, source_pos_y
    """
    return cog_x + disp_dx, cog_y + disp_dy
def total_wabbits(months, lifespan):
    """
    Counts the total number of pairs of rabbits that will remain after the
    given number of months if every pair of rabbits lives *lifespan* months
    (mortal Fibonacci rabbits).

    Args:
        months (int): number of months.
        lifespan (int): lifespan in months.

    Returns:
        int: The total number of pairs of rabbits.
    """
    # previous[i] holds the number of pairs aged i months (index 0 = newborns).
    previous = [1] + (lifespan - 1) * [0]
    for month in range(2, months + 1):
        # Every pair older than one month breeds one newborn pair; shifting
        # the list right ages each cohort, and the oldest cohort drops off.
        # (Renamed from `next`, which shadowed the builtin.)
        newborns = sum(previous[1:])
        previous = [newborns] + previous[:-1]
    return sum(previous)
def requires_2fa(response):
    """Determine whether a response requires us to prompt the user for 2FA."""
    # GitHub signals a missing one-time password with a 401 plus an
    # X-GitHub-OTP header containing "required".
    if response.status_code != 401:
        return False
    if "X-GitHub-OTP" not in response.headers:
        return False
    return "required" in response.headers["X-GitHub-OTP"]
def rplog_convert(df, pwave_sonic, shear_sonic):
    """
    Convert usec/ft sonic logs (DT/DTS) to velocity logs (VP/VS), build
    impedance logs (IP/IS) from velocity and density (RHOB), and add a
    VP/VS ratio log. The frame is mutated in place and also returned.
    """
    try:
        # 304800: converts slowness to velocity — presumably 304800 µm/ft
        # so µs/ft slowness becomes m/s velocity; confirm against the
        # project's unit conventions.
        df["VP"] = 304800 / df[pwave_sonic]
        df["IP"] = df.VP * df.RHOB
        df["VS"] = 304800 / df[shear_sonic]
        df["VPVS"] = df.VP / df.VS
        df["IS"] = df.VS * df.RHOB
    except Exception as e:
        # Best-effort: report and return the frame with whatever was created.
        print(f"Error when creating log: {e}")
    return df
import inspect
def _iscallable(obj):
"""Check if an object is a callable object safe for inspect.find_file."""
return inspect.ismodule(obj) or inspect.isclass(obj) or inspect.ismethod(
obj) or inspect.isfunction(obj) or inspect.iscode(obj) | 6697973dfdc96c4095175c74c7847cc8ec689f2f | 112,233 |
import operator
def sub_tuples(a: tuple, b: tuple) -> tuple:
    """
    Element-wise subtraction of two tuples (a - b).

    :param a: first tuple
    :param b: second tuple
    :return: tuple of pairwise differences, truncated to the shorter input
    """
    return tuple(operator.sub(x, y) for x, y in zip(a, b))
def tensorclass(request):
    """A pytest fixture that returns the tensor class currently being tested.

    ``request.param`` is supplied by pytest parametrization, so each
    parametrized value is one tensor class and dependent tests run once per
    class.
    """
    # NOTE(review): relies on this fixture being registered with params=...
    # at a site not visible here — confirm before reuse.
    return request.param
def fwhm(fwhm_now, lambda_now, secz_now, lambda_next, secz_next):
    """Predict the fwhm of the next exposure.

    Parameters:
    -----------
    fwhm_now : The fwhm value of the current exposure
    lambda_now : The wavelength of the current exposure
    secz_now : The airmass of the current exposure
    lambda_next : The wavelength of the next exposure
    secz_next : The airmass of the next exposure

    Returns:
    --------
    fwhm_next : The predicted fwhm of the next exposure
    """
    # Scale by wavelength^-0.2 and airmass^0.6, coercing inputs to float
    # since they may arrive as strings.
    lambda_now = float(lambda_now)
    secz_now = float(secz_now)
    lambda_next = float(lambda_next)
    secz_next = float(secz_next)
    wavelength_term = (lambda_next / lambda_now) ** (-0.2)
    airmass_term = (secz_next / secz_now) ** 0.6
    return fwhm_now * wavelength_term * airmass_term
def normalize_reference_name(name):
    """
    Canonicalize a reference name: trim surrounding whitespace, lowercase,
    and turn every dash and space into an underscore, so variant spellings
    compare equal.

    Parameters
    ----------
    name : str

    Returns
    -------
    str
    """
    cleaned = name.strip().lower()
    for separator in ("-", " "):
        cleaned = cleaned.replace(separator, "_")
    return cleaned
def _default_component(data):
"""
Choose a default ComponentID to display for data
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0] | ce88889378f4309d4dd96d28e0b54040fcfe6b2c | 112,242 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.