content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def inv_transform_vertex(x, phi):
    """
    Look up the pre-image of vertex id ``x`` under a set of partial
    isomorphisms ``phi`` (pairs of ``(source, target)`` vertex ids).

    Returns the source vertex id whose mapping targets ``x``.
    Raises Exception when no pair maps onto ``x``.
    """
    for pair in phi:
        if pair[1] == x:
            return pair[0]
    raise Exception('Could not find inverse transformation')
def order_by_event_token(metric):
    """Sort key function: return the ``event_token`` attribute of a Metric."""
    return metric.event_token | 96c0a13a9077a502901d2378e4bdf8c00493ed99 | 119,031 |
import copy
def update_state_prior_dirichlet(
    pD, qs, lr=1.0, factors="all"
):
    """
    Update Dirichlet parameters that parameterize the hidden state prior of the generative model
    (prior beliefs about hidden states at the beginning of the inference window).
    Parameters
    -----------
    - pD [numpy object array]:
        The prior Dirichlet parameters of the generative model, parameterizing the agent's
        beliefs about initial hidden states
    - qs [numpy object array (where each entry is a numpy 1D array)]:
        Current marginal posterior beliefs about hidden state factors
    - lr [float, optional]:
        Learning rate.
    - factors [list, optional]:
        Indices (in terms of range(num_factors)) of the hidden state factors to include in learning.
        Defaults to 'all', meaning that the priors over initial hidden states for all hidden state factors
        are updated.
    Returns a deep copy of ``pD`` with the selected factors incremented; the
    input array is not mutated.
    """
    num_factors = len(pD)
    # deep copy so the caller's pD is left untouched
    pD_updated = copy.deepcopy(pD)
    if factors == "all":
        factors = list(range(num_factors))
    for factor in factors:
        idx = pD[factor] > 0 # only update those state level indices that have some prior probability
        pD_updated[factor][idx] += (lr * qs[factor][idx])
    return pD_updated | 12ac3849f053d1df0353612b034285fe869ee098 | 119,033 |
import struct
def pack_timestamp_resolution(base, exponent):
    """
    Encode a timestamp resolution as a single byte.

    :param base: 2 or 10
    :param exponent: negative power of the base to be encoded
    """
    magnitude = abs(exponent)
    if base == 10:
        return struct.pack('B', magnitude)
    if base == 2:
        # The most-significant bit flags a base-2 resolution.
        return struct.pack('B', magnitude | 0b10000000)
    raise ValueError('Supported bases are: 2, 10')
def get_sentiment(text, analyzer):
    """Compute the sentiment label of a document
    based on a given VADER analyzer.
    Args:
        text: A given text document.
        analyzer: VADER analyser for sentiment analysis.
    Returns:
        A string representing the identified sentiment
        based on the compound score returned by the analyser.
        Possible values are: 'positive', 'negative' or
        'neutral' (lowercase).
    """
    compound_score = analyzer.polarity_scores(text)['compound']
    # +/-0.05 are the conventional VADER thresholds for polarity
    if compound_score >= 0.05:
        return 'positive'
    if compound_score <= -0.05:
        return 'negative'
    return 'neutral' | 704b9d911257436d167c3919296fec75610681eb | 119,042 |
def split_according_to_profile(data):
    """Return 2 pandas.DataFrames. The first one contains only emission
    spectra and the second all others. Usually only double-peak profiles.
    The data pandas.DataFrame has to have a profile column.
    """
    # boolean mask of rows whose profile is exactly 'emission'
    emission_index = data['profile'] == 'emission'
    return data[emission_index], data[~emission_index] | 2d98ee8529177e95b2e22cd0c2b1e9ebf155b5b8 | 119,043 |
def reach(doc, key):
    """Return a value from a (possibly nested) document.

    ``key`` may use MongoDB dot notation ("a.b.c") to descend into
    embedded documents; plain root-level keys work just as well.

    >>> reach({"first": {"second": "gold!"}}, "first.second")
    'gold!'

    Parameters
    ----------
    doc : dict compatible
        Document returned by a MongoDB cursor
    key : str
        Key pointing to the value being sought.
    """
    value = doc
    for part in key.split("."):
        value = value[part]
    return value
def get_W_tnk(W_tnk_raw):
    """Hot-water storage tank capacity.

    Args:
        W_tnk_raw(float): storage tank capacity as defined by
            JIS A 4113 ("solar heat storage tank")

    Returns:
        int: the capacity rounded half-up to the nearest integer

    Note:
        The documented behaviour (四捨五入, i.e. ordinary round-half-up)
        differs from the built-in ``round``, which rounds halves to the
        nearest even integer (``round(0.5) == 0``); floor(x + 0.5) gives
        the documented result.
    """
    import math

    # round-half-up, not banker's rounding
    return int(math.floor(W_tnk_raw + 0.5))
def getRelationsAndRNByCharacter(cname: str, rn: int) -> str:
    """Return a query to get the relation and Ryu Number of a character.
    The query retrieves the character's name, as well as the title and Ryu
    Number of all games that the character appears in with a Ryu Number
    greater than or equal to the passed value.
    The resulting tuple takes the following form for appears_in as AI and
    game as G: `(AI.cname: str, AI.gtitle: str, G.ryu_number: int)`

    SECURITY NOTE(review): ``cname`` is interpolated directly into the SQL
    string; a name containing a quote breaks the query (SQL injection).
    Prefer executing a parameterized query at the call site.
    """
    return (f"SELECT AI.cname, AI.gtitle, G.ryu_number "
            f"FROM appears_in AS AI "
            f"JOIN game AS G ON G.title=AI.gtitle "
            f"WHERE cname='{cname}' AND G.ryu_number>={rn};"
    ) | 3d6f2de4acafd03d3577eb3198fcf7100955a99c | 119,049 |
def remove_trailing_slash(id):
    """Strip any trailing slashes, except when the id is the root '/'."""
    return id if id == '/' else id.rstrip('/')
def find(n: int) -> int:
    """
    Return the sum of the integers in [2, n] divisible by 3 or 5.
    """
    return sum(i for i in range(2, n + 1) if i % 3 == 0 or i % 5 == 0)
def load_voltages(filename: str) -> list:
    """Load the adapter voltages from an input file.

    :param filename: Location of the voltage file (one integer per line)
    :return: List of adapter voltages as ints
    """
    with open(filename, 'r') as handle:
        return [int(line) for line in handle.readlines()]
def split_strings(s):
    """
    Split ``s`` into a list of two-character pairs. When the string has an
    odd number of characters, the final pair is padded with an underscore.

    :param s: a string input.
    :return: the string as a list of two-character pairs.
    """
    padded = s + "_" if len(s) % 2 else s
    return [padded[i:i + 2] for i in range(0, len(padded), 2)]
import random
def random_filter(objects, reduction_factor, seed=42):
    """
    Randomly extract a sublist of ``objects``: each element is kept with
    probability ``reduction_factor`` (0 < factor <= 1). A local RNG seeded
    with ``seed`` makes the selection reproducible.
    """
    assert 0 < reduction_factor <= 1, reduction_factor
    rnd = random.Random(seed)
    return [obj for obj in objects if rnd.random() <= reduction_factor]
import itertools
def limit(collection, count):
    """:yaql:limit
    Returns the first count elements of a collection.
    :signature: collection.limit(count)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg count: how many first elements of a collection to return. If count is
        greater or equal to collection size, return value is input collection
    :argType count: integer
    :returnType: iterable
    .. code::
        yaql> [1, 2, 3, 4, 5].limit(4)
        [1, 2, 3, 4]
    """
    # Lazy: returns an islice iterator, not a materialized list.
    return itertools.islice(collection, count) | 3a15067eafc1880a917bbcb558be19ec7ca0b4d0 | 119,066 |
from datetime import datetime
def build_patterns(config, date=None):
    """
    Format date strings for patterns. Takes in global config.

    Args:
        config: global settings config
        date: optional 'YYYY-MM-DD' string or datetime object overriding
            the current datetime; defaults to "now", evaluated per call.

    Returns:
        The global settings config but with updated per cluster pattern sets

    Note:
        The previous default ``date=datetime.now()`` was evaluated once at
        import time, so long-running processes kept formatting with a stale
        date. ``None`` now defers the lookup to call time.
    """
    cluster_settings = config['clusters']
    # Resolve the date override
    if date is None:
        date = datetime.now()
    elif isinstance(date, str):
        date = datetime.strptime(date, '%Y-%m-%d')
    # Fill in patterns
    for cluster in cluster_settings:
        for repo_config in cluster['repositories'].values():
            repo_config['patterns'] = list(
                map(
                    lambda x: datetime.strftime(date, x),
                    repo_config['patterns']
                )
            )
            # de-duplicate the formatted patterns
            repo_config['patterns'] = list(set(repo_config['patterns']))
    return config
def index_containing_substring(list_str, pattern):
    """For a given list of strings, find the indices of the elements
    that contain the substring.

    Parameters
    ----------
    list_str: list of str
    pattern: str
        pattern

    Returns
    -------
    indices: list of int
        the indices where the pattern matches
    """
    return [idx for idx, text in enumerate(list_str) if pattern in text]
def get_download_url(context, data_file):
    """
    Returns the download url for ``data_file``, resolved against the
    request carried by ``context``.
    """
    return data_file.download_url(context.request) | 76426e714e85350b088c5b6ac9389220837a18c5 | 119,075 |
def sanitize(name: str):
    """ Normalize the given string by removing chars that are potentially problematic for the LP solver """
    # NOTE(review): '-' -> '__' is not injective; 'a-b' and 'a__b' collide.
    return name.replace("-", "__") | 63d024b6820098b46d3b5457f338511c60e65898 | 119,077 |
import re
def ExtractDescription(changelog_file, breaker_re):
    """Read the file and collect lines up to the specified breaker pattern.

    Leading blank lines are skipped and the breaker line itself is
    excluded; the collected text is returned stripped.
    """
    with open(changelog_file, 'r') as f:
        lines = f.readlines()
    changelog = ''
    for line in lines:
        stripped = line.strip()
        # Skip blank lines that appear before any content.
        if not changelog and not stripped:
            continue
        # Stop once the breaker pattern matches.
        if re.match(breaker_re, stripped):
            break
        changelog += line
    return changelog.strip()
def reverseComplement(s):
    """Return the reverse complement of a DNA string (A<->T, C<->G, N->N)."""
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(s))
import collections
def build_dicts(reviewText):
    """
    Build a dictionary mapping each word to a unique integer id,
    assigned in descending order of frequency.
    """
    ranked = collections.Counter(reviewText).most_common()
    return {word: index for index, (word, _) in enumerate(ranked)}
import torch
def polar_to_cart(r, theta):
    """Converts polar to cartesian coordinates.

    Stacks cos(theta) and sin(theta) along a new leading axis, squeezes
    singleton dims, then scales by r.
    NOTE(review): the final .squeeze() drops ALL singleton dimensions, so
    the output shape depends on theta's shape — confirm against callers.
    """
    ret = torch.cat([torch.cos(theta).unsqueeze(0),
                     torch.sin(theta).unsqueeze(0)]).squeeze() * r
    return ret | 3f4f2999bb5acc171906494c113d8f96c6991a27 | 119,087 |
def process_results(metrics):
    """Extract useful information from given metrics.
    Args:
        metrics: List of results dicts. These should have been written to disk by
            training jobs.
    Returns:
        Dict mapping stats names to values.
    Raises:
        ValueError: If max_npe or max_global_repetitions values are inconsistant
            across dicts in the `metrics` list.
    """
    count = len(metrics)
    success_count = 0
    total_npe = 0  # Counting NPE across all runs.
    success_npe = 0  # Counting NPE in successful runs only.
    max_npe = 0
    max_repetitions = 0
    for metric_dict in metrics:
        # First dict seen establishes the expected settings; every later
        # dict must agree with them.
        if not max_npe:
            max_npe = metric_dict['max_npe']
        elif max_npe != metric_dict['max_npe']:
            raise ValueError(
                'Invalid experiment. Different reps have different max-NPE settings.')
        if not max_repetitions:
            max_repetitions = metric_dict['max_global_repetitions']
        elif max_repetitions != metric_dict['max_global_repetitions']:
            raise ValueError(
                'Invalid experiment. Different reps have different num-repetition '
                'settings.')
        if metric_dict['found_solution']:
            success_count += 1
            success_npe += metric_dict['npe']
        total_npe += metric_dict['npe']
    stats = {'max_npe': max_npe, 'max_repetitions': max_repetitions, 'repetitions': count, 'successes': success_count,
             'failures': count - success_count, 'success_npe': success_npe, 'total_npe': total_npe}
    if success_count:
        # Only successful runs counted.
        stats['avg_success_npe'] = stats['success_npe'] / float(success_count)
    else:
        stats['avg_success_npe'] = 0.0
    if count:
        stats['success_rate'] = success_count / float(count)
        stats['avg_total_npe'] = stats['total_npe'] / float(count)
    else:
        stats['success_rate'] = 0.0
        stats['avg_total_npe'] = 0.0
    return stats | b5e8d32f8779b286a470bea874bb724ca6efb4e8 | 119,088 |
import torch
def decode_μ_law(waveform: torch.Tensor, μ: int = 255) -> torch.Tensor:
    """
    Applies the element-wise inverse μ-law encoding to the tensor.
    Args:
        waveform: input tensor
        μ: size of the encoding (number of possible classes); must be odd
    Returns:
        the decoded tensor
    """
    # μ must be odd so the class range is symmetric around a midpoint
    assert μ & 1
    μ = μ - 1
    hμ = μ // 2
    # map class indices to [-1, 1]
    out = (waveform.type(torch.float32) - hμ) / hμ
    # inverse of the μ-law companding expansion
    out = torch.sign(out) / μ * (torch.pow(μ, torch.abs(out)) - 1)
    return out | 4df6bc228bc019545418f144a46068504edf0e81 | 119,091 |
def get_meronyms(synset):
    """
    Acquires meronyms for a given synset.
    Args:
        synset: WordNet synset
    Returns:
        set of meronyms (union of member, substance, and part meronyms)
    """
    return set(
        synset.member_meronyms() + synset.substance_meronyms() + synset.part_meronyms()
    ) | 6ffe363074f432d555553063bffbe5b1b8a56ce5 | 119,092 |
def get_tree_root_element(tree_object):
    """
    Return the root element of the tree_object.
    :param tree_object: ElementTree instance for the xml_file
    :return: Root Element object of the ElementTree instance
    """
    return tree_object.getroot() | 58fc44541dfa0f934958e8bf2f005eff8bc8876e | 119,097 |
from pathlib import Path
def extract_hash(hashfile: Path, name: str) -> str:
    """Return the MD5 hash from a file of "MD5 filename" pairs.

    :param hashfile: Path, path to file containing hash/filename pairs
    :param name: str, file name associated with the wanted hash

    The last matching entry wins; the result is always stringified, so a
    missing entry yields the string ``'None'``.
    """
    found = None
    with open(hashfile, "r") as handle:
        for raw in handle:
            if not raw.strip():
                continue
            fields = raw.strip().split()
            if Path(fields[1]).name == name:  # match on file name
                found = fields[0]
    return str(found)
def get_shape(ds):
    """Get number of bands, columns, and rows in a GDAL raster dataset."""
    return(ds.RasterCount,  # band count
           ds.RasterXSize,  # col count
           ds.RasterYSize)  # row count | abab91e142a5c7d580279bca9c6d94b07b59b213 | 119,111 |
def sgd(w, dw, params=None):
    """
    Perform vanilla SGD for a parameter update.

    Arguments:
        w: numpy array (or scalar) of the current weight
        dw: numpy array (or scalar) gradient of the loss w.r.t. w
        params: dictionary of hyper-parameters
            - lr: float learning rate (default 1e-2)

    Outputs:
        next_w: updated weight
        params: the hyper-parameter dictionary with defaults filled in

    Note:
        ``params`` previously defaulted to a mutable ``{}`` shared across
        calls; it now defaults to ``None`` and a fresh dict is created per
        call. Caller-supplied dicts are still updated in place, as before.
    """
    if params is None:
        params = {}
    # set default learning rate
    params.setdefault('lr', 1e-2)
    # gradient-descent step
    next_w = w - params['lr'] * dw
    return next_w, params
from functools import reduce
def query_path(obj, path):
    """Return the value at ``path`` inside the JSON-like object ``obj``.

    Each element of ``path`` is applied as a key/index in turn, so
    ``query_path(a, ["b", 0, "foo"]) == a["b"][0]["foo"]``. Examples:

    >>> a = {"a": [1, 2, 3], "b": [{"foo": "bar"}]}
    >>> query_path(a, ["a"])
    [1, 2, 3]
    >>> query_path(a, ["a", 2])
    3
    >>> query_path(a, ["b", 0, "foo"])
    'bar'
    """
    current = obj
    for step in path:
        current = current[step]
    return current
def adjust_list_to_length(obj, desired_length):
    """
    Return a :obj:`list` of exactly ``desired_length`` elements built from
    the sequence ``obj``.

    A string input is first wrapped as a single-element list. Shorter
    inputs are padded by repeating the last element; longer inputs are
    truncated.

    :type obj: sequence or :obj:`str`
    :param obj: The sequence to be extended/truncated.
    :type desired_length: :obj:`int`
    :param desired_length: The length of the returned :obj:`list`.
    :rtype: :obj:`list`
    :return: A list of length ``desired_length`` built from ``obj``.
    """
    items = [obj] if isinstance(obj, str) else list(obj)
    shortfall = desired_length - len(items)
    if shortfall > 0:
        # pad by repeating the final element
        items += [items[-1]] * shortfall
    return items[:desired_length]
import torch
def expand_as_one_hot(input_, C, ignore_label=None):
    """
    Converts NxSPATIAL label image to NxCxSPATIAL, where each label gets converted to its corresponding one-hot vector.
    NOTE: make sure that the input_ contains consecutive numbers starting from 0, otherwise the scatter_ function
    won't work.
    SPATIAL = DxHxW in case of 3D or SPATIAL = HxW in case of 2D
    :param input_: 3D or 4D label image (NxSPATIAL)
    :param C: number of channels/labels
    :param ignore_label: ignore index to be kept during the expansion
    :return: 4D or 5D output image (NxCxSPATIAL)
    """
    assert input_.dim() in (3, 4), f"Unsupported input shape {input_.shape}"
    # expand the input_ tensor to Nx1xSPATIAL before scattering
    input_ = input_.unsqueeze(1)
    # create result tensor shape (NxCxSPATIAL)
    output_shape = list(input_.size())
    output_shape[1] = C
    if ignore_label is not None:
        # create ignore_label mask for the result
        mask = input_.expand(output_shape) == ignore_label
        # clone the src tensor and zero out ignore_label in the input_
        # (cloning avoids mutating the caller's tensor)
        input_ = input_.clone()
        input_[input_ == ignore_label] = 0
        # scatter to get the one-hot tensor
        result = torch.zeros(output_shape).to(input_.device).scatter_(1, input_, 1)
        # bring back the ignore_label in the result
        result[mask] = ignore_label
        return result
    else:
        # scatter to get the one-hot tensor
        return torch.zeros(output_shape).to(input_.device).scatter_(1, input_, 1) | 5ced4e82c37f1d803a4b92eb9e089a0d90e7226e | 119,129 |
import hashlib
def check_password(tagged_digest_salt, password):
    """
    Check an OpenLDAP '{SSHA}' tagged digest against the given password.

    :param tagged_digest_salt: str like '{SSHA}<base64(sha1_digest + salt)>'
    :param password: password as bytes (a str is encoded as UTF-8)
    :return: True when the password matches the stored digest

    Note:
        The original used the Python 2-only ``str.decode('base64')``;
        the ``base64`` module is used now, so this works on Python 3.
    """
    import base64

    # the entire payload is base64-encoded
    assert tagged_digest_salt.startswith('{SSHA}')
    # strip off the hash label, then decode the digest+salt payload
    digest_salt = base64.b64decode(tagged_digest_salt[6:])
    # SHA-1 digests are exactly 20 bytes; the remainder is the salt
    digest = digest_salt[:20]
    salt = digest_salt[20:]
    if isinstance(password, str):
        password = password.encode('utf-8')
    sha = hashlib.sha1(password)
    sha.update(salt)
    return digest == sha.digest()
def vol_dry_clay(phi_n, phi_d, HI_dry_clay):
    """
    Calculates volume of dry clay content
    Parameters
    ----------
    phi_n : float
        Neutron porosity (decimal)
    phi_d : float
        Density porosity (decimal)
    HI_dry_clay : float
        Hydrogen Index of the average dry clay-mineral mixture within the formation (decimal)
    Returns
    -------
    float
        Returns volume of dry clay (decimal)
    References
    ----------
    Juhasz, I. (1979) ‘The Central Role of Qv and Formation-Water Salinity in the Evaluation of Shaly Formations’,
    The Log Analyst, vol. 20, no. 4, pp. 1–11.
    Juhasz, I. (1981) ‘Normalised Qv--the key to shaly sand evaluation using the Waxman-Smits equation in the absence of core data’,
    Society of Professional Well Log Analysts 22nd Annual Logging Symposium Transactions, no. Paper Z, p. 36.
    """
    # NOTE(review): operator precedence gives phi_n - (phi_d / HI_dry_clay).
    # If the intended formula is (phi_n - phi_d) / HI_dry_clay, parentheses
    # are missing — confirm against the Juhasz references before changing.
    return phi_n - phi_d / HI_dry_clay | 9775474c7eb1d7b26fd41c60074813e1f67fc4cf | 119,135 |
def get_source_instance_from_method(source_method):
    """Obtain the source object from a bound method that belongs to said object.
    :param source_method: Source method that is used
    :type source_method: method
    :returns: source instance the input method belongs to
    :rtype: :class:`.Source`
    """
    # __self__ is the instance a bound method is attached to
    return source_method.__self__ | 86512113663c93ffa00ad434af5c7d5d386c001d | 119,136 |
def find_closest_date( ts, date, search_min = True) :
    """
    Summary:
        Function that search for the closest date in a time series using
        a specific criteria (min/max)
    Arguments:
        ts - the time series where to search for the event (pandas Series
             with a DatetimeIndex — assumed sorted ascending; TODO confirm)
        date - the event date
        search_min - if true it searches for minumum value, if false for the
            maximum
    Returns:
        close_date - the date in the time series satisfying the criteria
    """
    # find the previous and following date
    # index of the last timestamp at or before `date`
    previous_date_ind = len((date - ts.index).days[(date - ts.index).days >= 0]) - 1
    if ts.index[previous_date_ind] == date :
        return date
    next_date_ind = previous_date_ind + 1
    # NOTE(review): ts[i] mixes positional and label indexing on a Series;
    # positional access is deprecated in modern pandas (use ts.iloc[i]).
    if search_min :
        if ts[previous_date_ind] > ts[next_date_ind] :
            return ts.index[next_date_ind]
        else:
            return ts.index[previous_date_ind]
    else :
        if ts[previous_date_ind] < ts[next_date_ind] :
            return ts.index[next_date_ind]
        else:
            return ts.index[previous_date_ind] | 2133ffd7cceb37e44248b8727b3ffd8be57e7a63 | 119,138 |
def is_string_type(s):
    """
    True iff ``s`` is a string.

    Note:
        The original ``type(s) == type('')`` rejected ``str`` subclasses;
        ``isinstance`` is the idiomatic and subclass-aware check.
    """
    return isinstance(s, str)
def r_int(n):
    """round() but it returns an integer (banker's rounding on .5 halves)"""
    return int(round(n)) | 827840173da5dffd261de739b46e04ab9c0a473b | 119,145 |
def salary_job_stats(df):
    """
    Get the stats (count, mean, std, min, 25%, 50%, 75%, max) for each job
    (Data Scientist, BI, Data Analyst, Developpeur, Data Engineer)
    Parameter:
        df : a dataframe with 'Salary' and 'Job' columns
    Returns:
        salary_df : a dataframe with mean/std salary for each job

    NOTE(review): 'data scienstist' below is likely a typo for
    'data scientist', and only 4 titles are listed although the docstring
    names 5 (data engineer missing). Also, after reset_index().T the
    columns are positional integers, so the name-based filter may drop
    everything — verify against real data before relying on the output.
    """
    # Stats for each job (even junior, alternance, etc)
    salary_stats = df.groupby('Job')['Salary'].describe().reset_index()
    salary_df = salary_stats.T # To have job title columns
    # Keep only 5 jobs titles
    col_to_keep = ['data scienstist', 'data analyst', 'business intelligence',
                   'developpeur']
    salary_df.drop(salary_df.columns.difference(col_to_keep), axis=1,
                   inplace=True)
    return salary_df | c2291b1f4aeb542f196441a16e6accfad1844955 | 119,148 |
def offset_tuple_col(df, offset_col, tuple_col):
    """Takes a dataframe (df) with at least two columns
    (`offset_col` and `tuple_col`)
    and offsets all values given in tuple_col using an offset
    Parameters
    ----------
    df: `pd.DataFrame`
        input dataframe which includes desired values
    offset_col: `str`
        The column with the offset value
    tuple_col: `str`
        The column with tuples including real (float/double/int) numbers
    Returns
    -------
    df_offset : `pd.Series`
        A Series of new tuples: each value of ``tuple_col`` shifted by the
        row's ``offset_col`` value (the input dataframe is not modified).
    """
    # row-wise apply: add the row's offset to every element of its tuple
    return df.apply(
        lambda row: tuple(
            e + row[offset_col] for e in row[tuple_col]),
        axis=1) | 0152c3acef906fbcecccea3f5fbf6a9cc4f5c9f0 | 119,149 |
def v5_tail(iterable, n):
    """Return the last n items of the given iterable as a list.

    Returns [] when n <= 0. Memory stays O(n) regardless of input size.

    Note:
        The original ``items = [*items[-(n-1):], item]`` approach broke for
        n == 1 (the buffer grew without bound, as its own docstring
        admitted); ``collections.deque(maxlen=n)`` handles every n
        uniformly and discards old items in C.
    """
    import collections

    if n <= 0:
        return []
    return list(collections.deque(iterable, maxlen=n))
def get_selected_file_name(sel_user, sel_game):
    """
    Return the CSV file path for the selected user/game combination.

    Game ids A-F map to 'resurse/USER<u>_game_<g>.csv'; any other value
    maps to 'resurse/USER<u>_<g>.csv'. The chosen file is also printed.
    """
    is_lettered_game = sel_game in {"A", "B", "C", "D", "E", "F"}
    if is_lettered_game:
        stem = "USER{0}_game_{1}".format(sel_user, sel_game)
    else:
        stem = "USER{0}_{1}".format(sel_user, sel_game)
    print("Working with file: " + stem)
    return "resurse/" + stem + ".csv"
def get_valid_requirements(req_path):
    """
    Parse a requirements.txt file and drop full-line comments (lines whose
    first character is '#'), returning the remaining lines verbatim
    (trailing newlines included).

    Note:
        The file handle was previously opened without being closed; a
        ``with`` block now releases it deterministically.
    """
    with open(req_path, "r") as req_file:
        return [line for line in req_file.readlines() if line[0] != "#"]
def count_hour_used(time_used):
    """
    Convert a duration given in seconds (``time_used``) to hours.
    """
    seconds_per_minute = 60
    minutes_per_hour = 60
    return time_used / seconds_per_minute / minutes_per_hour
def find_parent(lst, i, dist):
    """Find the parent node of the given node in a pre-order traversal list.

    Scans backwards from index ``i`` for the first entry whose recorded
    distance is smaller than ``dist``.

    Args:
        lst: a list containing a pre-order traversal of a free tree
        i: the index of the actual node
        dist: the distance of the actual node

    Returns:
        int: the index of the node's parent (-1 if it has no parent)
    """
    for j in range(i, -1, -1):
        if lst[j] < dist:
            return j
    return -1
from typing import List
import re
def version_constraint_free_packages(setup_cfg_packages: str) -> List[str]:
    """
    Remove version constraints from the packages listed in `options_field` (provided
    as input argument or read from `setup.cfg`).
    Parameters
    ----------
    setup_cfg_packages: str
        Packages with optional version constraints as parsed from a syntactically
        correct `setup.cfg` file.
    Returns
    -------
    List[str]
        `install_requires` packages without version constraints.
    """
    # to be able to match the last package in all cases
    setup_cfg_packages = "\n" + setup_cfg_packages + "\n"
    # get plain (version constraint free) package names, they are listed using a
    # "list-semi": dangling list or string of semicolon-separated values
    # https://setuptools.pypa.io/en/latest/userguide/declarative_config.html#options
    # NOTE(review): the trailing `;]` alternative in the lookahead looks like
    # a typo (possibly meant `;` or `]`) — confirm against real setup.cfg input.
    packages_without_version = re.findall(
        r"(?<=(?:\n|;)).*?(?=<|=|>|!|;|\n|;])", setup_cfg_packages
    )
    # strip possible remaining spaces and drop empty string elements
    packages_without_version = [s.strip() for s in packages_without_version]
    packages_without_version = list(filter(None, packages_without_version))
    return packages_without_version | 171cfe26b8cf7e2a5d52e3a5f4d2f739ccaafc16 | 119,167 |
def Dr(a_list):
    """assumes a_list is a list of strings
    returns a new list with every element of a_list prepended with 'Dr.'"""
    return ["Dr." + name for name in a_list]
def _IsInt(x):
"""Returns True if the input can be parsed as an int."""
try:
int(x)
return True
except ValueError:
return False | dab7b6a0765dee718a562dbde62f011082e8ea84 | 119,170 |
from typing import List
def read_file(file_name: str) -> List[str]:
    """
    Read in a text file.

    Parameters
    ----------
    file_name : str
        The text file path as a str to read

    Returns
    -------
    List[str]
        The file's content, one string per line (newlines preserved)
    """
    with open(file_name, "r") as handle:
        return handle.readlines()
def close_plot(__window, __event, plot, parent):
    """
    Function to close the plot.
    :param gtk.Window __window: the gtk.Window() that is being destroyed.
    :param gtk.gdk.Event __event: the gtk.gdk.Event() that called this method.
    :param matplotlib.FigureCanvas plot: the matplotlib.FigureCanvas() that was
                                         expanded.
    :param gtk.Widget parent: the original parent gtk.Widget() for the plot.
    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # move the canvas back under its original parent widget
    plot.reparent(parent)
    return False | f7647c04b932368ce09f961156aa0fe0aa15624c | 119,176 |
def composite(image1, image2, mask):
    """
    Create composite image by blending images using a transparency mask.
    :param image1: The first image.
    :param image2: The second image.  Must have the same mode and
       size as the first image.
    :param mask: A mask image.  This image can have mode
       "1", "L", or "RGBA", and must have the same size as the
       other two images.
    :return: a new image; neither input is modified.
    """
    # paste image1 over a copy of image2, blended through the mask
    image = image2.copy()
    image.paste(image1, None, mask)
    return image | cce01353bde95b88b81110befcea98f0915abc7c | 119,177 |
def get_dict_from_region_string(region_string):
    """
    get_dict_from_region_string
    ===========================
    Parse a tabix-styled region string (e.g. 'chr14:123,456-124,456')
    into a dictionary.

    Parameters:
    -----------
    1) region_string: (str) A tabix styled region string.

    Returns:
    ++++++++
    1) (dict) Keys 'chrom' (str), 'start' (int), and 'end' (int).
    """
    # normalize: drop thousands separators, unify '-' and ':' delimiters
    normalized = region_string.strip().replace(",", "").replace("-", ":")
    fields = normalized.split(":")
    return {
        "chrom": str(fields[0]),
        "start": int(fields[1]),
        "end": int(fields[2]),
    }
def gen_profiles(game):
    """Generate profile types and names.

    Returns a dict mapping a profile-type name to a zero-argument callable
    producing mixtures from ``game``.
    """
    return {
        'uniform': lambda: [game.uniform_mixture()],
        'pure': game.pure_mixtures,
        'biased': game.biased_mixtures,
        'role biased': game.role_biased_mixtures,
        'random': lambda: game.random_mixtures(game.num_role_strats.prod()),
    } | bfb0e0a34d0a5d9242e2c61e953e41c25085771a | 119,180 |
def extend(dict1, dict2):
    """Create a new dict from the values of dict1 added/updated by the
    values from dict2 (dict2 wins on key collisions).

    Note:
        The original ``dict(dict1, **dict2)`` raises TypeError when
        ``dict2`` has non-string keys; the ``{**, **}`` merge accepts any
        hashable key.
    """
    return {**dict1, **dict2}
def url_is(white_list):
    """
    Function generator.

    Args:
        white_list (dict): dict with PREFIXES and CONSTANTS keys (list values).

    Returns:
        func: a predicate telling whether a URL starts with one of the
        whitelisted prefixes or exactly equals one of the constants.
    """
    def func(url):
        if any(url.startswith(prefix)
               for prefix in white_list.get('PREFIXES', ())):
            return True
        return any(url == exact
                   for exact in white_list.get('CONSTANTS', ()))
    return func
def convert_string_to_list(full_str: str) -> list:
    """
    Take a SPACE-DELIMITED string and split it into a list.
    Consecutive spaces yield empty-string elements (plain str.split(" ")).
    """
    return full_str.split(" ") | f240614268dca3f15d8dba50568367af6f9c3cb0 | 119,186 |
def multi_unsqueeze(tensor, times, prepend=True):
    """
    Unsqueeze a tensor multiple times towards one direction.
    Useful for broadcasting operations.

    :param tensor: The tensor to unsqueeze
    :param times: The number of singular dimensions to add.
    :param prepend: whether to prepend unsqueezes or append them.
    :return: The unsqueezed tensor, which is a view and not a copy.

    Note:
        The append branch previously called ``tensor.view(old_shape,
        extra_dims)`` with two list arguments, which raises a TypeError;
        the shape lists are now concatenated before a single ``view`` call.
    """
    old_shape = list(tensor.shape)
    extra_dims = [1] * times
    if prepend:
        new_shape = extra_dims + old_shape
    else:
        new_shape = old_shape + extra_dims
    return tensor.view(new_shape)
def is_verb(node):
    """
    A verb is either a non-terminal VP (verb phrase), a terminal verb (VB, VBZ,
    etc.), or a modal verb (a sort of auxiliary verb).

    ``node`` is expected to expose an nltk-style ``label()`` method.
    """
    # any V* tag (VP, VB, VBZ, ...) or the modal tag MD
    return node.label()[0] == 'V' or node.label() == 'MD' | 60f3557cd3c462715774c96d33334bc5bb4628cf | 119,200 |
def default_get_serializer_class(self):
    """
    Return the class to use for the serializer.
    Defaults to using `self.serializer_class`.
    You may want to override this if you need to provide different
    serializations depending on the incoming request.
    (Eg. admins get full serialization, others get basic serialization)
    """
    # NOTE: assert is stripped under `python -O`; the guard only fires in
    # non-optimized runs.
    assert self.serializer_class is not None, (
        "'%s' should either include a `serializer_class` attribute, a `http_method_XXXX_serializer_class`, "
        "or override the `get_serializer_class()` method."
        % self.__class__.__name__
    )
    return self.serializer_class | 9759bce5e65b6c6f3764dcaaf3cb4fa653c4ca67 | 119,202 |
def get_return_type(cf=None):
    """Hack to get the return value of a function.
    :param cf: the result of ``decompile()``
    :type cf: :class:`ida_hexrays.cfuncptr_t`
    :return: Type information for the return value
    :rtype: :class:`tinfo_t`
    :raises ValueError: when ``cf`` is falsy/None
    """
    if not cf:
        raise ValueError
    ty = cf.type  # tinfo_t (entire prototype)
    ti = ty.get_rettype()  # tinfo_t (return value)
    return ti | 0182d43ee602830db51000a32588e904a4851b81 | 119,204 |
def mlsum_summarization(dict: dict):
    """
    Map function for the mlsum summarization task.

    :param dict: feature mapping with 'text' and 'summary' keys
    :return: two-element list [input_text, target_summary]
    """
    return [dict['text'], dict['summary']]
def dump(scale, separator=' '):
    """
    Pretty-print the notes of a scale: each note is left-padded to three
    characters, joined with ``separator``, and the ASCII 'b'/'#' characters
    are replaced with the unicode flat/sharp symbols.
    """
    formatted = separator.join('{:<3s}'.format(note) for note in scale)
    return formatted.replace('b', '\u266d').replace('#', '\u266f')
def get_token(fco_api):
    """
    Get API token.
    :param fco_api: FCO API object
    :return: API public token (auto-renewing)
    """
    return fco_api.getAuthenticationToken(automaticallyRenew=True).publicToken | a45de8240c6bd9b05aca7fa43e8cb8306571361f | 119,211 |
def _has_unique_constraints(model_or_instance):
    """
    Returns a boolean to indicate if the given model has any type of unique
    constraint defined (e.g. unique on a single field, or meta.unique_together).
    To support concrete model inheritance we state that uniqueness checks
    should only be performed on the class that the defines the unique constraint.
    Note - you can see a much more verbose implementation of this in
    django.db.models.base.Model._get_unique_checks() - but we implement our
    own logic to exit early when the first constraint is found.
    """
    meta_options = model_or_instance._meta
    # we may get an instance here, so ensure we have a reference to the
    # model class
    model_class = meta_options.model
    unique_together = meta_options.unique_together
    # only count unique fields declared on this concrete class, not parents
    unique_fields = any(
        field.unique and field.model == model_class
        for field in meta_options.fields
    )
    return any([unique_fields, unique_together]) | ce03f351a1456ae101b166c14e8d0c9a488c12c2 | 119,212 |
import collections
def vcf_to_concordance(variants_to_vcfs_dict):
    """Return, for each VCF caller, a mapping from "number of VCFs in
    concordance" to the number of calls they concord on.

    :param variants_to_vcfs_dict: mapping of variant -> collection of the
        VCF callers that reported it

    Note:
        ``dict.itervalues()`` was Python 2-only and raises AttributeError
        on Python 3; ``values()`` is used now.
    """
    concordance_counts = collections.defaultdict(lambda: collections.defaultdict(int))
    for vcfs in variants_to_vcfs_dict.values():
        for vcf in vcfs:
            # every caller in the group shares the group's concordance level
            concordance_counts[vcf][len(vcfs)] += 1
    return concordance_counts
import math
def compute_idf(n_documents, raw_doc_freq):
    """Compute within-corpus inverse document frequencies:
    idf(t, D) = log(|D| / |{d in D : t in d}|),
    i.e. total number of documents over number of documents containing
    the term. By construction every term occurs in at least one
    document, so the denominator is never zero.
    """
    result = {}
    for term, doc_count in raw_doc_freq.items():
        result[term] = math.log(n_documents / doc_count)
    return result
def datetime_to_hour(date_time):
    """Convert a datetime to the fractional hour of the day as a float
    (03:15pm -> 15.25)."""
    sub_hour_seconds = (date_time.minute * 60 + date_time.second
                        + date_time.microsecond / 1e6)
    return date_time.hour + sub_hour_seconds / 3600.
def make_output_path(type_name, type_root_namespace, type_namespace):
    """Create the output path for a generated class file.

    Args:
        type_name (str): Name of the class.
        type_root_namespace (str): Root namespace, used as the output
            directory root one level up from the working directory.
        type_namespace (str): The class's namespace.

    Returns:
        str: The output file path.
    """
    return f'../{type_root_namespace}/{type_namespace}/{type_name}.generated.cs'
def is_power_of_2(num):
    """Return True when *num* (a positive integer) is an exact power of two.

    Uses the single-set-bit trick: for powers of two, ``num & (num - 1)``
    clears the only set bit and yields zero.
    """
    if num == 0:
        return False
    return (num & (num - 1)) == 0
def bit_mask(width: int) -> int:
    """All ones bit mask for a given width.

    Args:
        width: Width of bit mask.

    Returns:
        Bit mask.

    Example:
        >>> bin(bit_mask(width=4))
        '0b1111'
    """
    span = 2 ** width
    return span - 1
import json
def load_config(config_filepath):
    """Load a session configuration from a JSON-formatted file.

    Args:
        config_filepath: string
    Returns: dict, or None (with a message printed) when the file
        cannot be opened for reading.
    """
    try:
        handle = open(config_filepath, 'r')
    except IOError:
        print('No readable config file at path: ' + config_filepath)
        return None
    with handle:
        return json.load(handle)
from pathlib import Path
import yaml
def get_manifests(mfdir):
    """Read in all manifests from *mfdir* and return them as a dict.

    For each of the known manifest names, opens ``<name>.yaml`` and
    keeps the second YAML document of the multi-document stream,
    keyed by the file name.
    """
    base = Path(mfdir)
    manifests = {}
    for name in ('exe', 'input', 'restart'):
        mfpath = base / "{}.yaml".format(name)
        with mfpath.open() as stream:
            documents = list(yaml.safe_load_all(stream))
            manifests[mfpath.name] = documents[1]
    return manifests
import copy
def make_hist_context(hist, context):
    """Return a deep copy of *context* extended with the context
    of a :class:`.histogram` *hist*.

    The copy is taken before updating, so the caller's *context* (and
    any nested structures it holds) is never mutated.
    """
    all_context = copy.deepcopy(context)
    all_context["histogram"] = {
        "dim": hist.dim,
        "nbins": hist.nbins,
        "ranges": hist.ranges
    }
    return all_context
def subtract(left, right):
    """Return the difference of two numbers.

    >>> from math_operations import subtract
    >>> subtract(2, 2)
    0
    >>> subtract(-3, -1)
    -2

    :param left: minuend (left operand)
    :param right: subtrahend (right operand)
    :return: difference between left and right operand
    """
    difference = left - right
    return difference
import tempfile
def tmpfile(pref="peda-", is_binary_file=False):
    """Create and return a named temporary file with a custom prefix,
    opened in text mode by default or binary mode on request."""
    if is_binary_file:
        file_mode = 'w+b'
    else:
        file_mode = 'w+'
    return tempfile.NamedTemporaryFile(mode=file_mode, prefix=pref)
def get_utxos(tx, address):
    """Collect the outputs of a transaction that were sent to an address.

    tx - <Dictionary> transaction in bitcoin core format
    address - <string> address to match against each output
    returns => List<Dictionary> matching UTXOs in bitcoin core format
    """
    utxos = []
    for output in tx["vout"]:
        script = output["scriptPubKey"]
        if "addresses" not in script:
            # In Bitcoin Core versions older than v0.16, native segwit
            # outputs have no address decoded
            continue
        if address in script["addresses"]:
            utxos.append(output)
    return utxos
from typing import Dict
def column_dict_parser(num_comps: int, csv_dict: Dict) -> Dict:
    """Sort the input CSV data into per-compound dictionaries.

    For each compound index 1..num_comps, keeps the entries of
    *csv_dict* whose key matches one of the compound's shift variables
    and whose value is non-empty, keyed by the compound index string.
    """
    per_compound = {}
    suffixes = ("_cshift", "_hshift", "_multi", "_coupling")
    for idx in range(1, num_comps + 1):
        # Candidate variable names for this compound.
        wanted = {str(idx) + suffix for suffix in suffixes}
        per_compound[str(idx)] = {
            key: value
            for key, value in csv_dict.items()
            if key in wanted and bool(value)
        }
    return per_compound
from typing import Optional
from typing import Sequence
from typing import Union
from typing import List
def rjoin(sep: Optional[str], array: Sequence[str]) -> Union[str, List[str]]:
    """Concatenate strings with the separator also appended at the end.

    Return a string which is the concatenation of the strings in *array*,
    with *sep* between elements and appended after the last one; an empty
    *array* yields ''. When *sep* is None, a new list copy of *array* is
    returned unchanged (not the original sequence object).
    """
    if sep is None:
        return list(array)
    if len(array) == 0:
        return ''
    return sep.join(array) + sep
def get_norm_distance(length: int, distance: float) -> float:
    """Return *distance* normalized by twice the *length*.

    :param length: int
    :param distance: float
    :rtype: float
    """
    norm_factor = length * 2
    return distance / norm_factor
def grayscale(image):
    """Return *image* converted to grayscale ("L" mode).

    :param image: The image to convert.
    :return: An image.
    """
    grey = image.convert("L")
    return grey
def check_shell_out(msg):
    """Return the output of an exec_shell result, or raise on error.

    Args:
        msg((namedtuple(shell_out[err=(str), output=(str)))): The message to be printed
    Returns:
        (str): the msg output when no error occurred
    Raises:
        OSError: if shell_out contains an error
    """
    if msg.err == '':
        return msg.output
    raise OSError('Shell command failed with\n\t%s' % msg.err)
def filter_dict(d: dict, keys: list) -> dict:
    """Return a new dictionary containing only the entries of *d*
    whose key appears in *keys*.

    Parameters
    ----------
    d : dict
        dictionary to filter
    keys : list
        list of keys to include in new dictionary

    Returns
    -------
    dict
        filtered dictionary
    """
    return {key: value for key, value in d.items() if key in keys}
import re
def normalise_shelfmark(sm):
"""Normalize shelfmarks."""
sm = "".join(sm.split())
sm = re.sub(r',|\.|;|:', r'', sm)
sm = re.sub(r'\[|\{', r'\(', sm)
sm = re.sub(r'\]|\}', r'\)', sm)
sm = sm.replace('_', '-')
sm = sm.replace('&', ' & ')
sm = re.sub(r'(\d+)', lambda m: m.group(1).rjust(7, ' '), sm)
return sm.upper() | 29d9276c8a56f17786424f66f52b29241ae29860 | 119,266 |
def sanitize_size(x):
    """Clamp a segment size so it is represented by at least one pixel."""
    return max(int(x), 1) if x >= 1 else 1
def is_build_tag(tag: str) -> bool:
    """
    Test if the given *tag* looks like one of our build tags.
    """
    prefix = "build-"
    return tag.startswith(prefix)
from typing import List
def get_name_prefix(
    name: str,
    prefixes: List[str],
):
    """Find which of the candidate *prefixes* the name starts with.

    Parameters
    ----------
    name
        A name string (compared case-insensitively).
    prefixes
        Available prefixes.

    Returns
    -------
    The single matching prefix, or None when nothing matches.

    Raises
    ------
    ValueError
        When more than one prefix matches *name*.
    """
    lowered = name.lower()
    matches = [candidate for candidate in prefixes
               if lowered.startswith(candidate)]
    if not matches:
        return None
    if len(matches) > 1:
        raise ValueError(
            f"Model name `{name}` is mapped to multiple models, "
            f"which means some names in `{prefixes}` have duplicate prefixes."
        )
    return matches[0]
def eliminate_duplicated_lines(txt):
    """Collapse consecutive duplicated lines in a report.

    Empty lines are always kept, so blank separation is preserved.

    :param txt: str
    :return: list of lines without consecutive repetition.
    """
    assert isinstance(txt, str)
    rep_lines = txt.split('\n')
    res = [rep_lines[0]]
    # Start after the seed line: the previous version re-processed
    # rep_lines[0], which duplicated it whenever txt began with a
    # blank line.
    for ln in rep_lines[1:]:
        if ln == '' or ln != res[-1]:
            res.append(ln)
    return res
def empty_data(data):
    """Return True when *data* is None or an empty list (exact list
    type, not a subclass), False otherwise."""
    if data is None:
        return True
    return type(data) == list and len(data) == 0
from pathlib import Path
def fpga_uart(pytestconfig):
    """Return the path to the UART attached to the FPGA.

    The path comes from the ``fpga_uart`` option and must exist and
    not be a directory.
    """
    uart = Path(pytestconfig.getoption('fpga_uart')).resolve()
    assert uart.exists()
    assert not uart.is_dir()
    return uart
def combinations(source, target):
    """Return every (s, t) pair between the lists *source* and *target*.

    Equivalent to ``list(itertools.product(source, target))``; pairs
    are ordered by source element first, then target element.
    """
    # Comprehension replaces the manual nested append loop.
    return [(s, t) for s in source for t in target]
def output(tf_var):
    """Build the argv for ``terraform output <tf_var>``.

    Args:
        tf_var: name of terraform output variable to return the output of.
    Returns:
        tf_command: list of command-line arguments to run terraform with.
    """
    return ['terraform', 'output', tf_var]
def gcd_r(x, y):
    """Return the greatest common divisor of x and y, recursively.

    Uses Euclid's algorithm: gcd(x, y) = gcd(y, x % y).

    :param x: positive integer
    :param y: positive integer
    :returns: greatest common divisor of x and y
    """
    if y == 0:
        return x
    return gcd_r(y, x % y)
def dist2sconscc(compiler):
    """Convert a distutils compiler name to the scons convention
    (C compiler). *compiler* should be a CCompiler instance.

    Example:
        --compiler=intel -> intelc
    """
    known = {'msvc': 'msvc', 'intel': 'intelc'}
    ctype = compiler.compiler_type
    if ctype in known:
        return known[ctype]
    # Fall back to the executable name of the underlying compiler.
    return compiler.compiler[0]
import bisect
def find_gt(a, x):
    """Return the smallest value in sorted sequence *a* strictly
    greater than *x*, or None when no such value exists."""
    pos = bisect.bisect_right(a, x)
    return a[pos] if pos < len(a) else None
def get_str_ip(list_ip):
    """
    Format a list of 4 integers as a dotted-quad IP address string.
    """
    return ".".join(map(str, list_ip))
def dns_name_decode(name):
    """
    DNS domain name decoder (bytes to string).

    name -- example: b"\x03www\x07example\x03com\x00"
    return -- example: "www.example.com."

    Walks the length-prefixed labels: each label is preceded by a
    single byte giving its length; the sequence ends at the zero byte.
    """
    labels = []
    pos = 1
    while pos < len(name):
        length = name[pos - 1]
        labels.append(name[pos:pos + length].decode())
        pos += length + 1
    return ".".join(labels) + "."
def get_threat_component(components, threat_type):
    """Pick a threat component of a given type out of a component list.

    Args:
        components: List of threat components
        threat_type: Type of threat we are looking for
    Returns: Either the component that we asked for or None
    """
    for component in components:
        if component.get('name') == threat_type:
            return component
    return None
def reconstruct_include_node(node):
    """Given an include node, reconstruct the original template text."""
    # dict.viewitems() was Python 2 only; items() works on py2 and py3.
    extra_context = " ".join(["%s=%s" % (item[0], item[1].token)
                              for item in node.extra_context.items()])
    if extra_context:
        return '{%% include %s with %s %%}' % (
            node.template.token, extra_context)
    else:
        return '{%% include %s %%}' % node.template.token
def ToText(value):
    """Convert int/float (or any non-string) to a string.

    :param any value: int or float.
    :return: string(str)
    """
    if isinstance(value, str):
        return value
    return str(value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.